vf_blend.c (FFmpeg 2.6.9)
/*
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/imgutils.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "formats.h"
#include "internal.h"
#include "dualinput.h"
#include "video.h"

#define TOP    0
#define BOTTOM 1

enum BlendMode {
    BLEND_UNSET = -1,
    BLEND_NORMAL,
    BLEND_ADDITION,
    BLEND_AND,
    BLEND_AVERAGE,
    BLEND_BURN,
    BLEND_DARKEN,
    BLEND_DIFFERENCE,
    BLEND_DIFFERENCE128,
    BLEND_DIVIDE,
    BLEND_DODGE,
    BLEND_EXCLUSION,
    BLEND_HARDLIGHT,
    BLEND_LIGHTEN,
    BLEND_MULTIPLY,
    BLEND_NEGATION,
    BLEND_OR,
    BLEND_OVERLAY,
    BLEND_PHOENIX,
    BLEND_PINLIGHT,
    BLEND_REFLECT,
    BLEND_SCREEN,
    BLEND_SOFTLIGHT,
    BLEND_SUBTRACT,
    BLEND_VIVIDLIGHT,
    BLEND_XOR,
    BLEND_NB
};

static const char *const var_names[] = {   "X",   "Y",   "W",   "H",   "SW",   "SH",   "T",   "N",   "A",   "B",   "TOP",   "BOTTOM",        NULL };
enum                                   { VAR_X, VAR_Y, VAR_W, VAR_H, VAR_SW, VAR_SH, VAR_T, VAR_N, VAR_A, VAR_B, VAR_TOP, VAR_BOTTOM, VAR_VARS_NB };

typedef struct FilterParams {
    enum BlendMode mode;
    double opacity;
    AVExpr *e;
    char *expr_str;
    void (*blend)(const uint8_t *top, int top_linesize,
                  const uint8_t *bottom, int bottom_linesize,
                  uint8_t *dst, int dst_linesize,
                  int width, int start, int end,
                  struct FilterParams *param, double *values);
} FilterParams;

typedef struct ThreadData {
    const AVFrame *top, *bottom;
    AVFrame *dst;
    AVFilterLink *inlink;
    int plane;
    int w, h;
    FilterParams *param;
} ThreadData;

typedef struct {
    const AVClass *class;
    FFDualInputContext dinput;
    int hsub, vsub;             ///< chroma subsampling values
    int nb_planes;
    char *all_expr;
    enum BlendMode all_mode;
    double all_opacity;

    FilterParams params[4];
    int tblend;
    AVFrame *prev_frame;        /* only used with tblend */
} BlendContext;

#define COMMON_OPTIONS \
    { "c0_mode", "set component #0 blend mode", OFFSET(params[0].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c1_mode", "set component #1 blend mode", OFFSET(params[1].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c2_mode", "set component #2 blend mode", OFFSET(params[2].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "c3_mode", "set component #3 blend mode", OFFSET(params[3].mode), AV_OPT_TYPE_INT, {.i64=0}, 0, BLEND_NB-1, FLAGS, "mode"},\
    { "all_mode", "set blend mode for all components", OFFSET(all_mode), AV_OPT_TYPE_INT, {.i64=-1},-1, BLEND_NB-1, FLAGS, "mode"},\
    { "addition",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_ADDITION},      0, 0, FLAGS, "mode" },\
    { "and",           "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AND},           0, 0, FLAGS, "mode" },\
    { "average",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_AVERAGE},       0, 0, FLAGS, "mode" },\
    { "burn",          "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_BURN},          0, 0, FLAGS, "mode" },\
    { "darken",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DARKEN},        0, 0, FLAGS, "mode" },\
    { "difference",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE},    0, 0, FLAGS, "mode" },\
    { "difference128", "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIFFERENCE128}, 0, 0, FLAGS, "mode" },\
    { "divide",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DIVIDE},        0, 0, FLAGS, "mode" },\
    { "dodge",         "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_DODGE},         0, 0, FLAGS, "mode" },\
    { "exclusion",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_EXCLUSION},     0, 0, FLAGS, "mode" },\
    { "hardlight",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_HARDLIGHT},     0, 0, FLAGS, "mode" },\
    { "lighten",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_LIGHTEN},       0, 0, FLAGS, "mode" },\
    { "multiply",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_MULTIPLY},      0, 0, FLAGS, "mode" },\
    { "negation",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NEGATION},      0, 0, FLAGS, "mode" },\
    { "normal",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_NORMAL},        0, 0, FLAGS, "mode" },\
    { "or",            "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OR},            0, 0, FLAGS, "mode" },\
    { "overlay",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_OVERLAY},       0, 0, FLAGS, "mode" },\
    { "phoenix",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PHOENIX},       0, 0, FLAGS, "mode" },\
    { "pinlight",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_PINLIGHT},      0, 0, FLAGS, "mode" },\
    { "reflect",       "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_REFLECT},       0, 0, FLAGS, "mode" },\
    { "screen",        "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SCREEN},        0, 0, FLAGS, "mode" },\
    { "softlight",     "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SOFTLIGHT},     0, 0, FLAGS, "mode" },\
    { "subtract",      "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_SUBTRACT},      0, 0, FLAGS, "mode" },\
    { "vividlight",    "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_VIVIDLIGHT},    0, 0, FLAGS, "mode" },\
    { "xor",           "", 0, AV_OPT_TYPE_CONST, {.i64=BLEND_XOR},           0, 0, FLAGS, "mode" },\
    { "c0_expr",  "set color component #0 expression", OFFSET(params[0].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c1_expr",  "set color component #1 expression", OFFSET(params[1].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c2_expr",  "set color component #2 expression", OFFSET(params[2].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c3_expr",  "set color component #3 expression", OFFSET(params[3].expr_str), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "all_expr", "set expression for all color components", OFFSET(all_expr), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },\
    { "c0_opacity",  "set color component #0 opacity", OFFSET(params[0].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c1_opacity",  "set color component #1 opacity", OFFSET(params[1].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c2_opacity",  "set color component #2 opacity", OFFSET(params[2].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "c3_opacity",  "set color component #3 opacity", OFFSET(params[3].opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS },\
    { "all_opacity", "set opacity for all color components", OFFSET(all_opacity), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS}

#define OFFSET(x) offsetof(BlendContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption blend_options[] = {
    COMMON_OPTIONS,
    { "shortest",   "force termination when the shortest input terminates", OFFSET(dinput.shortest),   AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS },
    { "repeatlast", "repeat last bottom frame",                             OFFSET(dinput.repeatlast), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(blend);

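/*
 * Usage note (illustrative, not part of the original source): these options
 * surface in a filtergraph string.  Assuming two compatible inputs, something
 * like
 *
 *     ffmpeg -i top.mp4 -i bottom.mp4 \
 *            -filter_complex "blend=all_mode=multiply:all_opacity=0.7" out.mp4
 *
 * selects blend_multiply() for every plane with a 70% mix, while
 * "blend=c0_mode=lighten" would change only component #0 and leave the other
 * planes on their default mode.
 */
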
static void blend_normal(const uint8_t *top, int top_linesize,
                         const uint8_t *bottom, int bottom_linesize,
                         uint8_t *dst, int dst_linesize,
                         int width, int start, int end,
                         FilterParams *param, double *values)
{
    av_image_copy_plane(dst, dst_linesize, top, top_linesize, width, end - start);
}

#define DEFINE_BLEND(name, expr)                                      \
static void blend_## name(const uint8_t *top, int top_linesize,       \
                          const uint8_t *bottom, int bottom_linesize, \
                          uint8_t *dst, int dst_linesize,             \
                          int width, int start, int end,              \
                          FilterParams *param, double *values)        \
{                                                                     \
    double opacity = param->opacity;                                  \
    int i, j;                                                         \
                                                                      \
    for (i = start; i < end; i++) {                                   \
        for (j = 0; j < width; j++) {                                 \
            dst[j] = top[j] + ((expr) - top[j]) * opacity;            \
        }                                                             \
        dst    += dst_linesize;                                       \
        top    += top_linesize;                                       \
        bottom += bottom_linesize;                                    \
    }                                                                 \
}
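
/*
 * Illustrative sketch (not part of the original file): every generated
 * blend_<name>() mixes the raw blend result back towards the top input with
 * the per-component opacity, i.e. dst = top + (expr - top) * opacity, so
 * opacity=1 gives the pure blend result and opacity=0 leaves the top plane
 * untouched.  For example, DEFINE_BLEND(addition, FFMIN(255, A + B)) expands
 * to roughly the function below (A and B are the macros defined right after
 * this point, standing for top[j] and bottom[j]).
 */
#if 0
static void blend_addition_expanded(const uint8_t *top, int top_linesize,
                                    const uint8_t *bottom, int bottom_linesize,
                                    uint8_t *dst, int dst_linesize,
                                    int width, int start, int end,
                                    FilterParams *param, double *values)
{
    double opacity = param->opacity;
    int i, j;

    for (i = start; i < end; i++) {
        for (j = 0; j < width; j++) {
            int blended = FFMIN(255, top[j] + bottom[j]);
            dst[j] = top[j] + (blended - top[j]) * opacity;
        }
        dst    += dst_linesize;
        top    += top_linesize;
        bottom += bottom_linesize;
    }
}
#endif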

#define A top[j]
#define B bottom[j]

#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
#define SCREEN(x, a, b)   (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))
#define BURN(a, b)        (((a) == 0) ? (a) : FFMAX(0, 255 - ((255 - (b)) << 8) / (a)))
#define DODGE(a, b)       (((a) == 255) ? (a) : FFMIN(255, (((b) << 8) / (255 - (a)))))

DEFINE_BLEND(addition,   FFMIN(255, A + B))
DEFINE_BLEND(average,    (A + B) / 2)
DEFINE_BLEND(subtract,   FFMAX(0, A - B))
DEFINE_BLEND(multiply,   MULTIPLY(1, A, B))
DEFINE_BLEND(negation,   255 - FFABS(255 - A - B))
DEFINE_BLEND(difference, FFABS(A - B))
DEFINE_BLEND(difference128, av_clip_uint8(128 + A - B))
DEFINE_BLEND(screen,     SCREEN(1, A, B))
DEFINE_BLEND(overlay,    (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B))
DEFINE_BLEND(hardlight,  (B < 128) ? MULTIPLY(2, B, A) : SCREEN(2, B, A))
DEFINE_BLEND(darken,     FFMIN(A, B))
DEFINE_BLEND(lighten,    FFMAX(A, B))
DEFINE_BLEND(divide,     ((float)A / ((float)B) * 255))
DEFINE_BLEND(dodge,      DODGE(A, B))
DEFINE_BLEND(burn,       BURN(A, B))
DEFINE_BLEND(softlight,  (A > 127) ? B + (255 - B) * (A - 127.5) / 127.5 * (0.5 - FFABS(B - 127.5) / 255) : B - B * ((127.5 - A) / 127.5) * (0.5 - FFABS(B - 127.5) / 255))
DEFINE_BLEND(exclusion,  A + B - 2 * A * B / 255)
DEFINE_BLEND(pinlight,   (B < 128) ? FFMIN(A, 2 * B) : FFMAX(A, 2 * (B - 128)))
DEFINE_BLEND(phoenix,    FFMIN(A, B) - FFMAX(A, B) + 255)
DEFINE_BLEND(reflect,    (B == 255) ? B : FFMIN(255, (A * A / (255 - B))))
DEFINE_BLEND(and,        A & B)
DEFINE_BLEND(or,         A | B)
DEFINE_BLEND(xor,        A ^ B)
DEFINE_BLEND(vividlight, (B < 128) ? BURN(A, 2 * B) : DODGE(A, 2 * (B - 128)))
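
/*
 * Worked example (illustrative, not part of the original file): for a single
 * 8-bit pixel pair the macros above reduce to plain integer arithmetic.  With
 * a hypothetical top sample of 200 and bottom sample of 100, multiply gives
 * 78, screen gives 222, overlay picks the screen branch (200 >= 128) and
 * gives 189, and a 0.5 opacity pulls the multiply result back to 139.
 */
#if 0
#include <stdio.h>

/* Same per-pixel arithmetic as MULTIPLY()/SCREEN() above. */
#define MULTIPLY(x, a, b) ((x) * (((a) * (b)) / 255))
#define SCREEN(x, a, b)   (255 - (x) * ((255 - (a)) * (255 - (b)) / 255))

int main(void)
{
    int A = 200, B = 100;                  /* hypothetical top/bottom samples */
    double opacity = 0.5;

    int multiply = MULTIPLY(1, A, B);                               /* 78  */
    int screen   = SCREEN(1, A, B);                                 /* 222 */
    int overlay  = (A < 128) ? MULTIPLY(2, A, B) : SCREEN(2, A, B); /* 189 */
    int mixed    = A + (multiply - A) * opacity;                    /* 139 */

    printf("multiply=%d screen=%d overlay=%d multiply@50%%=%d\n",
           multiply, screen, overlay, mixed);
    return 0;
}
#endif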

static void blend_expr(const uint8_t *top, int top_linesize,
                       const uint8_t *bottom, int bottom_linesize,
                       uint8_t *dst, int dst_linesize,
                       int width, int start, int end,
                       FilterParams *param, double *values)
{
    AVExpr *e = param->e;
    int y, x;

    for (y = start; y < end; y++) {
        values[VAR_Y] = y;
        for (x = 0; x < width; x++) {
            values[VAR_X]      = x;
            values[VAR_TOP]    = values[VAR_A] = top[x];
            values[VAR_BOTTOM] = values[VAR_B] = bottom[x];
            dst[x] = av_expr_eval(e, values, NULL);
        }
        dst    += dst_linesize;
        top    += top_linesize;
        bottom += bottom_linesize;
    }
}
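
/*
 * Illustrative sketch (not part of the original file): blend_expr() evaluates
 * the user expression once per pixel with the variables from var_names[]
 * bound in values[].  The standalone snippet below parses a hypothetical
 * "horizontal wipe" expression with the same libavutil/eval.h API and
 * evaluates it for one pixel; on the command line the equivalent would be
 * something like blend=all_expr='if(gte(X,W/2),A,B)'.
 */
#if 0
#include <stdio.h>
#include "libavutil/eval.h"

int main(void)
{
    /* Same variable order as var_names[] above. */
    static const char *const names[] = { "X", "Y", "W", "H", "SW", "SH", "T",
                                         "N", "A", "B", "TOP", "BOTTOM", NULL };
    double values[12] = { 0 };
    AVExpr *e = NULL;

    if (av_expr_parse(&e, "if(gte(X,W/2),A,B)", names,
                      NULL, NULL, NULL, NULL, 0, NULL) < 0)
        return 1;

    values[0] = 320;  /* X: a column in the left half of the frame */
    values[2] = 1280; /* W                                          */
    values[8] = 40;   /* A: top input sample                        */
    values[9] = 200;  /* B: bottom input sample                     */
    printf("dst = %g\n", av_expr_eval(e, values, NULL)); /* prints 200 (B) */

    av_expr_free(e);
    return 0;
}
#endif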

static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;
    const uint8_t *top    = td->top->data[td->plane];
    const uint8_t *bottom = td->bottom->data[td->plane];
    uint8_t *dst = td->dst->data[td->plane];
    double values[VAR_VARS_NB];

    values[VAR_N]  = td->inlink->frame_count;
    values[VAR_T]  = td->dst->pts == AV_NOPTS_VALUE ? NAN : td->dst->pts * av_q2d(td->inlink->time_base);
    values[VAR_W]  = td->w;
    values[VAR_H]  = td->h;
    values[VAR_SW] = td->w / (double)td->dst->width;
    values[VAR_SH] = td->h / (double)td->dst->height;

    td->param->blend(top + slice_start * td->top->linesize[td->plane],
                     td->top->linesize[td->plane],
                     bottom + slice_start * td->bottom->linesize[td->plane],
                     td->bottom->linesize[td->plane],
                     dst + slice_start * td->dst->linesize[td->plane],
                     td->dst->linesize[td->plane],
                     td->w, slice_start, slice_end, td->param, &values[0]);
    return 0;
}
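
/*
 * Illustrative sketch (not part of the original file): each worker job gets a
 * contiguous band of rows, [h*jobnr/nb_jobs, h*(jobnr+1)/nb_jobs), and the
 * plane pointers are advanced by slice_start lines before blending.  The
 * snippet below prints the row ranges for a hypothetical 1080-row plane split
 * across 8 jobs.
 */
#if 0
#include <stdio.h>

int main(void)
{
    int h = 1080, nb_jobs = 8, jobnr;

    for (jobnr = 0; jobnr < nb_jobs; jobnr++) {
        int slice_start = (h *  jobnr     ) / nb_jobs;
        int slice_end   = (h * (jobnr + 1)) / nb_jobs;
        printf("job %d blends rows [%d, %d)\n", jobnr, slice_start, slice_end);
    }
    return 0;
}
#endif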

static AVFrame *blend_frame(AVFilterContext *ctx, AVFrame *top_buf,
                            const AVFrame *bottom_buf)
{
    BlendContext *b = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *dst_buf;
    int plane;

    dst_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!dst_buf)
        return top_buf;
    av_frame_copy_props(dst_buf, top_buf);

    for (plane = 0; plane < b->nb_planes; plane++) {
        int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
        int outw = FF_CEIL_RSHIFT(dst_buf->width,  hsub);
        int outh = FF_CEIL_RSHIFT(dst_buf->height, vsub);
        FilterParams *param = &b->params[plane];
        ThreadData td = { .top = top_buf, .bottom = bottom_buf, .dst = dst_buf,
                          .w = outw, .h = outh, .param = param, .plane = plane,
                          .inlink = inlink };

        ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outh, ctx->graph->nb_threads));
    }

    if (!b->tblend)
        av_frame_free(&top_buf);

    return dst_buf;
}
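
/*
 * Illustrative sketch (not part of the original file): only the chroma planes
 * (1 and 2) are shifted by the subsampling factors, and FF_CEIL_RSHIFT rounds
 * odd dimensions up.  The snippet below uses a local stand-in for the macro
 * to show the per-plane sizes of a hypothetical 1919x1081 yuv420p frame
 * (hsub = vsub = 1): plane 0 stays 1919x1081, planes 1 and 2 become 960x541.
 */
#if 0
#include <stdio.h>

/* Local stand-in for FF_CEIL_RSHIFT(): right shift that rounds up. */
#define CEIL_RSHIFT(a, b) (-((-(a)) >> (b)))

int main(void)
{
    int w = 1919, h = 1081, hsub = 1, vsub = 1, plane;

    for (plane = 0; plane < 3; plane++) {
        int sw = (plane == 1 || plane == 2) ? hsub : 0;
        int sh = (plane == 1 || plane == 2) ? vsub : 0;
        printf("plane %d: %dx%d\n", plane, CEIL_RSHIFT(w, sw), CEIL_RSHIFT(h, sh));
    }
    return 0;
}
#endif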

static av_cold int init(AVFilterContext *ctx)
{
    BlendContext *b = ctx->priv;
    int ret, plane;

    b->tblend = !strcmp(ctx->filter->name, "tblend");

    for (plane = 0; plane < FF_ARRAY_ELEMS(b->params); plane++) {
        FilterParams *param = &b->params[plane];

        if (b->all_mode >= 0)
            param->mode = b->all_mode;
        if (b->all_opacity < 1)
            param->opacity = b->all_opacity;

        switch (param->mode) {
        case BLEND_ADDITION:   param->blend = blend_addition;   break;
        case BLEND_AND:        param->blend = blend_and;        break;
        case BLEND_AVERAGE:    param->blend = blend_average;    break;
        case BLEND_BURN:       param->blend = blend_burn;       break;
        case BLEND_DARKEN:     param->blend = blend_darken;     break;
        case BLEND_DIFFERENCE: param->blend = blend_difference; break;
        case BLEND_DIFFERENCE128: param->blend = blend_difference128; break;
        case BLEND_DIVIDE:     param->blend = blend_divide;     break;
        case BLEND_DODGE:      param->blend = blend_dodge;      break;
        case BLEND_EXCLUSION:  param->blend = blend_exclusion;  break;
        case BLEND_HARDLIGHT:  param->blend = blend_hardlight;  break;
        case BLEND_LIGHTEN:    param->blend = blend_lighten;    break;
        case BLEND_MULTIPLY:   param->blend = blend_multiply;   break;
        case BLEND_NEGATION:   param->blend = blend_negation;   break;
        case BLEND_NORMAL:     param->blend = blend_normal;     break;
        case BLEND_OR:         param->blend = blend_or;         break;
        case BLEND_OVERLAY:    param->blend = blend_overlay;    break;
        case BLEND_PHOENIX:    param->blend = blend_phoenix;    break;
        case BLEND_PINLIGHT:   param->blend = blend_pinlight;   break;
        case BLEND_REFLECT:    param->blend = blend_reflect;    break;
        case BLEND_SCREEN:     param->blend = blend_screen;     break;
        case BLEND_SOFTLIGHT:  param->blend = blend_softlight;  break;
        case BLEND_SUBTRACT:   param->blend = blend_subtract;   break;
        case BLEND_VIVIDLIGHT: param->blend = blend_vividlight; break;
        case BLEND_XOR:        param->blend = blend_xor;        break;
        }

        if (b->all_expr && !param->expr_str) {
            param->expr_str = av_strdup(b->all_expr);
            if (!param->expr_str)
                return AVERROR(ENOMEM);
        }
        if (param->expr_str) {
            ret = av_expr_parse(&param->e, param->expr_str, var_names,
                                NULL, NULL, NULL, NULL, 0, ctx);
            if (ret < 0)
                return ret;
            param->blend = blend_expr;
        }
    }

    b->dinput.process = blend_frame;
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BlendContext *b = ctx->priv;
    int i;

    ff_dualinput_uninit(&b->dinput);
    av_frame_free(&b->prev_frame);

    for (i = 0; i < FF_ARRAY_ELEMS(b->params); i++)
        av_expr_free(b->params[i].e);
}

#if CONFIG_BLEND_FILTER

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *toplink = ctx->inputs[TOP];
    AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
    BlendContext *b = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
    int ret;

    if (toplink->format != bottomlink->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (toplink->w                       != bottomlink->w ||
        toplink->h                       != bottomlink->h ||
        toplink->sample_aspect_ratio.num != bottomlink->sample_aspect_ratio.num ||
        toplink->sample_aspect_ratio.den != bottomlink->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[TOP].name, toplink->w, toplink->h,
               toplink->sample_aspect_ratio.num,
               toplink->sample_aspect_ratio.den,
               ctx->input_pads[BOTTOM].name, bottomlink->w, bottomlink->h,
               bottomlink->sample_aspect_ratio.num,
               bottomlink->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    outlink->w = toplink->w;
    outlink->h = toplink->h;
    outlink->time_base = toplink->time_base;
    outlink->sample_aspect_ratio = toplink->sample_aspect_ratio;
    outlink->frame_rate = toplink->frame_rate;

    b->hsub = pix_desc->log2_chroma_w;
    b->vsub = pix_desc->log2_chroma_h;
    b->nb_planes = av_pix_fmt_count_planes(toplink->format);

    if ((ret = ff_dualinput_init(ctx, &b->dinput)) < 0)
        return ret;

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    BlendContext *b = outlink->src->priv;
    return ff_dualinput_request_frame(&b->dinput, outlink);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    BlendContext *b = inlink->dst->priv;
    return ff_dualinput_filter_frame(&b->dinput, inlink, buf);
}

static const AVFilterPad blend_inputs[] = {
    {
        .name         = "top",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },{
        .name         = "bottom",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad blend_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_blend = {
    .name          = "blend",
    .description   = NULL_IF_CONFIG_SMALL("Blend two video frames into each other."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(BlendContext),
    .query_formats = query_formats,
    .inputs        = blend_inputs,
    .outputs       = blend_outputs,
    .priv_class    = &blend_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif

#if CONFIG_TBLEND_FILTER

static int tblend_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    BlendContext *b = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);

    b->hsub = pix_desc->log2_chroma_w;
    b->vsub = pix_desc->log2_chroma_h;
    b->nb_planes = av_pix_fmt_count_planes(inlink->format);
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;

    return 0;
}

static int tblend_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    BlendContext *b = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];

    if (b->prev_frame) {
        AVFrame *out = blend_frame(inlink->dst, frame, b->prev_frame);
        av_frame_free(&b->prev_frame);
        b->prev_frame = frame;
        return ff_filter_frame(outlink, out);
    }
    b->prev_frame = frame;
    return 0;
}
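
/*
 * Usage note (illustrative, not part of the original source): tblend buffers
 * the very first frame without producing output, then blends every later
 * frame (as the "top" input) with its predecessor (as the "bottom" input) and
 * frees the old reference.  A filtergraph such as
 *
 *     ffmpeg -i in.mp4 -vf "tblend=all_mode=difference128" out.mp4
 *
 * therefore highlights frame-to-frame motion; static regions come out near
 * the mid-grey value 128.
 */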

static const AVOption tblend_options[] = {
    COMMON_OPTIONS,
    { NULL }
};

AVFILTER_DEFINE_CLASS(tblend);

static const AVFilterPad tblend_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = tblend_filter_frame,
    },
    { NULL }
};

static const AVFilterPad tblend_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = tblend_config_output,
    },
    { NULL }
};

AVFilter ff_vf_tblend = {
    .name          = "tblend",
    .description   = NULL_IF_CONFIG_SMALL("Blend successive frames."),
    .priv_size     = sizeof(BlendContext),
    .priv_class    = &tblend_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = tblend_inputs,
    .outputs       = tblend_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};

#endif