FFmpeg  2.7.2
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_ISATTY
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 #endif
43 
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
47 #include "libavutil/opt.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
63 #include "libavcodec/mathops.h"
64 #include "libavformat/os_support.h"
65 
66 # include "libavfilter/avcodec.h"
67 # include "libavfilter/avfilter.h"
68 # include "libavfilter/buffersrc.h"
69 # include "libavfilter/buffersink.h"
70 
71 #if HAVE_SYS_RESOURCE_H
72 #include <sys/time.h>
73 #include <sys/types.h>
74 #include <sys/resource.h>
75 #elif HAVE_GETPROCESSTIMES
76 #include <windows.h>
77 #endif
78 #if HAVE_GETPROCESSMEMORYINFO
79 #include <windows.h>
80 #include <psapi.h>
81 #endif
82 
83 #if HAVE_SYS_SELECT_H
84 #include <sys/select.h>
85 #endif
86 
87 #if HAVE_TERMIOS_H
88 #include <fcntl.h>
89 #include <sys/ioctl.h>
90 #include <sys/time.h>
91 #include <termios.h>
92 #elif HAVE_KBHIT
93 #include <conio.h>
94 #endif
95 
96 #if HAVE_PTHREADS
97 #include <pthread.h>
98 #endif
99 
100 #include <time.h>
101 
102 #include "ffmpeg.h"
103 #include "cmdutils.h"
104 
105 #include "libavutil/avassert.h"
106 
/* Program identity used by the cmdutils banner/help machinery. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for -vstats; opened lazily by do_video_stats(). */
static FILE *vstats_file;

/* Names of the variables available in -force_key_frames expressions,
 * NULL-terminated.  NOTE(review): the order presumably matches the FKF_*
 * index enum used with forced_keyframes_expr_const_values below —
 * confirm against ffmpeg.h. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
120 
/* forward declarations (defined later in this file) */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;   /* when set, term_init() skips interactive tty setup */
static int nb_frames_dup = 0;   /* frames duplicated by rate conversion (do_video_out) */
static int nb_frames_drop = 0;  /* frames dropped by rate conversion (do_video_out) */
static int64_t decode_error_stat[2];  /* NOTE(review): usage not visible in this dump — presumably [ok, errored] packet counters; confirm */

static int current_time;        /* reference timestamp for update_benchmark() */
132 
134 
135 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
136 
141 
146 
149 
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;   /* saved tty state, restored by term_exit_sigsafe() */
static int restore_tty;         /* nonzero once oldtty holds a valid saved state */
#endif

#if HAVE_PTHREADS
static void free_input_threads(void);
#endif
160 
161 /* sub2video hack:
162  Convert subtitles to video with alpha to insert them in filter graphs.
163  This is a temporary solution until libavfilter gets real subtitles support.
164  */
165 
/* (Re)allocate ist->sub2video.frame as a blank canvas of the configured
 * width/height; returns 0 on success or a negative AVERROR.
 * NOTE(review): the signature line (presumably
 * "static int sub2video_get_blank_frame(InputStream *ist)") and the
 * frame->format assignment are elided in this dump — confirm against the
 * full source. */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    ist->sub2video.frame->width  = ist->sub2video.w;
    ist->sub2video.frame->height = ist->sub2video.h;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
        return ret;
    /* clear only plane 0 — presumably a single packed RGBA plane; the
       format setup is elided in this dump */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
180 
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182  AVSubtitleRect *r)
183 {
184  uint32_t *pal, *dst2;
185  uint8_t *src, *src2;
186  int x, y;
187 
188  if (r->type != SUBTITLE_BITMAP) {
189  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190  return;
191  }
192  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
194  return;
195  }
196 
197  dst += r->y * dst_linesize + r->x * 4;
198  src = r->pict.data[0];
199  pal = (uint32_t *)r->pict.data[1];
200  for (y = 0; y < r->h; y++) {
201  dst2 = (uint32_t *)dst;
202  src2 = src;
203  for (x = 0; x < r->w; x++)
204  *(dst2++) = pal[*(src2++)];
205  dst += dst_linesize;
206  src += r->pict.linesize[0];
207  }
208 }
209 
/* Stamp the current sub2video canvas with pts and send it to every
 * filter fed by this input stream. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        /* NOTE(review): the loop body (the buffersrc push call) is elided
           in this dump — confirm against the full source */
}
222 
/* Rebuild the sub2video canvas from the rectangles of sub and push the
 * result to the filters.  A NULL sub clears the canvas (used by the
 * heartbeat and flush paths). */
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
{
    int w = ist->sub2video.w, h = ist->sub2video.h;
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst;   /* NOTE(review): likely uint8_t in the full source — possible dump artifact; confirm */
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;   /* this input stream is not wired into sub2video */
    if (sub) {
        /* display times are milliseconds relative to sub->pts (AV_TIME_BASE) */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* no subtitle: re-send a blank canvas that lasts forever */
        pts = ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* NOTE(review): the av_log() call opening this message is elided
           in this dump */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
}
257 
258 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
259 {
260  InputFile *infile = input_files[ist->file_index];
261  int i, j, nb_reqs;
262  int64_t pts2;
263 
264  /* When a frame is read from a file, examine all sub2video streams in
265  the same file and send the sub2video frame again. Otherwise, decoded
266  video frames could be accumulating in the filter graph while a filter
267  (possibly overlay) is desperately waiting for a subtitle frame. */
268  for (i = 0; i < infile->nb_streams; i++) {
269  InputStream *ist2 = input_streams[infile->ist_index + i];
270  if (!ist2->sub2video.frame)
271  continue;
272  /* subtitles seem to be usually muxed ahead of other streams;
273  if not, subtracting a larger time here is necessary */
274  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
275  /* do not send the heartbeat frame if the subtitle is already ahead */
276  if (pts2 <= ist2->sub2video.last_pts)
277  continue;
278  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
279  sub2video_update(ist2, NULL);
280  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
281  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
282  if (nb_reqs)
283  sub2video_push_ref(ist2, pts2);
284  }
285 }
286 
287 static void sub2video_flush(InputStream *ist)
288 {
289  int i;
290 
291  if (ist->sub2video.end_pts < INT64_MAX)
292  sub2video_update(ist, NULL);
293  for (i = 0; i < ist->nb_filters; i++)
294  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
295 }
296 
297 /* end of sub2video hack */
298 
/* Async-signal-safe part of terminal cleanup: restore the saved tty
 * state if term_init() modified it. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (!restore_tty)
        return;
    tcsetattr(0, TCSANOW, &oldtty);
#endif
}
306 
/* Public terminal cleanup: silence the logger, then restore the tty.
 * NOTE(review): the call to term_exit_sigsafe() (original line 310) is
 * elided in this dump — confirm against the full source. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
}
312 
/* Written from the signal handler, hence volatile. */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
/* Set once transcoding setup finished; ffmpeg_cleanup() uses it to decide
 * whether a nonzero exit code means "conversion failed". */
static volatile int transcode_init_done = 0;
/* Set to 1 on mux write failure (see the av_interleaved_write_frame path). */
static int main_return_code = 0;
317 
/* Signal handler for SIGINT/SIGTERM/SIGQUIT/SIGXCPU: record the signal so
 * the main loop can terminate cleanly, and exit hard (code 123) if the
 * user insists with repeated signals.
 * NOTE(review): the signature continuation (presumably
 * "sigterm_handler(int sig)") and the lines incrementing
 * received_nb_signals / restoring the tty are elided in this dump. */
static void
{
    received_sigterm = sig;
    if(received_nb_signals > 3)
        exit(123);
}
327 
/* Put the controlling terminal into a raw-ish mode so single keypresses
 * can be read without blocking (see read_key()), and install the signal
 * handlers used for clean termination. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    if(!run_as_daemon){
        struct termios tty;
        int istty = 1;
#if HAVE_ISATTY
        istty = isatty(0) && isatty(2);
#endif
        if (istty && tcgetattr (0, &tty) == 0) {
            oldtty = tty;
            restore_tty = 1;   /* remember to undo this in term_exit_sigsafe() */

            /* raw-ish input: no break signaling, no CR/NL translation,
               no XON/XOFF flow control */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            /* no echo, no canonical (line-buffered) input */
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            /* read() returns as soon as 1 byte is available, no timeout */
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
}
362 
363 /* read a key without blocking */
/* read a key without blocking */
/* Returns the character read from stdin, or -1 when no input is pending.
 * On the termios path a failed/EOF read() returns its result (0 or -1). */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;   /* zero timeout: pure poll, never blocks */
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;     /* 0 on EOF, -1 on error */
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* console handles accept GetConsoleMode; anything else is a pipe */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (stdin->_cnt > 0) {   /* a buffered byte is already available */
        read(0, &ch, 1);
        return ch;
    }
    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
418 
/* AVIOInterruptCallback: tells libavformat to abort blocking I/O once a
 * termination signal has been received.
 * NOTE(review): the return statement (original line 421) is elided in
 * this dump — confirm against the full source. */
static int decode_interrupt_cb(void *ctx)
{
}
423 
425 
/* Global cleanup run at program exit: free filtergraphs, close output and
 * input files, free per-stream state and report how the run ended.
 * ret is the exit status being reported.
 * NOTE(review): several lines of this function are elided in this dump;
 * each gap is marked below — confirm against the full source. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        printf("bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        /* NOTE(review): one line elided here */
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* NOTE(review): one line elided here */

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* NOTE(review): the declaration of the format context "s" used
           below is elided here */
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        /* NOTE(review): one line elided here */
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* NOTE(review): the declaration of "bsfc" used below is elided here */

        if (!ost)
            continue;

        /* free the bitstream filter chain */
        bsfc = ost->bitstream_filters;
        while (bsfc) {
            AVBitStreamFilterContext *next = bsfc->next;
            /* NOTE(review): one line elided here (presumably closing bsfc) */
            bsfc = next;
        }
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->last_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        ost->audio_channels_mapped = 0;

        av_freep(&output_streams[i]);
    }
#if HAVE_PTHREADS
    /* NOTE(review): one line elided here (presumably free_input_threads()) */
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        /* NOTE(review): lines elided here */
        av_dict_free(&ist->decoder_opts);
        /* NOTE(review): lines elided here */
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        av_freep(&input_streams[i]);
    }

    if (vstats_file)
        fclose(vstats_file);
    /* NOTE(review): one line elided here */

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    /* NOTE(review): one line elided here */

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
               (int) received_sigterm);
    } else if (ret && transcode_init_done) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
}
545 
/* NOTE(review): the signature line is elided in this dump — presumably
 * "static void remove_avoptions(AVDictionary **a, AVDictionary *b)",
 * which deletes from *a every key present in b; confirm against the full
 * source.  The loop body is also elided. */
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        /* NOTE(review): loop body elided in this dump */
    }
}
554 
/* NOTE(review): the signature line and the declaration of "t" are elided
 * in this dump — presumably "static void assert_avoptions(AVDictionary *m)",
 * which aborts the program if any user-supplied AVOption was not
 * consumed; confirm against the full source. */
{
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
563 
/* Abort the program after a codec was rejected for being experimental.
 * Both parameters are unused in this body (the caller presumably prints
 * the diagnostic — confirm). */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
568 
569 static void update_benchmark(const char *fmt, ...)
570 {
571  if (do_benchmark_all) {
572  int64_t t = getutime();
573  va_list va;
574  char buf[1024];
575 
576  if (fmt) {
577  va_start(va, fmt);
578  vsnprintf(buf, sizeof(buf), fmt, va);
579  va_end(va);
580  printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
581  }
582  current_time = t;
583  }
584 }
585 
586 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
587 {
588  int i;
589  for (i = 0; i < nb_output_streams; i++) {
590  OutputStream *ost2 = output_streams[i];
591  ost2->finished |= ost == ost2 ? this_stream : others;
592  }
593 }
594 
/* NOTE(review): the signature line is elided in this dump — presumably
 * "static void write_frame(AVFormatContext *s, AVPacket *pkt,
 * OutputStream *ost)"; confirm against the full source.
 * Runs the packet through the stream's bitstream-filter chain, sanitizes
 * its timestamps and hands it to the muxer.  Takes ownership of pkt's
 * payload (always frees it before returning). */
{
    /* NOTE(review): the declaration of the bitstream-filter chain pointer
       "bsfc" used below is elided here */
    AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
    int ret;

    /* propagate encoder extradata to the muxer-facing codec context */
    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
        /* NOTE(review): the extradata allocation line is elided here */
        if (ost->st->codec->extradata) {
            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
            /* NOTE(review): the extradata_size assignment is elided here */
        }
    }

    /* NOTE(review): the condition guarding this timestamp reset is elided
       here — in the full source the reset is NOT unconditional; confirm */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_free_packet(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (bsfc)
        /* NOTE(review): the statement under this if is elided here */

    while (bsfc) {
        AVPacket new_pkt = *pkt;
        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
                                                 bsfc->filter->name,
                                                 NULL, 0);
        int a = av_bitstream_filter_filter(bsfc, avctx,
                                           bsf_arg ? bsf_arg->value : NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
            uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
            if(t) {
                memcpy(t, new_pkt.data, new_pkt.size);
                memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
                new_pkt.data = t;
                new_pkt.buf = NULL;
                a = 1;
            } else
                a = AVERROR(ENOMEM);
        }
        if (a > 0) {
            /* the filter produced a new payload: drop the old packet's
               side data and wrap the new buffer */
            pkt->side_data = NULL;
            pkt->side_data_elems = 0;
            av_free_packet(pkt);
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);
            if (!new_pkt.buf)
                exit_program(1);
        } else if (a < 0) {
            new_pkt = *pkt;
            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");
            print_error("", a);
            if (exit_on_error)
                exit_program(1);
        }
        *pkt = new_pkt;

        bsfc = bsfc->next;
    }

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* a dts later than pts would be rejected: replace both by a guess
           (the median of pts, dts and last_mux_dts + 1) */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        if(
            (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* enforce increasing dts (strictly, unless the muxer allows
               equal timestamps via AVFMT_TS_NONSTRICT) */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if(pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* NOTE(review): the media-type argument line is elided here */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* NOTE(review): one line elided here (presumably closing the
           output streams after the write failure) */
    }
    av_free_packet(pkt);
}
734 
/* NOTE(review): the signature line is elided in this dump — presumably
 * "static void close_output_stream(OutputStream *ost)"; confirm.
 * Marks the stream's encoder as finished and, when -shortest is active,
 * clamps the file's recording time to this stream's end time. */
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        /* this stream's end position converted to AV_TIME_BASE */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
745 
/* NOTE(review): the signature line and the first half of the timestamp
 * comparison are elided in this dump — presumably
 * "static int check_recording_time(OutputStream *ost)" comparing the
 * stream position against of->recording_time; confirm.
 * Returns 0 (after closing the stream) once the -t limit is reached,
 * 1 while encoding may continue. */
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        /* NOTE(review): elided line(s): av_compare_ts(...) opening */
        AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
        return 0;
    }
    return 1;
}
758 
/* NOTE(review): the first line of the signature (presumably
 * "static void do_audio_out(AVFormatContext *s, OutputStream *ost,") is
 * elided in this dump — confirm.
 * Encodes one audio frame and muxes the resulting packet via write_frame(). */
                          AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int got_packet = 0;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* let the encoder allocate the payload */
    pkt.size = 0;

    if (!check_recording_time(ost))
        return;

    /* no pts (or -async < 0): derive the pts from the running sample count */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    /* NOTE(review): one line elided here */
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
        exit_program(1);
    }
    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

    if (got_packet) {
        /* encoder timestamps are in enc->time_base; the muxer expects
           the stream time base */
        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
        }

        write_frame(s, &pkt, ost);
    }
}
807 
/* NOTE(review): the first line of the signature (presumably
 * "static void do_subtitle_out(AVFormatContext *s,") is elided in this
 * dump — confirm.
 * Encodes an AVSubtitle — once, or twice for DVB subtitles (one packet
 * draws, a second clears) — and muxes the resulting packet(s). */
                            OutputStream *ost,
                            InputStream *ist,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocated, reused scratch buffer for the encoded payload */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    /* NOTE(review): the codec-id test selecting between the two branches
       below is elided in this dump */
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        /* NOTE(review): one line elided here (presumably the matching
           end_display_time adjustment) */
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;   /* the second DVB packet only clears the screen */

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += 90 * sub->start_display_time;
            else
                pkt.pts += 90 * sub->end_display_time;
        }
        pkt.dts = pkt.pts;
        write_frame(s, &pkt, ost);
    }
}
891 
/* NOTE(review): the first line of the signature (presumably
 * "static void do_video_out(AVFormatContext *s,") is elided in this
 * dump — confirm.
 * Performs frame-rate conversion (duplicating or dropping frames per the
 * selected -vsync mode), encodes the resulting picture(s) and muxes the
 * packets.  next_picture == NULL means end-of-stream flushing.
 * Several lines are elided in this dump; each gap is marked below. */
                         OutputStream *ost,
                         AVFrame *next_picture,
                         double sync_ipts)
{
    int ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecContext *mux_enc = ost->st->codec;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    int frame_size = 0;
    InputStream *ist = NULL;
    /* NOTE(review): the declaration of the "filter" pointer used below is
       elided in this dump */

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* nominal frame duration, measured in encoder time-base ticks */
    if (filter->inputs[0]->frame_rate.num > 0 &&
        filter->inputs[0]->frame_rate.den > 0)
        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* without filters, prefer the input packet duration when usable */
    if (!ost->filters_script &&
        !ost->filters &&
        next_picture &&
        ist &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        /* delta0: gap between the frame's ideal position and where we
           are; delta: same gap measured at the end of the frame */
        delta0 = sync_ipts - ost->sync_opts;
        delta = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0;
        nb_frames = 1;

        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(s->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
            } else
                /* NOTE(review): the default mode-selection line is elided
                   in this dump */
            if (   ist
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            }
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
            }
        }

        /* frame arrives early but its end is late: clip instead of dup/drop */
        if (delta0 < 0 &&
            delta > 0 &&
            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            double cor = FFMIN(-delta0, duration);
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
            sync_ipts += cor;
            duration -= cor;
            delta0 += cor;
        }

        switch (format_video_sync) {
        case VSYNC_VSCFR:
            if (ost->frame_number == 0 && delta - duration >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = lrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = lrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = lrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = lrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* keep a short history of dup counts for the flush heuristic above */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_droped) {
        nb_frames_drop++;
        /* NOTE(review): the av_log() call opening this message is elided
           in this dump */
        "*** dropping frame %d from stream %d at ts %"PRId64"\n",
        ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
    }
    ost->last_droped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        /* the first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

#if 1
        if (!check_recording_time(ost))
#else
        if (ost->frame_number >= ost->max_frames)
#endif
            return;

        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            if (in_picture->interlaced_frame)
                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            else
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
        } else {
            int got_packet, forced_keyframe = 0;
            double pts_time;

            /* NOTE(review): the first half of this condition is elided in
               this dump */
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                mux_enc->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;   /* let the encoder decide */

            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            /* keyframe forcing: explicit -force_key_frames timestamps ... */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                /* ... or an expression evaluated per frame */
                double res;
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                /* NOTE(review): the expression evaluation lines are elided
                   in this dump */
                av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                /* NOTE(review): the argument lines of this av_dlog() are
                   elided in this dump */
                        res);
                if (res) {
                    forced_keyframe = 1;
                    /* NOTE(review): the lines updating the prev_forced_*
                       expression constants are elided in this dump */
                }

                /* NOTE(review): one line elided here */
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;
            }

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
            }

            /* NOTE(review): one line elided here */
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);
            }

            ost->frames_encoded++;

            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                exit_program(1);
            }

            if (got_packet) {
                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
                }

                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
                }

                frame_size = pkt.size;
                write_frame(s, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    }

    /* remember the picture for possible duplication on the next call */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
    else
        av_frame_free(&ost->last_frame);
}
1193 
1194 static double psnr(double d)
1195 {
1196  return -10.0 * log(d) / log(10.0);
1197 }
1198 
/* Append one line of per-frame video statistics (frame number, quantizer,
 * optional PSNR, packet size, stream size, bitrates, picture type) to the
 * log file named by -vstats.  NOTE(review): the signature line is not
 * visible in this chunk; presumably do_video_stats(OutputStream *ost,
 * int frame_size) -- confirm against the full source. */
1200 {
1201  AVCodecContext *enc;
1202  int frame_number;
1203  double ti1, bitrate, avg_bitrate;
1204 
1205  /* this is executed just the first time do_video_stats is called */
1206  if (!vstats_file) {
1207  vstats_file = fopen(vstats_filename, "w");
1208  if (!vstats_file) {
1209  perror("fopen");
1210  exit_program(1);
1211  }
1212  }
1213 
1214  enc = ost->enc_ctx;
1215  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1216  frame_number = ost->st->nb_frames;
  /* quality is stored as a lambda; FF_QP2LAMBDA converts it back to a QP scale */
1217  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
1218  if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
  /* error[0] is the luma squared-error sum; normalize by pixel count * 255^2 */
1219  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1220 
1221  fprintf(vstats_file,"f_size= %6d ", frame_size);
1222  /* compute pts value */
1223  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1224  if (ti1 < 0.01)
  /* clamp so the average-bitrate division below cannot blow up near t=0 */
1225  ti1 = 0.01;
1226 
1227  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1228  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1229  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1230  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1231  fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
1232  }
1233 }
1234 
/* Finish an output stream; when -shortest is set, finishing any one stream
 * marks every stream of the same output file as done for both the encoder
 * and the muxer.  NOTE(review): the signature line and the statement that
 * flags 'ost' itself as finished (original line 1240) are not visible in
 * this chunk -- confirm against the full source. */
1236 {
1237  OutputFile *of = output_files[ost->file_index];
1238  int i;
1239 
1241 
1242  if (of->shortest) {
1243  for (i = 0; i < of->ctx->nb_streams; i++)
1244  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1245  }
1246 }
1247 
1248 /**
1249  * Get and encode new output from any of the filtergraphs, without causing
1250  * activity.
1251  *
1252  * @return 0 for success, <0 for severe errors
1253  */
1254 static int reap_filters(int flush)
1255 {
1256  AVFrame *filtered_frame = NULL;
1257  int i;
1258 
1259  /* Reap all buffers present in the buffer sinks */
1260  for (i = 0; i < nb_output_streams; i++) {
1261  OutputStream *ost = output_streams[i];
1262  OutputFile *of = output_files[ost->file_index];
 /* NOTE(review): the declaration of 'filter' (original line 1263,
  * presumably AVFilterContext *filter) is not visible in this chunk. */
1264  AVCodecContext *enc = ost->enc_ctx;
1265  int ret = 0;
1266 
1267  if (!ost->filter)
1268  continue;
1269  filter = ost->filter->filter;
1270 
 /* lazily allocate one reusable frame per stream for sink output */
1271  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1272  return AVERROR(ENOMEM);
1273  }
1274  filtered_frame = ost->filtered_frame;
1275 
 /* drain every frame currently buffered in this stream's sink */
1276  while (1) {
1277  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1278  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1280  if (ret < 0) {
1281  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1283  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1284  } else if (flush && ret == AVERROR_EOF) {
 /* on EOF while flushing, push a NULL frame so the video encoder drains */
1285  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1286  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1287  }
1288  break;
1289  }
1290  if (ost->finished) {
1291  av_frame_unref(filtered_frame);
1292  continue;
1293  }
1294  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1295  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1296  AVRational tb = enc->time_base;
 /* widen the time base by up to 16 bits so float_pts keeps extra precision */
1297  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1298 
1299  tb.den <<= extra_bits;
1300  float_pts =
1301  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1302  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1303  float_pts /= 1 << extra_bits;
1304  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1305  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1306 
1307  filtered_frame->pts =
1308  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1309  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1310  }
1311  //if (ost->source_index >= 0)
1312  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1313 
1314  switch (filter->inputs[0]->type) {
1315  case AVMEDIA_TYPE_VIDEO:
1316  if (!ost->frame_aspect_ratio.num)
1317  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1318 
1319  if (debug_ts) {
1320  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1321  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1322  float_pts,
1323  enc->time_base.num, enc->time_base.den);
1324  }
1325 
1326  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1327  break;
1328  case AVMEDIA_TYPE_AUDIO:
1329  if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1330  enc->channels != av_frame_get_channels(filtered_frame)) {
1332  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1333  break;
1334  }
1335  do_audio_out(of->ctx, ost, filtered_frame);
1336  break;
1337  default:
1338  // TODO support subtitle filters
1339  av_assert0(0);
1340  }
1341 
1342  av_frame_unref(filtered_frame);
1343  }
1344  }
1345 
1346  return 0;
1347 }
1348 
/* Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then per-stream packet/frame counters for every
 * input and output file at VERBOSE level. */
1349 static void print_final_stats(int64_t total_size)
1350 {
1351  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1352  uint64_t subtitle_size = 0;
1353  uint64_t data_size = 0;
1354  float percent = -1.0;
1355  int i, j;
1356  int pass1_used = 1;
1357 
 /* accumulate payload bytes per media type across all output streams */
1358  for (i = 0; i < nb_output_streams; i++) {
1359  OutputStream *ost = output_streams[i];
1360  switch (ost->enc_ctx->codec_type) {
1361  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1362  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1363  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1364  default: other_size += ost->data_size; break;
1365  }
1366  extra_size += ost->enc_ctx->extradata_size;
1367  data_size += ost->data_size;
 /* pass1_used stays 1 only if every stream ran exactly a first pass */
1368  if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
1369  != CODEC_FLAG_PASS1)
1370  pass1_used = 0;
1371  }
1372 
 /* muxing overhead = container bytes beyond the raw stream payload */
1373  if (data_size && total_size>0 && total_size >= data_size)
1374  percent = 100.0 * (total_size - data_size) / data_size;
1375 
1376  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1377  video_size / 1024.0,
1378  audio_size / 1024.0,
1379  subtitle_size / 1024.0,
1380  other_size / 1024.0,
1381  extra_size / 1024.0);
1382  if (percent >= 0.0)
1383  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1384  else
1385  av_log(NULL, AV_LOG_INFO, "unknown");
1386  av_log(NULL, AV_LOG_INFO, "\n");
1387 
1388  /* print verbose per-stream stats */
1389  for (i = 0; i < nb_input_files; i++) {
1390  InputFile *f = input_files[i];
 /* note: this 'total_size' intentionally shadows the parameter */
1391  uint64_t total_packets = 0, total_size = 0;
1392 
1393  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1394  i, f->ctx->filename);
1395 
1396  for (j = 0; j < f->nb_streams; j++) {
1397  InputStream *ist = input_streams[f->ist_index + j];
1398  enum AVMediaType type = ist->dec_ctx->codec_type;
1399 
1400  total_size += ist->data_size;
1401  total_packets += ist->nb_packets;
1402 
1403  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1404  i, j, media_type_string(type));
1405  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1406  ist->nb_packets, ist->data_size);
1407 
1408  if (ist->decoding_needed) {
1409  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1410  ist->frames_decoded);
1411  if (type == AVMEDIA_TYPE_AUDIO)
1412  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1413  av_log(NULL, AV_LOG_VERBOSE, "; ");
1414  }
1415 
1416  av_log(NULL, AV_LOG_VERBOSE, "\n");
1417  }
1418 
1419  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1420  total_packets, total_size);
1421  }
1422 
1423  for (i = 0; i < nb_output_files; i++) {
1424  OutputFile *of = output_files[i];
1425  uint64_t total_packets = 0, total_size = 0;
1426 
1427  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1428  i, of->ctx->filename);
1429 
1430  for (j = 0; j < of->ctx->nb_streams; j++) {
1431  OutputStream *ost = output_streams[of->ost_index + j];
1432  enum AVMediaType type = ost->enc_ctx->codec_type;
1433 
1434  total_size += ost->data_size;
1435  total_packets += ost->packets_written;
1436 
1437  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1438  i, j, media_type_string(type));
1439  if (ost->encoding_needed) {
1440  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1441  ost->frames_encoded);
1442  if (type == AVMEDIA_TYPE_AUDIO)
1443  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1444  av_log(NULL, AV_LOG_VERBOSE, "; ");
1445  }
1446 
1447  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1448  ost->packets_written, ost->data_size);
1449 
1450  av_log(NULL, AV_LOG_VERBOSE, "\n");
1451  }
1452 
1453  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1454  total_packets, total_size);
1455  }
 /* warn when nothing at all was produced; suggest likely causes unless a
  * pure first pass (where an empty output is expected) was run */
1456  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1457  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1458  if (pass1_used) {
1459  av_log(NULL, AV_LOG_WARNING, "\n");
1460  } else {
1461  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1462  }
1463  }
1464 }
1465 
/* Emit the periodic (at most twice per second) or final status line:
 * frame count, fps, quantizer, size, time, bitrate, dup/drop counters,
 * optional QP histogram and PSNR.  The same data is mirrored into
 * buf_script for the -progress output.  NOTE(review): a few original
 * lines are missing from this rendered chunk (around 1567/1568, 1608,
 * 1610) -- compare with the full source before editing logic here. */
1466 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1467 {
1468  char buf[1024];
1469  AVBPrint buf_script;
1470  OutputStream *ost;
1471  AVFormatContext *oc;
1472  int64_t total_size;
1473  AVCodecContext *enc;
1474  int frame_number, vid, i;
1475  double bitrate;
1476  int64_t pts = INT64_MIN;
1477  static int64_t last_time = -1;
1478  static int qp_histogram[52];
1479  int hours, mins, secs, us;
1480 
1481  if (!print_stats && !is_last_report && !progress_avio)
1482  return;
1483 
 /* rate-limit intermediate reports to one per 500ms of wall clock */
1484  if (!is_last_report) {
1485  if (last_time == -1) {
1486  last_time = cur_time;
1487  return;
1488  }
1489  if ((cur_time - last_time) < 500000)
1490  return;
1491  last_time = cur_time;
1492  }
1493 
1494 
1495  oc = output_files[0]->ctx;
1496 
1497  total_size = avio_size(oc->pb);
1498  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1499  total_size = avio_tell(oc->pb);
1500 
1501  buf[0] = '\0';
1502  vid = 0;
1503  av_bprint_init(&buf_script, 0, 1);
1504  for (i = 0; i < nb_output_streams; i++) {
1505  float q = -1;
1506  ost = output_streams[i];
1507  enc = ost->enc_ctx;
1508  if (!ost->stream_copy && enc->coded_frame)
1509  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
 /* secondary video streams only contribute their quantizer */
1510  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1511  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1512  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1513  ost->file_index, ost->index, q);
1514  }
 /* the first video stream drives frame/fps and the optional extras */
1515  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1516  float fps, t = (cur_time-timer_start) / 1000000.0;
1517 
1518  frame_number = ost->frame_number;
1519  fps = t > 1 ? frame_number / t : 0;
1520  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1521  frame_number, fps < 9.95, fps, q);
1522  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1523  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1524  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1525  ost->file_index, ost->index, q);
1526  if (is_last_report)
1527  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1528  if (qp_hist) {
1529  int j;
1530  int qp = lrintf(q);
1531  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1532  qp_histogram[qp]++;
 /* one hex digit per QP bucket: log2 of the hit count */
1533  for (j = 0; j < 32; j++)
1534  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1535  }
1536  if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1537  int j;
1538  double error, error_sum = 0;
1539  double scale, scale_sum = 0;
1540  double p;
1541  char type[3] = { 'Y','U','V' };
1542  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1543  for (j = 0; j < 3; j++) {
1544  if (is_last_report) {
1545  error = enc->error[j];
1546  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1547  } else {
1548  error = enc->coded_frame->error[j];
1549  scale = enc->width * enc->height * 255.0 * 255.0;
1550  }
 /* chroma planes are subsampled; quarter the normalization area */
1551  if (j)
1552  scale /= 4;
1553  error_sum += error;
1554  scale_sum += scale;
1555  p = psnr(error / scale);
1556  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1557  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1558  ost->file_index, ost->index, type[j] | 32, p);
1559  }
1560  p = psnr(error_sum / scale_sum);
1561  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1562  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1563  ost->file_index, ost->index, p);
1564  }
1565  vid = 1;
1566  }
1568  /* compute min output value */
1569  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1570  ost->st->time_base, AV_TIME_BASE_Q));
1571  if (is_last_report)
1572  nb_frames_drop += ost->last_droped;
1573  }
1574 
 /* split |pts| (microseconds) into h:m:s plus remaining microseconds */
1575  secs = FFABS(pts) / AV_TIME_BASE;
1576  us = FFABS(pts) % AV_TIME_BASE;
1577  mins = secs / 60;
1578  secs %= 60;
1579  hours = mins / 60;
1580  mins %= 60;
1581 
1582  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1583 
1584  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1585  "size=N/A time=");
1586  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1587  "size=%8.0fkB time=", total_size / 1024.0);
1588  if (pts < 0)
1589  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1590  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1591  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1592  (100 * us) / AV_TIME_BASE);
1593 
1594  if (bitrate < 0) {
1595  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1596  av_bprintf(&buf_script, "bitrate=N/A\n");
1597  }else{
1598  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1599  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1600  }
1601 
1602  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1603  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1604  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1605  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1606  hours, mins, secs, us);
1607 
1609  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1611  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1612  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1613 
 /* final report ends with a newline; interim ones overwrite in place */
1614  if (print_stats || is_last_report) {
1615  const char end = is_last_report ? '\n' : '\r';
1616  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1617  fprintf(stderr, "%s %c", buf, end);
1618  } else
1619  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1620 
1621  fflush(stderr);
1622  }
1623 
1624  if (progress_avio) {
1625  av_bprintf(&buf_script, "progress=%s\n",
1626  is_last_report ? "end" : "continue");
1627  avio_write(progress_avio, buf_script.str,
1628  FFMIN(buf_script.len, buf_script.size - 1));
1629  avio_flush(progress_avio);
1630  av_bprint_finalize(&buf_script, NULL);
1631  if (is_last_report) {
1632  avio_closep(&progress_avio);
1633  }
1634  }
1635 
1636  if (is_last_report)
1637  print_final_stats(total_size);
1638 }
1639 
/* Drain all delayed packets out of every encoder at end of stream by
 * repeatedly encoding NULL frames until no more packets come out.
 * NOTE(review): original lines 1655, 1683 and 1704 are not visible in
 * this rendered chunk -- compare with the full source before editing. */
1640 static void flush_encoders(void)
1641 {
1642  int i, ret;
1643 
1644  for (i = 0; i < nb_output_streams; i++) {
1645  OutputStream *ost = output_streams[i];
1646  AVCodecContext *enc = ost->enc_ctx;
1647  AVFormatContext *os = output_files[ost->file_index]->ctx;
1648  int stop_encoding = 0;
1649 
1650  if (!ost->encoding_needed)
1651  continue;
1652 
 /* PCM-style audio encoders (frame_size <= 1) buffer nothing to flush */
1653  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1654  continue;
1656  continue;
1657 
1658  for (;;) {
1659  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1660  const char *desc;
1661 
1662  switch (enc->codec_type) {
1663  case AVMEDIA_TYPE_AUDIO:
1664  encode = avcodec_encode_audio2;
1665  desc = "Audio";
1666  break;
1667  case AVMEDIA_TYPE_VIDEO:
1668  encode = avcodec_encode_video2;
1669  desc = "Video";
1670  break;
1671  default:
1672  stop_encoding = 1;
1673  }
1674 
1675  if (encode) {
1676  AVPacket pkt;
1677  int pkt_size;
1678  int got_packet;
1679  av_init_packet(&pkt);
1680  pkt.data = NULL;
1681  pkt.size = 0;
1682 
 /* a NULL frame asks the encoder to emit its buffered output */
1684  ret = encode(enc, &pkt, NULL, &got_packet);
1685  update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1686  if (ret < 0) {
1687  av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1688  exit_program(1);
1689  }
1690  if (ost->logfile && enc->stats_out) {
1691  fprintf(ost->logfile, "%s", enc->stats_out);
1692  }
 /* no packet means the encoder is fully drained */
1693  if (!got_packet) {
1694  stop_encoding = 1;
1695  break;
1696  }
1697  if (ost->finished & MUXER_FINISHED) {
1698  av_free_packet(&pkt);
1699  continue;
1700  }
1701  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1702  pkt_size = pkt.size;
1703  write_frame(os, &pkt, ost);
1705  do_video_stats(ost, pkt_size);
1706  }
1707  }
1708 
1709  if (stop_encoding)
1710  break;
1711  }
1712  }
1713 }
1714 
1715 /*
1716  * Check whether a packet from ist should be written into ost at this time
1717  */
/* NOTE(review): the signature line (original 1718) is not visible here;
 * presumably check_output_constraints(InputStream *ist, OutputStream *ost)
 * -- confirm against the full source. */
1719 {
1720  OutputFile *of = output_files[ost->file_index];
1721  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1722 
 /* only packets from the input stream this output is mapped to qualify */
1723  if (ost->source_index != ist_index)
1724  return 0;
1725 
1726  if (ost->finished)
1727  return 0;
1728 
 /* drop packets that precede the requested output start time (-ss) */
1729  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1730  return 0;
1731 
1732  return 1;
1733 }
1734 
/* Pass one demuxed packet straight to the muxer (-c copy): rescale its
 * timestamps into the output stream time base, honour start/recording
 * time limits, run the parser-based header fixups, and write it.
 * NOTE(review): original lines 1748, 1793 and 1806-1807 are not visible
 * in this rendered chunk -- compare with the full source before editing. */
1735 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1736 {
1737  OutputFile *of = output_files[ost->file_index];
1738  InputFile *f = input_files [ist->file_index];
1739  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1740  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1741  int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1742  AVPicture pict;
1743  AVPacket opkt;
1744 
1745  av_init_packet(&opkt);
1746 
 /* the first written packet must be a keyframe (unless overridden) */
1747  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1749  return;
1750 
 /* skip material before the output start time, unless -copypriorss */
1751  if (pkt->pts == AV_NOPTS_VALUE) {
1752  if (!ost->frame_number && ist->pts < start_time &&
1753  !ost->copy_prior_start)
1754  return;
1755  } else {
1756  if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1757  !ost->copy_prior_start)
1758  return;
1759  }
1760 
1761  if (of->recording_time != INT64_MAX &&
1762  ist->pts >= of->recording_time + start_time) {
1763  close_output_stream(ost);
1764  return;
1765  }
1766 
1767  if (f->recording_time != INT64_MAX) {
1768  start_time = f->ctx->start_time;
1769  if (f->start_time != AV_NOPTS_VALUE)
1770  start_time += f->start_time;
1771  if (ist->pts >= f->recording_time + start_time) {
1772  close_output_stream(ost);
1773  return;
1774  }
1775  }
1776 
1777  /* force the input stream PTS */
1778  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1779  ost->sync_opts++;
1780 
1781  if (pkt->pts != AV_NOPTS_VALUE)
1782  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1783  else
1784  opkt.pts = AV_NOPTS_VALUE;
1785 
 /* fall back to the stream's running dts when the packet carries none */
1786  if (pkt->dts == AV_NOPTS_VALUE)
1787  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1788  else
1789  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1790  opkt.dts -= ost_tb_start_time;
1791 
 /* audio: rescale via sample counts to avoid cumulative rounding drift */
1792  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1794  if(!duration)
1795  duration = ist->dec_ctx->frame_size;
1796  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1797  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1798  ost->st->time_base) - ost_tb_start_time;
1799  }
1800 
1801  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1802  opkt.flags = pkt->flags;
1803 
1804  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1805  if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1808  && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1809  ) {
 /* av_parser_change may allocate new data; wrap it so it gets freed */
1810  if (av_parser_change(ost->parser, ost->st->codec,
1811  &opkt.data, &opkt.size,
1812  pkt->data, pkt->size,
1813  pkt->flags & AV_PKT_FLAG_KEY)) {
1814  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1815  if (!opkt.buf)
1816  exit_program(1);
1817  }
1818  } else {
1819  opkt.data = pkt->data;
1820  opkt.size = pkt->size;
1821  }
1822  av_copy_packet_side_data(&opkt, pkt);
1823 
1824  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1825  /* store AVPicture in AVPacket, as expected by the output format */
1826  avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1827  opkt.data = (uint8_t *)&pict;
1828  opkt.size = sizeof(AVPicture);
1829  opkt.flags |= AV_PKT_FLAG_KEY;
1830  }
1831 
1832  write_frame(of->ctx, &opkt, ost);
1833 }
1834 
/* If the decoder reports no channel layout, try to pick a default one
 * from the channel count and warn the user; returns 0 on failure, 1 on
 * success or when a layout was already set.  NOTE(review): the signature
 * line (original 1835) and the line performing the actual guess
 * (original 1844, presumably assigning a default layout from
 * dec->channels) are not visible in this chunk -- confirm against the
 * full source. */
1836 {
1837  AVCodecContext *dec = ist->dec_ctx;
1838 
1839  if (!dec->channel_layout) {
1840  char layout_name[256];
1841 
 /* refuse to guess beyond the -guess_layout_max channel count */
1842  if (dec->channels > ist->guess_layout_max)
1843  return 0;
1845  if (!dec->channel_layout)
1846  return 0;
1847  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1848  dec->channels, dec->channel_layout);
1849  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1850  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1851  }
1852  return 1;
1853 }
1854 
/* Decode one audio packet, derive a reliable pts for the decoded frame,
 * react to mid-stream format changes by reconfiguring the filtergraphs,
 * and feed the frame into every attached filter input.
 * NOTE(review): several original lines are missing from this rendered
 * chunk (1868, 1889, 1923, 1927, 1930, 1979, 1987) -- compare with the
 * full source before editing logic here. */
1855 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1856 {
1857  AVFrame *decoded_frame, *f;
1858  AVCodecContext *avctx = ist->dec_ctx;
1859  int i, ret, err = 0, resample_changed;
1860  AVRational decoded_frame_tb;
1861 
 /* lazily allocate the reusable decode and filter-fanout frames */
1862  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1863  return AVERROR(ENOMEM);
1864  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1865  return AVERROR(ENOMEM);
1866  decoded_frame = ist->decoded_frame;
1867 
1869  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1870  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1871 
1872  if (ret >= 0 && avctx->sample_rate <= 0) {
1873  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1874  ret = AVERROR_INVALIDDATA;
1875  }
1876 
1877  if (*got_output || ret<0)
1878  decode_error_stat[ret<0] ++;
1879 
1880  if (ret < 0 && exit_on_error)
1881  exit_program(1);
1882 
 /* an empty packet at EOF flushes the buffer sources downstream */
1883  if (!*got_output || ret < 0) {
1884  if (!pkt->size) {
1885  for (i = 0; i < ist->nb_filters; i++)
1886 #if 1
1887  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1888 #else
1890 #endif
1891  }
1892  return ret;
1893  }
1894 
1895  ist->samples_decoded += decoded_frame->nb_samples;
1896  ist->frames_decoded++;
1897 
1898 #if 1
1899  /* increment next_dts to use for the case where the input stream does not
1900  have timestamps or there are multiple frames in the packet */
1901  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1902  avctx->sample_rate;
1903  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1904  avctx->sample_rate;
1905 #endif
1906 
 /* detect mid-stream parameter changes that require a filter reinit */
1907  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1908  ist->resample_channels != avctx->channels ||
1909  ist->resample_channel_layout != decoded_frame->channel_layout ||
1910  ist->resample_sample_rate != decoded_frame->sample_rate;
1911  if (resample_changed) {
1912  char layout1[64], layout2[64];
1913 
1914  if (!guess_input_channel_layout(ist)) {
1915  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1916  "layout for Input Stream #%d.%d\n", ist->file_index,
1917  ist->st->index);
1918  exit_program(1);
1919  }
1920  decoded_frame->channel_layout = avctx->channel_layout;
1921 
1922  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1924  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1925  decoded_frame->channel_layout);
1926 
1928  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1929  ist->file_index, ist->st->index,
1931  ist->resample_channels, layout1,
1932  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1933  avctx->channels, layout2);
1934 
1935  ist->resample_sample_fmt = decoded_frame->format;
1936  ist->resample_sample_rate = decoded_frame->sample_rate;
1937  ist->resample_channel_layout = decoded_frame->channel_layout;
1938  ist->resample_channels = avctx->channels;
1939 
1940  for (i = 0; i < nb_filtergraphs; i++)
1941  if (ist_in_filtergraph(filtergraphs[i], ist)) {
1942  FilterGraph *fg = filtergraphs[i];
1943  if (configure_filtergraph(fg) < 0) {
1944  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1945  exit_program(1);
1946  }
1947  }
1948  }
1949 
1950  /* if the decoder provides a pts, use it instead of the last packet pts.
1951  the decoder could be delaying output by a packet or more. */
1952  if (decoded_frame->pts != AV_NOPTS_VALUE) {
1953  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1954  decoded_frame_tb = avctx->time_base;
1955  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1956  decoded_frame->pts = decoded_frame->pkt_pts;
1957  decoded_frame_tb = ist->st->time_base;
1958  } else if (pkt->pts != AV_NOPTS_VALUE) {
1959  decoded_frame->pts = pkt->pts;
1960  decoded_frame_tb = ist->st->time_base;
1961  }else {
1962  decoded_frame->pts = ist->dts;
1963  decoded_frame_tb = AV_TIME_BASE_Q;
1964  }
 /* consume the packet pts so follow-up frames in it don't reuse it */
1965  pkt->pts = AV_NOPTS_VALUE;
1966  if (decoded_frame->pts != AV_NOPTS_VALUE)
1967  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
1968  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
1969  (AVRational){1, avctx->sample_rate});
 /* fan the frame out: ref copies to all filters but the last, which
  * takes the decoded frame itself */
1970  for (i = 0; i < ist->nb_filters; i++) {
1971  if (i < ist->nb_filters - 1) {
1972  f = ist->filter_frame;
1973  err = av_frame_ref(f, decoded_frame);
1974  if (err < 0)
1975  break;
1976  } else
1977  f = decoded_frame;
1978  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
1980  if (err == AVERROR_EOF)
1981  err = 0; /* ignore */
1982  if (err < 0)
1983  break;
1984  }
1985  decoded_frame->pts = AV_NOPTS_VALUE;
1986 
1988  av_frame_unref(decoded_frame);
1989  return err < 0 ? err : ret;
1990 }
1991 
/* Decode one video packet, pick the best-effort timestamp for the frame,
 * retrieve hwaccel data if needed, react to resolution/pixel-format
 * changes by reconfiguring the filtergraphs, and feed the frame into
 * every attached filter input.  NOTE(review): several original lines are
 * missing from this rendered chunk (2006, 2017, 2051, 2093, 2096, 2124,
 * 2128, 2135) -- compare with the full source before editing logic here. */
1992 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1993 {
1994  AVFrame *decoded_frame, *f;
1995  int i, ret = 0, err = 0, resample_changed;
1996  int64_t best_effort_timestamp;
1997  AVRational *frame_sample_aspect;
1998 
 /* lazily allocate the reusable decode and filter-fanout frames */
1999  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2000  return AVERROR(ENOMEM);
2001  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2002  return AVERROR(ENOMEM);
2003  decoded_frame = ist->decoded_frame;
2004  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2005 
2007  ret = avcodec_decode_video2(ist->dec_ctx,
2008  decoded_frame, got_output, pkt);
2009  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2010 
2011  // The following line may be required in some cases where there is no parser
2012  // or the parser does not has_b_frames correctly
2013  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2014  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2015  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2016  } else
2018  ist->dec_ctx,
2019  "has_b_frames is larger in decoder than demuxer %d > %d ",
2020  ist->dec_ctx->has_b_frames,
2021  ist->st->codec->has_b_frames
2022  );
2023  }
2024 
2025  if (*got_output || ret<0)
2026  decode_error_stat[ret<0] ++;
2027 
2028  if (ret < 0 && exit_on_error)
2029  exit_program(1);
2030 
2031  if (*got_output && ret >= 0) {
2032  if (ist->dec_ctx->width != decoded_frame->width ||
2033  ist->dec_ctx->height != decoded_frame->height ||
2034  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2035  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2036  decoded_frame->width,
2037  decoded_frame->height,
2038  decoded_frame->format,
2039  ist->dec_ctx->width,
2040  ist->dec_ctx->height,
2041  ist->dec_ctx->pix_fmt);
2042  }
2043  }
2044 
 /* an empty packet at EOF flushes the buffer sources downstream */
2045  if (!*got_output || ret < 0) {
2046  if (!pkt->size) {
2047  for (i = 0; i < ist->nb_filters; i++)
2048 #if 1
2049  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
2050 #else
2052 #endif
2053  }
2054  return ret;
2055  }
2056 
 /* honour the user's -top field order override, if any */
2057  if(ist->top_field_first>=0)
2058  decoded_frame->top_field_first = ist->top_field_first;
2059 
2060  ist->frames_decoded++;
2061 
 /* copy hwaccel surfaces back into system memory when required */
2062  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2063  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2064  if (err < 0)
2065  goto fail;
2066  }
2067  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2068 
2069  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2070  if(best_effort_timestamp != AV_NOPTS_VALUE)
2071  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2072 
2073  if (debug_ts) {
2074  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2075  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2076  ist->st->index, av_ts2str(decoded_frame->pts),
2077  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2078  best_effort_timestamp,
2079  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2080  decoded_frame->key_frame, decoded_frame->pict_type,
2081  ist->st->time_base.num, ist->st->time_base.den);
2082  }
2083 
2084  pkt->size = 0;
2085 
2086  if (ist->st->sample_aspect_ratio.num)
2087  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2088 
 /* detect resolution or pixel-format changes that need a filter reinit */
2089  resample_changed = ist->resample_width != decoded_frame->width ||
2090  ist->resample_height != decoded_frame->height ||
2091  ist->resample_pix_fmt != decoded_frame->format;
2092  if (resample_changed) {
2094  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2095  ist->file_index, ist->st->index,
2097  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2098 
2099  ist->resample_width = decoded_frame->width;
2100  ist->resample_height = decoded_frame->height;
2101  ist->resample_pix_fmt = decoded_frame->format;
2102 
2103  for (i = 0; i < nb_filtergraphs; i++) {
2104  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2105  configure_filtergraph(filtergraphs[i]) < 0) {
2106  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2107  exit_program(1);
2108  }
2109  }
2110  }
2111 
2112  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
 /* fan the frame out: ref copies to all filters but the last, which
  * takes the decoded frame itself */
2113  for (i = 0; i < ist->nb_filters; i++) {
2114  if (!frame_sample_aspect->num)
2115  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2116 
2117  if (i < ist->nb_filters - 1) {
2118  f = ist->filter_frame;
2119  err = av_frame_ref(f, decoded_frame);
2120  if (err < 0)
2121  break;
2122  } else
2123  f = decoded_frame;
2125  if (ret == AVERROR_EOF) {
2126  ret = 0; /* ignore */
2127  } else if (ret < 0) {
2129  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2130  exit_program(1);
2131  }
2132  }
2133 
2134 fail:
2136  av_frame_unref(decoded_frame);
2137  return err < 0 ? err : ret;
2138 }
2139 
/**
 * Decode one subtitle packet from @p ist and dispatch the result: update the
 * sub2video machinery and encode it into every subtitle output stream that is
 * fed by this input stream.
 *
 * @param ist        input stream the packet belongs to
 * @param pkt        packet to decode (pkt->size == 0 signals a flush)
 * @param got_output set nonzero by the decoder when a subtitle was produced
 * @return the decoder's return value (negative AVERROR on failure)
 */
2140 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2141 {
2142  AVSubtitle subtitle;
2143  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2144  &subtitle, got_output, pkt);
2145 
      /* bucket the result into the global success/error statistics */
2146  if (*got_output || ret<0)
2147  decode_error_stat[ret<0] ++;
2148 
2149  if (ret < 0 && exit_on_error)
2150  exit_program(1);
2151 
      /* nothing decoded: on an empty (flush) packet, drain sub2video state */
2152  if (ret < 0 || !*got_output) {
2153  if (!pkt->size)
2154  sub2video_flush(ist);
2155  return ret;
2156  }
2157 
      /* -fix_sub_duration: clamp the previous subtitle's display time so it
       * ends when the current one starts; the current subtitle is stashed in
       * ist->prev_sub (via FFSWAP) and will be emitted on the next call. */
2158  if (ist->fix_sub_duration) {
2159  int end = 1;
2160  if (ist->prev_sub.got_output) {
           /* elapsed time between the two subtitles, in milliseconds */
2161  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2162  1000, AV_TIME_BASE);
2163  if (end < ist->prev_sub.subtitle.end_display_time) {
2164  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2165  "Subtitle duration reduced from %d to %d%s\n",
           /* NOTE(review): source lines 2166 and 2168 are missing from this
            * extraction (the av_log arguments and the end_display_time
            * assignment) — confirm against the full ffmpeg.c. */
2167  end <= 0 ? ", dropping it" : "");
2169  }
2170  }
      /* swap current decode result with the buffered previous one */
2171  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2172  FFSWAP(int, ret, ist->prev_sub.ret);
2173  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
      /* non-positive duration: drop the (now-swapped-in) subtitle */
2174  if (end <= 0)
2175  goto out;
2176  }
2177 
2178  if (!*got_output)
2179  return ret;
2180 
      /* feed the subtitle to any sub2video filter inputs */
2181  sub2video_update(ist, &subtitle);
2182 
2183  if (!subtitle.num_rects)
2184  goto out;
2185 
2186  ist->frames_decoded++;
2187 
      /* encode into every eligible subtitle output stream */
2188  for (i = 0; i < nb_output_streams; i++) {
2189  OutputStream *ost = output_streams[i];
2190 
2191  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2192  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2193  continue;
2194 
2195  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2196  }
2197 
2198 out:
2199  avsubtitle_free(&subtitle);
2200  return ret;
2201 }
2202 
2203 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* NOTE(review): the function signature line (2204) is missing from this
 * extraction; in FFmpeg 2.7 this is
 *     static int output_packet(InputStream *ist, const AVPacket *pkt)
 * — confirm against the full source.
 *
 * Decodes (or stream-copies) one input packet: maintains the input stream's
 * pts/dts bookkeeping, loops the decoder over the packet (or flushes on EOF),
 * advances next_pts/next_dts for the copy path, and forwards the packet to
 * every stream-copy output. Returns got_output (or a negative error).
 */
2205 {
2206  int ret = 0, i;
2207  int got_output = 0;
2208 
2209  AVPacket avpkt;
      /* first packet for this stream: seed dts/pts */
2210  if (!ist->saw_first_ts) {
2211  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2212  ist->pts = 0;
2213  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2214  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2215  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2216  }
2217  ist->saw_first_ts = 1;
2218  }
2219 
2220  if (ist->next_dts == AV_NOPTS_VALUE)
2221  ist->next_dts = ist->dts;
2222  if (ist->next_pts == AV_NOPTS_VALUE)
2223  ist->next_pts = ist->pts;
2224 
2225  if (!pkt) {
2226  /* EOF handling */
2227  av_init_packet(&avpkt);
2228  avpkt.data = NULL;
2229  avpkt.size = 0;
2230  goto handle_eof;
2231  } else {
2232  avpkt = *pkt;
2233  }
2234 
      /* rescale the packet dts to AV_TIME_BASE; pts follows dts unless the
       * video decode path will compute it itself */
2235  if (pkt->dts != AV_NOPTS_VALUE) {
2236  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2237  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2238  ist->next_pts = ist->pts = ist->dts;
2239  }
2240 
2241  // while we have more to decode or while the decoder did output something on EOF
2242  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2243  int duration;
2244  handle_eof:
2245 
2246  ist->pts = ist->next_pts;
2247  ist->dts = ist->next_dts;
2248 
      /* a partial consume without CODEC_CAP_SUBFRAMES means several frames
       * were packed into one packet — warn once */
2249  if (avpkt.size && avpkt.size != pkt->size &&
2250  !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
      /* NOTE(review): line 2251 (the av_log call opening) is missing from
       * this extraction — confirm against the full source. */
2252  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2253  ist->showed_multi_packet_warning = 1;
2254  }
2255 
2256  switch (ist->dec_ctx->codec_type) {
2257  case AVMEDIA_TYPE_AUDIO:
2258  ret = decode_audio (ist, &avpkt, &got_output);
2259  break;
2260  case AVMEDIA_TYPE_VIDEO:
2261  ret = decode_video (ist, &avpkt, &got_output);
      /* derive the frame duration: packet duration first, else the codec
       * framerate (lines 2265/2268 are missing from this extraction —
       * the ticks computation and the divisor; confirm upstream) */
2262  if (avpkt.duration) {
2263  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2264  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2266  duration = ((int64_t)AV_TIME_BASE *
2267  ist->dec_ctx->framerate.den * ticks) /
2269  } else
2270  duration = 0;
2271 
2272  if(ist->dts != AV_NOPTS_VALUE && duration) {
2273  ist->next_dts += duration;
2274  }else
2275  ist->next_dts = AV_NOPTS_VALUE;
2276 
2277  if (got_output)
2278  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2279  break;
2280  case AVMEDIA_TYPE_SUBTITLE:
2281  ret = transcode_subtitles(ist, &avpkt, &got_output);
2282  break;
2283  default:
2284  return -1;
2285  }
2286 
2287  if (ret < 0)
2288  return ret;
2289 
2290  avpkt.dts=
2291  avpkt.pts= AV_NOPTS_VALUE;
2292 
2293  // touch data and size only if not EOF
2294  if (pkt) {
      /* NOTE(review): line 2295 is missing here — presumably an audio-type
       * special case before consuming the whole remaining packet; confirm. */
2296  ret = avpkt.size;
2297  avpkt.data += ret;
2298  avpkt.size -= ret;
2299  }
2300  if (!got_output) {
2301  continue;
2302  }
2303  if (got_output && !pkt)
2304  break;
2305  }
2306 
2307  /* handle stream copy */
2308  if (!ist->decoding_needed) {
2309  ist->dts = ist->next_dts;
2310  switch (ist->dec_ctx->codec_type) {
2311  case AVMEDIA_TYPE_AUDIO:
2312  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2313  ist->dec_ctx->sample_rate;
2314  break;
2315  case AVMEDIA_TYPE_VIDEO:
2316  if (ist->framerate.num) {
2317  // TODO: Remove work-around for c99-to-c89 issue 7
2318  AVRational time_base_q = AV_TIME_BASE_Q;
2319  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2320  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2321  } else if (pkt->duration) {
2322  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2323  } else if(ist->dec_ctx->framerate.num != 0) {
2324  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2325  ist->next_dts += ((int64_t)AV_TIME_BASE *
2326  ist->dec_ctx->framerate.den * ticks) /
      /* NOTE(review): line 2327 (the divisor, framerate.num * ticks_per_frame
       * in upstream) is missing from this extraction — confirm. */
2328  }
2329  break;
2330  }
2331  ist->pts = ist->dts;
2332  ist->next_pts = ist->next_dts;
2333  }
      /* forward the packet to every stream-copy output fed by this stream */
2334  for (i = 0; pkt && i < nb_output_streams; i++) {
2335  OutputStream *ost = output_streams[i];
2336 
2337  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2338  continue;
2339 
2340  do_streamcopy(ist, ost, pkt);
2341  }
2342 
2343  return got_output;
2344 }
2345 
/**
 * Build an SDP description covering every RTP output file and either print it
 * to stdout or write it to the file named by the global sdp_filename.
 */
2346 static void print_sdp(void)
2347 {
2348  char sdp[16384];
2349  int i;
2350  int j;
2351  AVIOContext *sdp_pb;
2352  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2353 
2354  if (!avc)
2355  exit_program(1);
      /* collect only the RTP muxer contexts; j counts them */
2356  for (i = 0, j = 0; i < nb_output_files; i++) {
2357  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2358  avc[j] = output_files[i]->ctx;
2359  j++;
2360  }
2361  }
2362 
2363  av_sdp_create(avc, j, sdp, sizeof(sdp));
2364 
2365  if (!sdp_filename) {
2366  printf("SDP:\n%s\n", sdp);
2367  fflush(stdout);
2368  } else {
2369  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2370  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2371  } else {
2372  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2373  avio_closep(&sdp_pb);
      /* NOTE(review): line 2374 is missing from this extraction (upstream
       * frees sdp_filename here) — confirm against the full source. */
2375  }
2376  }
2377 
2378  av_freep(&avc);
2379 }
2380 
/* NOTE(review): the signature line (2381) is missing from this extraction;
 * from the call site below this is the hwaccel lookup,
 *     static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
 * — confirm against the full source.
 *
 * Linear search of the global hwaccels[] table (terminated by a NULL name)
 * for the entry matching pix_fmt; returns NULL if none matches. */
2382 {
2383  int i;
2384  for (i = 0; hwaccels[i].name; i++)
2385  if (hwaccels[i].pix_fmt == pix_fmt)
2386  return &hwaccels[i];
2387  return NULL;
2388 }
2389 
/**
 * AVCodecContext.get_format callback (installed in init_input_stream).
 * Walks the decoder's candidate pixel formats: the first non-hwaccel format
 * is accepted as-is; for hwaccel formats, tries to initialize the matching
 * entry from hwaccels[] subject to the user's -hwaccel selection.
 *
 * @return the chosen pixel format, or AV_PIX_FMT_NONE when an explicitly
 *         requested hwaccel fails to initialize
 */
2390 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2391 {
2392  InputStream *ist = s->opaque;
2393  const enum AVPixelFormat *p;
2394  int ret;
2395 
      /* pix_fmts is terminated by -1 (AV_PIX_FMT_NONE) */
2396  for (p = pix_fmts; *p != -1; p++) {
2397  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2398  const HWAccel *hwaccel;
2399 
      /* first software format: take it */
2400  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2401  break;
2402 
2403  hwaccel = get_hwaccel(*p);
      /* skip formats without a known hwaccel, or ones that conflict with an
       * already-active or explicitly requested hwaccel */
2404  if (!hwaccel ||
2405  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2406  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2407  continue;
2408 
2409  ret = hwaccel->init(s);
2410  if (ret < 0) {
      /* explicit request that cannot be satisfied is a hard failure */
2411  if (ist->hwaccel_id == hwaccel->id) {
      /* NOTE(review): line 2412 (the av_log(NULL, AV_LOG_FATAL, opening) is
       * missing from this extraction — confirm against the full source. */
2413  "%s hwaccel requested for input stream #%d:%d, "
2414  "but cannot be initialized.\n", hwaccel->name,
2415  ist->file_index, ist->st->index);
2416  return AV_PIX_FMT_NONE;
2417  }
2418  continue;
2419  }
2420  ist->active_hwaccel_id = hwaccel->id;
2421  ist->hwaccel_pix_fmt = *p;
2422  break;
2423  }
2424 
2425  return *p;
2426 }
2427 
/* NOTE(review): the signature line (2428) is missing from this extraction;
 * given the assignment to dec_ctx->get_buffer2 below, this is
 *     static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
 * — confirm against the full source.
 *
 * get_buffer2 callback: delegate to the active hwaccel's buffer allocator
 * when the frame uses the hwaccel pixel format, otherwise fall back to the
 * default allocator. */
2429 {
2430  InputStream *ist = s->opaque;
2431 
2432  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2433  return ist->hwaccel_get_buffer(s, frame, flags);
2434 
2435  return avcodec_default_get_buffer2(s, frame, flags);
2436 }
2437 
/**
 * Open the decoder for one input stream (if decoding is needed) and reset
 * its next_pts/next_dts bookkeeping.
 *
 * @param ist_index index into the global input_streams[] array
 * @param error     buffer receiving a human-readable message on failure
 * @param error_len size of @p error
 * @return 0 on success, a negative AVERROR on failure
 */
2438 static int init_input_stream(int ist_index, char *error, int error_len)
2439 {
2440  int ret;
2441  InputStream *ist = input_streams[ist_index];
2442 
2443  if (ist->decoding_needed) {
2444  AVCodec *codec = ist->dec;
2445  if (!codec) {
2446  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2447  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2448  return AVERROR(EINVAL);
2449  }
2450 
      /* install our callbacks; opaque lets them find the InputStream */
2451  ist->dec_ctx->opaque = ist;
2452  ist->dec_ctx->get_format = get_format;
2453  ist->dec_ctx->get_buffer2 = get_buffer;
2454  ist->dec_ctx->thread_safe_callbacks = 1;
2455 
2456  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2457  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2458  (ist->decoding_needed & DECODING_FOR_OST)) {
2459  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
      /* NOTE(review): line 2460 is missing from this extraction — likely the
       * DECODING_FOR_FILTER condition guarding this warning; confirm. */
2461  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2462  }
2463 
      /* default to automatic thread count unless the user set one */
2464  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2465  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2466  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2467  if (ret == AVERROR_EXPERIMENTAL)
2468  abort_codec_experimental(codec, 0);
2469 
2470  snprintf(error, error_len,
2471  "Error while opening decoder for input stream "
2472  "#%d:%d : %s",
2473  ist->file_index, ist->st->index, av_err2str(ret));
2474  return ret;
2475  }
      /* NOTE(review): line 2476 is missing from this extraction (upstream
       * validates leftover decoder options here) — confirm. */
2477  }
2478 
2479  ist->next_pts = AV_NOPTS_VALUE;
2480  ist->next_dts = AV_NOPTS_VALUE;
2481 
2482  return 0;
2483 }
2484 
/* NOTE(review): the signature line (2485) is missing from this extraction;
 * from the call sites (ist = get_input_stream(ost)) this is
 *     static InputStream *get_input_stream(OutputStream *ost)
 * — confirm against the full source.
 *
 * Returns the input stream feeding this output stream, or NULL when the
 * output has no direct source (e.g. fed by a complex filtergraph). */
2486 {
2487  if (ost->source_index >= 0)
2488  return input_streams[ost->source_index];
2489  return NULL;
2490 }
2491 
/**
 * qsort() comparator for int64_t values, ascending.
 *
 * Returns -1, 0 or +1 instead of subtracting the operands, which would
 * overflow for widely separated values.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    if (lhs < rhs)
        return -1;
    if (lhs > rhs)
        return 1;
    return 0;
}
2497 
/**
 * Parse the -force_key_frames argument @p kf (a comma-separated list of
 * timestamps, where an entry starting with "chapters" expands to every
 * chapter start, optionally shifted by a delta) into a sorted array of
 * pts values in @p avctx's time base, stored on @p ost
 * (forced_kf_pts / forced_kf_count).
 *
 * Note: @p kf is modified in place (commas are overwritten with NULs).
 */
2498 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2499  AVCodecContext *avctx)
2500 {
2501  char *p;
2502  int n = 1, i, size, index = 0;
2503  int64_t t, *pts;
2504 
      /* count entries: one more than the number of commas */
2505  for (p = kf; *p; p++)
2506  if (*p == ',')
2507  n++;
2508  size = n;
2509  pts = av_malloc_array(size, sizeof(*pts));
2510  if (!pts) {
2511  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2512  exit_program(1);
2513  }
2514 
2515  p = kf;
2516  for (i = 0; i < n; i++) {
2517  char *next = strchr(p, ',');
2518 
2519  if (next)
2520  *next++ = 0;
2521 
      /* NOTE(review): memcmp with a fixed length of 8 may read past the end
       * of a token shorter than "chapters" — presumably safe in practice for
       * this option string, but confirm (strncmp would be strictly safer). */
2522  if (!memcmp(p, "chapters", 8)) {
2523 
2524  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2525  int j;
2526 
      /* grow the array to hold one entry per chapter (replacing this one) */
2527  if (avf->nb_chapters > INT_MAX - size ||
2528  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2529  sizeof(*pts)))) {
      /* NOTE(review): line 2530 (the av_log(NULL, AV_LOG_FATAL, opening) is
       * missing from this extraction — confirm against the full source. */
2531  "Could not allocate forced key frames array.\n");
2532  exit_program(1);
2533  }
      /* optional delta after "chapters", e.g. "chapters-0.1" */
2534  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2535  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2536 
2537  for (j = 0; j < avf->nb_chapters; j++) {
2538  AVChapter *c = avf->chapters[j];
2539  av_assert1(index < size);
2540  pts[index++] = av_rescale_q(c->start, c->time_base,
2541  avctx->time_base) + t;
2542  }
2543 
2544  } else {
2545 
2546  t = parse_time_or_die("force_key_frames", p, 1);
2547  av_assert1(index < size);
2548  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2549 
2550  }
2551 
2552  p = next;
2553  }
2554 
2555  av_assert0(index == size);
2556  qsort(pts, size, sizeof(*pts), compare_int64);
2557  ost->forced_kf_count = size;
2558  ost->forced_kf_pts = pts;
2559 }
2560 
/**
 * Warn (once per stream index) when a packet arrives for a stream that
 * appeared after the input was opened; nb_streams_warn tracks the highest
 * stream index already reported.
 */
2561 static void report_new_stream(int input_index, AVPacket *pkt)
2562 {
2563  InputFile *file = input_files[input_index];
2564  AVStream *st = file->ctx->streams[pkt->stream_index];
2565 
2566  if (pkt->stream_index < file->nb_streams_warn)
2567  return;
2568  av_log(file->ctx, AV_LOG_WARNING,
2569  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
      /* NOTE(review): line 2570 is missing from this extraction — the %s
       * argument (upstream passes the media type string) — confirm. */
2571  input_index, pkt->stream_index,
2572  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2573  file->nb_streams_warn = pkt->stream_index + 1;
2574 }
2575 
/* NOTE(review): the signature line (2576) is missing from this extraction;
 * from the call site set_encoder_id(output_files[ost->file_index], ost)
 * this is
 *     static void set_encoder_id(OutputFile *of, OutputStream *ost)
 * — confirm against the full source.
 *
 * Write an "encoder" metadata tag on the output stream identifying the
 * encoder used ("Lavc <name>", prefixed with the full LIBAVCODEC_IDENT
 * unless bitexact output was requested). A user-provided tag is kept. */
2577 {
2578  AVDictionaryEntry *e;
2579 
2580  uint8_t *encoder_string;
2581  int encoder_string_len;
2582  int format_flags = 0;
2583  int codec_flags = 0;
2584 
      /* respect an encoder tag already set (e.g. via -metadata) */
2585  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2586  return;
2587 
      /* evaluate -fflags / -flags option strings to detect bitexact mode */
2588  e = av_dict_get(of->opts, "fflags", NULL, 0);
2589  if (e) {
2590  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2591  if (!o)
2592  return;
2593  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2594  }
2595  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2596  if (e) {
2597  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2598  if (!o)
2599  return;
2600  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2601  }
2602 
      /* "+2": one for the separating space, one for the terminating NUL */
2603  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2604  encoder_string = av_mallocz(encoder_string_len);
2605  if (!encoder_string)
2606  exit_program(1);
2607 
2608  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
2609  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2610  else
2611  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2612  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2613  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
      /* NOTE(review): line 2614 is missing from this extraction — the flags
       * argument of av_dict_set (upstream passes AV_DICT_DONT_STRDUP_VAL,
       * transferring ownership of encoder_string) — confirm. */
2615 }
2616 
2617 static int transcode_init(void)
2618 {
2619  int ret = 0, i, j, k;
2620  AVFormatContext *oc;
2621  OutputStream *ost;
2622  InputStream *ist;
2623  char error[1024] = {0};
2624  int want_sdp = 1;
2625 
2626  for (i = 0; i < nb_filtergraphs; i++) {
2627  FilterGraph *fg = filtergraphs[i];
2628  for (j = 0; j < fg->nb_outputs; j++) {
2629  OutputFilter *ofilter = fg->outputs[j];
2630  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2631  continue;
2632  if (fg->nb_inputs != 1)
2633  continue;
2634  for (k = nb_input_streams-1; k >= 0 ; k--)
2635  if (fg->inputs[0]->ist == input_streams[k])
2636  break;
2637  ofilter->ost->source_index = k;
2638  }
2639  }
2640 
2641  /* init framerate emulation */
2642  for (i = 0; i < nb_input_files; i++) {
2643  InputFile *ifile = input_files[i];
2644  if (ifile->rate_emu)
2645  for (j = 0; j < ifile->nb_streams; j++)
2646  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2647  }
2648 
2649  /* output stream init */
2650  for (i = 0; i < nb_output_files; i++) {
2651  oc = output_files[i]->ctx;
2652  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2653  av_dump_format(oc, i, oc->filename, 1);
2654  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2655  return AVERROR(EINVAL);
2656  }
2657  }
2658 
2659  /* init complex filtergraphs */
2660  for (i = 0; i < nb_filtergraphs; i++)
2661  if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2662  return ret;
2663 
2664  /* for each output stream, we compute the right encoding parameters */
2665  for (i = 0; i < nb_output_streams; i++) {
2666  AVCodecContext *enc_ctx;
2668  ost = output_streams[i];
2669  oc = output_files[ost->file_index]->ctx;
2670  ist = get_input_stream(ost);
2671 
2672  if (ost->attachment_filename)
2673  continue;
2674 
2675  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2676 
2677  if (ist) {
2678  dec_ctx = ist->dec_ctx;
2679 
2680  ost->st->disposition = ist->st->disposition;
2681  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2682  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2683  } else {
2684  for (j=0; j<oc->nb_streams; j++) {
2685  AVStream *st = oc->streams[j];
2686  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2687  break;
2688  }
2689  if (j == oc->nb_streams)
2690  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2692  }
2693 
2694  if (ost->stream_copy) {
2695  AVRational sar;
2696  uint64_t extra_size;
2697 
2698  av_assert0(ist && !ost->filter);
2699 
2700  extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2701 
2702  if (extra_size > INT_MAX) {
2703  return AVERROR(EINVAL);
2704  }
2705 
2706  /* if stream_copy is selected, no need to decode or encode */
2707  enc_ctx->codec_id = dec_ctx->codec_id;
2708  enc_ctx->codec_type = dec_ctx->codec_type;
2709 
2710  if (!enc_ctx->codec_tag) {
2711  unsigned int codec_tag;
2712  if (!oc->oformat->codec_tag ||
2713  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2714  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2715  enc_ctx->codec_tag = dec_ctx->codec_tag;
2716  }
2717 
2718  enc_ctx->bit_rate = dec_ctx->bit_rate;
2719  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2720  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2721  enc_ctx->field_order = dec_ctx->field_order;
2722  if (dec_ctx->extradata_size) {
2723  enc_ctx->extradata = av_mallocz(extra_size);
2724  if (!enc_ctx->extradata) {
2725  return AVERROR(ENOMEM);
2726  }
2727  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2728  }
2729  enc_ctx->extradata_size= dec_ctx->extradata_size;
2730  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2731 
2732  enc_ctx->time_base = ist->st->time_base;
2733  /*
2734  * Avi is a special case here because it supports variable fps but
2735  * having the fps and timebase differe significantly adds quite some
2736  * overhead
2737  */
2738  if(!strcmp(oc->oformat->name, "avi")) {
2739  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2740  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2741  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2742  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2743  || copy_tb==2){
2744  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2745  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2746  enc_ctx->ticks_per_frame = 2;
2747  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2748  && av_q2d(ist->st->time_base) < 1.0/500
2749  || copy_tb==0){
2750  enc_ctx->time_base = dec_ctx->time_base;
2751  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2752  enc_ctx->time_base.den *= 2;
2753  enc_ctx->ticks_per_frame = 2;
2754  }
2755  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2756  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2757  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2758  && strcmp(oc->oformat->name, "f4v")
2759  ) {
2760  if( copy_tb<0 && dec_ctx->time_base.den
2761  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2762  && av_q2d(ist->st->time_base) < 1.0/500
2763  || copy_tb==0){
2764  enc_ctx->time_base = dec_ctx->time_base;
2765  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2766  }
2767  }
2768  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2769  && dec_ctx->time_base.num < dec_ctx->time_base.den
2770  && dec_ctx->time_base.num > 0
2771  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2772  enc_ctx->time_base = dec_ctx->time_base;
2773  }
2774 
2775  if (ist && !ost->frame_rate.num)
2776  ost->frame_rate = ist->framerate;
2777  if(ost->frame_rate.num)
2778  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2779 
2780  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2781  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2782 
2783  if (ist->st->nb_side_data) {
2785  sizeof(*ist->st->side_data));
2786  if (!ost->st->side_data)
2787  return AVERROR(ENOMEM);
2788 
2789  ost->st->nb_side_data = 0;
2790  for (j = 0; j < ist->st->nb_side_data; j++) {
2791  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2792  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2793 
2794  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2795  continue;
2796 
2797  sd_dst->data = av_malloc(sd_src->size);
2798  if (!sd_dst->data)
2799  return AVERROR(ENOMEM);
2800  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2801  sd_dst->size = sd_src->size;
2802  sd_dst->type = sd_src->type;
2803  ost->st->nb_side_data++;
2804  }
2805  }
2806 
2807  ost->parser = av_parser_init(enc_ctx->codec_id);
2808 
2809  switch (enc_ctx->codec_type) {
2810  case AVMEDIA_TYPE_AUDIO:
2811  if (audio_volume != 256) {
2812  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2813  exit_program(1);
2814  }
2815  enc_ctx->channel_layout = dec_ctx->channel_layout;
2816  enc_ctx->sample_rate = dec_ctx->sample_rate;
2817  enc_ctx->channels = dec_ctx->channels;
2818  enc_ctx->frame_size = dec_ctx->frame_size;
2819  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2820  enc_ctx->block_align = dec_ctx->block_align;
2821  enc_ctx->initial_padding = dec_ctx->delay;
2822 #if FF_API_AUDIOENC_DELAY
2823  enc_ctx->delay = dec_ctx->delay;
2824 #endif
2825  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2826  enc_ctx->block_align= 0;
2827  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2828  enc_ctx->block_align= 0;
2829  break;
2830  case AVMEDIA_TYPE_VIDEO:
2831  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2832  enc_ctx->width = dec_ctx->width;
2833  enc_ctx->height = dec_ctx->height;
2834  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2835  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2836  sar =
2838  (AVRational){ enc_ctx->height, enc_ctx->width });
2839  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2840  "with stream copy may produce invalid files\n");
2841  }
2842  else if (ist->st->sample_aspect_ratio.num)
2843  sar = ist->st->sample_aspect_ratio;
2844  else
2845  sar = dec_ctx->sample_aspect_ratio;
2846  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2847  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2848  ost->st->r_frame_rate = ist->st->r_frame_rate;
2849  break;
2850  case AVMEDIA_TYPE_SUBTITLE:
2851  enc_ctx->width = dec_ctx->width;
2852  enc_ctx->height = dec_ctx->height;
2853  break;
2854  case AVMEDIA_TYPE_UNKNOWN:
2855  case AVMEDIA_TYPE_DATA:
2857  break;
2858  default:
2859  abort();
2860  }
2861  } else {
2862  if (!ost->enc)
2863  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2864  if (!ost->enc) {
2865  /* should only happen when a default codec is not present. */
2866  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2867  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2868  ret = AVERROR(EINVAL);
2869  goto dump_format;
2870  }
2871 
2872  if (ist)
2874  ost->encoding_needed = 1;
2875 
2876  set_encoder_id(output_files[ost->file_index], ost);
2877 
2878  if (!ost->filter &&
2879  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2880  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2881  FilterGraph *fg;
2882  fg = init_simple_filtergraph(ist, ost);
2883  if (configure_filtergraph(fg)) {
2884  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2885  exit_program(1);
2886  }
2887  }
2888 
2889  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2890  if (!ost->frame_rate.num)
2892  if (ist && !ost->frame_rate.num)
2893  ost->frame_rate = ist->framerate;
2894  if (ist && !ost->frame_rate.num)
2895  ost->frame_rate = ist->st->r_frame_rate;
2896  if (ist && !ost->frame_rate.num) {
2897  ost->frame_rate = (AVRational){25, 1};
2899  "No information "
2900  "about the input framerate is available. Falling "
2901  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2902  "if you want a different framerate.\n",
2903  ost->file_index, ost->index);
2904  }
2905 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2906  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2908  ost->frame_rate = ost->enc->supported_framerates[idx];
2909  }
2910  // reduce frame rate for mpeg4 to be within the spec limits
2911  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2912  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2913  ost->frame_rate.num, ost->frame_rate.den, 65535);
2914  }
2915  }
2916 
2917  switch (enc_ctx->codec_type) {
2918  case AVMEDIA_TYPE_AUDIO:
2919  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
2920  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2921  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2922  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2923  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
2924  break;
2925  case AVMEDIA_TYPE_VIDEO:
2926  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2927  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
2928  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
2929  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2931  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2932  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
2933  }
2934  for (j = 0; j < ost->forced_kf_count; j++)
2935  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2937  enc_ctx->time_base);
2938 
2939  enc_ctx->width = ost->filter->filter->inputs[0]->w;
2940  enc_ctx->height = ost->filter->filter->inputs[0]->h;
2941  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2942  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2943  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
2945  if (!strncmp(ost->enc->name, "libx264", 7) &&
2946  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
2949  "No pixel format specified, %s for H.264 encoding chosen.\n"
2950  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2952  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
2953  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
2956  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
2957  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2959  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
2960 
2961  ost->st->avg_frame_rate = ost->frame_rate;
2962 
2963  if (!dec_ctx ||
2964  enc_ctx->width != dec_ctx->width ||
2965  enc_ctx->height != dec_ctx->height ||
2966  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
2968  }
2969 
2970  if (ost->forced_keyframes) {
2971  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
2974  if (ret < 0) {
2976  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
2977  return ret;
2978  }
2983 
2984  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
2985  // parse it only for static kf timings
2986  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
2988  }
2989  }
2990  break;
2991  case AVMEDIA_TYPE_SUBTITLE:
2992  enc_ctx->time_base = (AVRational){1, 1000};
2993  if (!enc_ctx->width) {
2994  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
2995  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
2996  }
2997  break;
2998  case AVMEDIA_TYPE_DATA:
2999  break;
3000  default:
3001  abort();
3002  break;
3003  }
3004  /* two pass mode */
3005  if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
3006  char logfilename[1024];
3007  FILE *f;
3008 
3009  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
3010  ost->logfile_prefix ? ost->logfile_prefix :
3012  i);
3013  if (!strcmp(ost->enc->name, "libx264")) {
3014  av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
3015  } else {
3016  if (enc_ctx->flags & CODEC_FLAG_PASS2) {
3017  char *logbuffer;
3018  size_t logbuffer_size;
3019  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
3020  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
3021  logfilename);
3022  exit_program(1);
3023  }
3024  enc_ctx->stats_in = logbuffer;
3025  }
3026  if (enc_ctx->flags & CODEC_FLAG_PASS1) {
3027  f = av_fopen_utf8(logfilename, "wb");
3028  if (!f) {
3029  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
3030  logfilename, strerror(errno));
3031  exit_program(1);
3032  }
3033  ost->logfile = f;
3034  }
3035  }
3036  }
3037  }
3038 
3039  if (ost->disposition) {
3040  static const AVOption opts[] = {
3041  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3042  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3043  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3044  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3045  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3046  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3047  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3048  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3049  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3050  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3051  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3052  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3053  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3054  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3055  { NULL },
3056  };
3057  static const AVClass class = {
3058  .class_name = "",
3059  .item_name = av_default_item_name,
3060  .option = opts,
3061  .version = LIBAVUTIL_VERSION_INT,
3062  };
3063  const AVClass *pclass = &class;
3064 
3065  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3066  if (ret < 0)
3067  goto dump_format;
3068  }
3069  }
3070 
3071  /* open each encoder */
3072  for (i = 0; i < nb_output_streams; i++) {
3073  ost = output_streams[i];
3074  if (ost->encoding_needed) {
3075  AVCodec *codec = ost->enc;
3076  AVCodecContext *dec = NULL;
3077 
3078  if ((ist = get_input_stream(ost)))
3079  dec = ist->dec_ctx;
3080  if (dec && dec->subtitle_header) {
3081  /* ASS code assumes this buffer is null terminated so add extra byte. */
3083  if (!ost->enc_ctx->subtitle_header) {
3084  ret = AVERROR(ENOMEM);
3085  goto dump_format;
3086  }
3087  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3089  }
3090  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3091  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3092  av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
3093 
3094  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3095  if (ret == AVERROR_EXPERIMENTAL)
3096  abort_codec_experimental(codec, 1);
3097  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
3098  ost->file_index, ost->index);
3099  goto dump_format;
3100  }
3101  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3104  ost->enc_ctx->frame_size);
3106  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3107  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3108  " It takes bits/s as argument, not kbits/s\n");
3109 
3110  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3111  if (ret < 0) {
3113  "Error initializing the output stream codec context.\n");
3114  exit_program(1);
3115  }
3116 
3117  // copy timebase while removing common factors
3118  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3119  ost->st->codec->codec= ost->enc_ctx->codec;
3120  } else {
3121  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3122  if (ret < 0) {
3124  "Error setting up codec context options.\n");
3125  return ret;
3126  }
3127  // copy timebase while removing common factors
3128  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
3129  }
3130  }
3131 
3132  /* init input streams */
3133  for (i = 0; i < nb_input_streams; i++)
3134  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3135  for (i = 0; i < nb_output_streams; i++) {
3136  ost = output_streams[i];
3137  avcodec_close(ost->enc_ctx);
3138  }
3139  goto dump_format;
3140  }
3141 
3142  /* discard unused programs */
3143  for (i = 0; i < nb_input_files; i++) {
3144  InputFile *ifile = input_files[i];
3145  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3146  AVProgram *p = ifile->ctx->programs[j];
3147  int discard = AVDISCARD_ALL;
3148 
3149  for (k = 0; k < p->nb_stream_indexes; k++)
3150  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3151  discard = AVDISCARD_DEFAULT;
3152  break;
3153  }
3154  p->discard = discard;
3155  }
3156  }
3157 
3158  /* open files and write file headers */
3159  for (i = 0; i < nb_output_files; i++) {
3160  oc = output_files[i]->ctx;
3161  oc->interrupt_callback = int_cb;
3162  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3163  snprintf(error, sizeof(error),
3164  "Could not write header for output file #%d "
3165  "(incorrect codec parameters ?): %s",
3166  i, av_err2str(ret));
3167  ret = AVERROR(EINVAL);
3168  goto dump_format;
3169  }
3170 // assert_avoptions(output_files[i]->opts);
3171  if (strcmp(oc->oformat->name, "rtp")) {
3172  want_sdp = 0;
3173  }
3174  }
3175 
3176  dump_format:
3177  /* dump the file output parameters - cannot be done before in case
3178  of stream copy */
3179  for (i = 0; i < nb_output_files; i++) {
3180  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3181  }
3182 
3183  /* dump the stream mapping */
3184  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3185  for (i = 0; i < nb_input_streams; i++) {
3186  ist = input_streams[i];
3187 
3188  for (j = 0; j < ist->nb_filters; j++) {
3189  if (ist->filters[j]->graph->graph_desc) {
3190  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3191  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3192  ist->filters[j]->name);
3193  if (nb_filtergraphs > 1)
3194  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3195  av_log(NULL, AV_LOG_INFO, "\n");
3196  }
3197  }
3198  }
3199 
3200  for (i = 0; i < nb_output_streams; i++) {
3201  ost = output_streams[i];
3202 
3203  if (ost->attachment_filename) {
3204  /* an attached file */
3205  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3206  ost->attachment_filename, ost->file_index, ost->index);
3207  continue;
3208  }
3209 
3210  if (ost->filter && ost->filter->graph->graph_desc) {
3211  /* output from a complex graph */
3212  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3213  if (nb_filtergraphs > 1)
3214  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3215 
3216  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3217  ost->index, ost->enc ? ost->enc->name : "?");
3218  continue;
3219  }
3220 
3221  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3222  input_streams[ost->source_index]->file_index,
3223  input_streams[ost->source_index]->st->index,
3224  ost->file_index,
3225  ost->index);
3226  if (ost->sync_ist != input_streams[ost->source_index])
3227  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3228  ost->sync_ist->file_index,
3229  ost->sync_ist->st->index);
3230  if (ost->stream_copy)
3231  av_log(NULL, AV_LOG_INFO, " (copy)");
3232  else {
3233  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3234  const AVCodec *out_codec = ost->enc;
3235  const char *decoder_name = "?";
3236  const char *in_codec_name = "?";
3237  const char *encoder_name = "?";
3238  const char *out_codec_name = "?";
3239  const AVCodecDescriptor *desc;
3240 
3241  if (in_codec) {
3242  decoder_name = in_codec->name;
3243  desc = avcodec_descriptor_get(in_codec->id);
3244  if (desc)
3245  in_codec_name = desc->name;
3246  if (!strcmp(decoder_name, in_codec_name))
3247  decoder_name = "native";
3248  }
3249 
3250  if (out_codec) {
3251  encoder_name = out_codec->name;
3252  desc = avcodec_descriptor_get(out_codec->id);
3253  if (desc)
3254  out_codec_name = desc->name;
3255  if (!strcmp(encoder_name, out_codec_name))
3256  encoder_name = "native";
3257  }
3258 
3259  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3260  in_codec_name, decoder_name,
3261  out_codec_name, encoder_name);
3262  }
3263  av_log(NULL, AV_LOG_INFO, "\n");
3264  }
3265 
3266  if (ret) {
3267  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3268  return ret;
3269  }
3270 
3271  if (sdp_filename || want_sdp) {
3272  print_sdp();
3273  }
3274 
3275  transcode_init_done = 1;
3276 
3277  return 0;
3278 }
3279 
3280 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3281 static int need_output(void)
3282 {
3283  int i;
3284 
3285  for (i = 0; i < nb_output_streams; i++) {
3286  OutputStream *ost = output_streams[i];
3287  OutputFile *of = output_files[ost->file_index];
3288  AVFormatContext *os = output_files[ost->file_index]->ctx;
3289 
3290  if (ost->finished ||
3291  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3292  continue;
3293  if (ost->frame_number >= ost->max_frames) {
3294  int j;
3295  for (j = 0; j < of->ctx->nb_streams; j++)
3296  close_output_stream(output_streams[of->ost_index + j]);
3297  continue;
3298  }
3299 
3300  return 1;
3301  }
3302 
3303  return 0;
3304 }
3305 
3306 /**
3307  * Select the output stream to process.
3308  *
3309  * @return selected output stream, or NULL if none available
3310  */
3312 {
3313  int i;
3314  int64_t opts_min = INT64_MAX;
3315  OutputStream *ost_min = NULL;
3316 
3317  for (i = 0; i < nb_output_streams; i++) {
3318  OutputStream *ost = output_streams[i];
3319  int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3320  AV_TIME_BASE_Q);
3321  if (!ost->finished && opts < opts_min) {
3322  opts_min = opts;
3323  ost_min = ost->unavailable ? NULL : ost;
3324  }
3325  }
3326  return ost_min;
3327 }
3328 
/*
 * Poll the terminal for an interactive key press and act on it
 * (quit, verbosity, packet dumping, filter commands, debug, help).
 * NOTE(review): the signature line (doxygen 3329) was lost in extraction;
 * presumably "static int check_keyboard_interaction(int64_t cur_time)" —
 * confirm against upstream.
 */
3330 {
3331  int i, ret, key;
3332  static int64_t last_time;
3333  if (received_nb_signals)
3334  return AVERROR_EXIT;
3335  /* read_key() returns 0 on EOF */
/* Rate-limit polling to once per 100ms; never poll when running as daemon. */
3336  if(cur_time - last_time >= 100000 && !run_as_daemon){
3337  key = read_key();
3338  last_time = cur_time;
3339  }else
3340  key = -1;
3341  if (key == 'q')
3342  return AVERROR_EXIT;
/* '+'/'-' adjust log verbosity; 's' toggles the QP histogram. */
3343  if (key == '+') av_log_set_level(av_log_get_level()+10);
3344  if (key == '-') av_log_set_level(av_log_get_level()-10);
3345  if (key == 's') qp_hist ^= 1;
/* 'h' cycles the dump state: off -> packet dump -> packet+hex dump -> off. */
3346  if (key == 'h'){
3347  if (do_hex_dump){
3348  do_hex_dump = do_pkt_dump = 0;
3349  } else if(do_pkt_dump){
3350  do_hex_dump = 1;
3351  } else
3352  do_pkt_dump = 1;
/* NOTE(review): doxygen line 3353 missing here (extraction gap). */
3354  }
/* 'c'/'C': read a filter command from stdin and send it to one ('c')
 * or queue/send to all ('C') matching filters. */
3355  if (key == 'c' || key == 'C'){
3356  char buf[4096], target[64], command[256], arg[256] = {0};
3357  double time;
3358  int k, n = 0;
3359  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3360  i = 0;
3361  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3362  if (k > 0)
3363  buf[i++] = k;
3364  buf[i] = 0;
3365  if (k > 0 &&
3366  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3367  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3368  target, time, command, arg);
3369  for (i = 0; i < nb_filtergraphs; i++) {
3370  FilterGraph *fg = filtergraphs[i];
3371  if (fg->graph) {
/* time < 0 means "now": send the command immediately. */
3372  if (time < 0) {
3373  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3374  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3375  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3376  } else if (key == 'c') {
3377  fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3378  ret = AVERROR_PATCHWELCOME;
3379  } else {
3380  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3381  if (ret < 0)
3382  fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3383  }
3384  }
3385  }
3386  } else {
/* NOTE(review): doxygen line 3387 missing here (extraction gap); presumably
 * the av_log() call these string arguments belong to. */
3388  "Parse error, at least 3 arguments were expected, "
3389  "only %d given in string '%s'\n", n, buf);
3390  }
3391  }
/* 'd' reads a debug value from stdin; 'D' cycles through debug modes.
 * The value is applied to all input and output codec contexts. */
3392  if (key == 'd' || key == 'D'){
3393  int debug=0;
3394  if(key == 'D') {
3395  debug = input_streams[0]->st->codec->debug<<1;
3396  if(!debug) debug = 1;
3397  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3398  debug += debug;
3399  }else
3400  if(scanf("%d", &debug)!=1)
3401  fprintf(stderr,"error parsing debug value\n");
3402  for(i=0;i<nb_input_streams;i++) {
3403  input_streams[i]->st->codec->debug = debug;
3404  }
3405  for(i=0;i<nb_output_streams;i++) {
3406  OutputStream *ost = output_streams[i];
3407  ost->enc_ctx->debug = debug;
3408  }
3409  if(debug) av_log_set_level(AV_LOG_DEBUG);
3410  fprintf(stderr,"debug=%d\n", debug);
3411  }
/* '?' prints the interactive help text. */
3412  if (key == '?'){
3413  fprintf(stderr, "key    function\n"
3414  "?      show this help\n"
3415  "+      increase verbosity\n"
3416  "-      decrease verbosity\n"
3417  "c      Send command to first matching filter supporting it\n"
3418  "C      Send/Que command to all matching filters\n"
3419  "D      cycle through available debug modes\n"
3420  "h      dump packets/hex press to cycle through the 3 states\n"
3421  "q      quit\n"
3422  "s      Show QP histogram\n"
3423  );
3424  }
3425  return 0;
3426 }
3427 
3428 #if HAVE_PTHREADS
/* Per-input-file demuxer thread: reads packets from the input context and
 * forwards them to the main thread through the file's message queue. */
3429 static void *input_thread(void *arg)
3430 {
3431  InputFile *f = arg;
3432  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3433  int ret = 0;
3434 
3435  while (1) {
3436  AVPacket pkt;
3437  ret = av_read_frame(f->ctx, &pkt);
3438 
/* Nothing available yet on a live input: back off briefly and retry. */
3439  if (ret == AVERROR(EAGAIN)) {
3440  av_usleep(10000);
3441  continue;
3442  }
3443  if (ret < 0) {
/* NOTE(review): doxygen line 3444 missing here (extraction gap). */
3445  break;
3446  }
/* Make the packet self-contained before it crosses thread boundaries. */
3447  av_dup_packet(&pkt);
3448  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* A non-blocking send that hit EAGAIN is retried once in blocking mode
 * while warning that the queue is too small. */
3449  if (flags && ret == AVERROR(EAGAIN)) {
3450  flags = 0;
3451  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
/* NOTE(review): doxygen line 3452 missing here (extraction gap); presumably
 * the av_log() call these string arguments belong to. */
3453  "Thread message queue blocking; consider raising the "
3454  "thread_queue_size option (current value: %d)\n",
3455  f->thread_queue_size);
3456  }
3457  if (ret < 0) {
3458  if (ret != AVERROR_EOF)
3459  av_log(f->ctx, AV_LOG_ERROR,
3460  "Unable to send packet to main thread: %s\n",
3461  av_err2str(ret));
3462  av_free_packet(&pkt);
/* NOTE(review): doxygen line 3463 missing here (extraction gap). */
3464  break;
3465  }
3466  }
3467 
3468  return NULL;
3469 }
3470 
/* Stop and join every demuxer thread, draining any packets still queued. */
3471 static void free_input_threads(void)
3472 {
3473  int i;
3474 
3475  for (i = 0; i < nb_input_files; i++) {
3476  InputFile *f = input_files[i];
3477  AVPacket pkt;
3478 
/* No thread queue means no thread was started for this input. */
3479  if (!f->in_thread_queue)
3480  continue;
/* NOTE(review): doxygen line 3481 missing here (extraction gap). */
/* Drain queued packets so the sender can terminate. */
3482  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3483  av_free_packet(&pkt);
3484 
3485  pthread_join(f->thread, NULL);
3486  f->joined = 1;
/* NOTE(review): doxygen line 3487 missing here (extraction gap). */
3488  }
3489 }
3490 
/* Spawn one demuxer thread per input file; skipped entirely when there is
 * only a single input (the main thread reads it directly). */
3491 static int init_input_threads(void)
3492 {
3493  int i, ret;
3494 
3495  if (nb_input_files == 1)
3496  return 0;
3497 
3498  for (i = 0; i < nb_input_files; i++) {
3499  InputFile *f = input_files[i];
3500 
/* Non-seekable (live) inputs other than lavfi are read non-blocking. */
3501  if (f->ctx->pb ? !f->ctx->pb->seekable :
3502  strcmp(f->ctx->iformat->name, "lavfi"))
3503  f->non_blocking = 1;
/* NOTE(review): doxygen line 3504 missing here (extraction gap); presumably
 * the queue-allocation call these arguments belong to. */
3505  f->thread_queue_size, sizeof(AVPacket));
3506  if (ret < 0)
3507  return ret;
3508 
3509  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3510  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
/* NOTE(review): doxygen line 3511 missing here (extraction gap). */
3512  return AVERROR(ret);
3513  }
3514  }
3515  return 0;
3516 }
3517 
/* Receive one packet from this input file's demuxer-thread queue.
 * NOTE(review): doxygen lines 3518 (signature), 3520 and 3522 were lost in
 * extraction; only fragments of the body remain — confirm against upstream. */
3519 {
3521  f->non_blocking ?
3523 }
3524 #endif
3525 
/* Fetch the next packet for an input file, honoring -re rate emulation.
 * NOTE(review): the signature line (doxygen 3526) was lost in extraction;
 * presumably "static int get_input_packet(InputFile *f, AVPacket *pkt)" —
 * confirm against upstream. */
3527 {
/* With rate emulation, delay reading until the stream's dts (scaled to
 * microseconds) has caught up with elapsed wall-clock time. */
3528  if (f->rate_emu) {
3529  int i;
3530  for (i = 0; i < f->nb_streams; i++) {
3531  InputStream *ist = input_streams[f->ist_index + i];
3532  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3533  int64_t now = av_gettime_relative() - ist->start;
3534  if (pts > now)
3535  return AVERROR(EAGAIN);
3536  }
3537  }
3538 
/* Multiple inputs are demuxed by threads; a single input is read inline. */
3539 #if HAVE_PTHREADS
3540  if (nb_input_files > 1)
3541  return get_input_packet_mt(f, pkt);
3542 #endif
3543  return av_read_frame(f->ctx, pkt);
3544 }
3545 
3546 static int got_eagain(void)
3547 {
3548  int i;
3549  for (i = 0; i < nb_output_streams; i++)
3550  if (output_streams[i]->unavailable)
3551  return 1;
3552  return 0;
3553 }
3554 
3555 static void reset_eagain(void)
3556 {
3557  int i;
3558  for (i = 0; i < nb_input_files; i++)
3559  input_files[i]->eagain = 0;
3560  for (i = 0; i < nb_output_streams; i++)
3561  output_streams[i]->unavailable = 0;
3562 }
3563 
3564 /*
3565  * Return
3566  * - 0 -- one packet was read and processed
3567  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3568  *  this function should be called again
3569  * - AVERROR_EOF -- this function should not be called again
3570  */
3571 static int process_input(int file_index)
3572 {
3573  InputFile *ifile = input_files[file_index];
3574  AVFormatContext *is;
3575  InputStream *ist;
3576  AVPacket pkt;
3577  int ret, i, j;
3578 
3579  is = ifile->ctx;
3580  ret = get_input_packet(ifile, &pkt);
3581 
3582  if (ret == AVERROR(EAGAIN)) {
3583  ifile->eagain = 1;
3584  return ret;
3585  }
/* Read error or EOF: flush decoders and finish dependent output streams. */
3586  if (ret < 0) {
3587  if (ret != AVERROR_EOF) {
3588  print_error(is->filename, ret);
3589  if (exit_on_error)
3590  exit_program(1);
3591  }
3592 
3593  for (i = 0; i < ifile->nb_streams; i++) {
3594  ist = input_streams[ifile->ist_index + i];
/* A NULL packet flushes the decoder; >0 means it still produced output. */
3595  if (ist->decoding_needed) {
3596  ret = process_input_packet(ist, NULL);
3597  if (ret>0)
3598  return 0;
3599  }
3600 
3601  /* mark all outputs that don't go through lavfi as finished */
3602  for (j = 0; j < nb_output_streams; j++) {
3603  OutputStream *ost = output_streams[j];
3604 
3605  if (ost->source_index == ifile->ist_index + i &&
3606  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3607  finish_output_stream(ost);
3608  }
3609  }
3610 
3611  ifile->eof_reached = 1;
3612  return AVERROR(EAGAIN);
3613  }
3614 
3615  reset_eagain();
3616 
3617  if (do_pkt_dump) {
/* NOTE(review): doxygen line 3618 missing here (extraction gap); presumably
 * the packet-dump call these arguments belong to. */
3619  is->streams[pkt.stream_index]);
3620  }
3621  /* the following test is needed in case new streams appear
3622  dynamically in stream : we ignore them */
3623  if (pkt.stream_index >= ifile->nb_streams) {
3624  report_new_stream(file_index, &pkt);
3625  goto discard_packet;
3626  }
3627 
3628  ist = input_streams[ifile->ist_index + pkt.stream_index];
3629 
/* Per-stream statistics for the final report. */
3630  ist->data_size += pkt.size;
3631  ist->nb_packets++;
3632 
3633  if (ist->discard)
3634  goto discard_packet;
3635 
3636  if (debug_ts) {
3637  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3638  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
/* NOTE(review): doxygen lines 3639-3641 missing here (extraction gap). */
3642  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3643  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3644  av_ts2str(input_files[ist->file_index]->ts_offset),
3645  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3646  }
3647 
/* Timestamp wrap correction for containers with < 64 pts_wrap_bits. */
3648  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3649  int64_t stime, stime2;
3650  // Correcting starttime based on the enabled streams
3651  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3652  //  so we instead do it here as part of discontinuity handling
3653  if ( ist->next_dts == AV_NOPTS_VALUE
3654  && ifile->ts_offset == -is->start_time
3655  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3656  int64_t new_start_time = INT64_MAX;
3657  for (i=0; i<is->nb_streams; i++) {
3658  AVStream *st = is->streams[i];
3659  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3660  continue;
3661  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3662  }
3663  if (new_start_time > is->start_time) {
3664  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3665  ifile->ts_offset = -new_start_time;
3666  }
3667  }
3668 
3669  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3670  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3671  ist->wrap_correction_done = 1;
3672 
/* Unwrap dts/pts that sit more than half a wrap period past start time. */
3673  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3674  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3675  ist->wrap_correction_done = 0;
3676  }
3677  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3678  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3679  ist->wrap_correction_done = 0;
3680  }
3681  }
3682 
3683  /* add the stream-global side data to the first packet */
3684  if (ist->nb_packets == 1) {
3685  if (ist->st->nb_side_data)
/* NOTE(review): doxygen line 3686 missing here (extraction gap). */
3687  for (i = 0; i < ist->st->nb_side_data; i++) {
3688  AVPacketSideData *src_sd = &ist->st->side_data[i];
3689  uint8_t *dst_data;
3690 
/* Do not overwrite side data the packet already carries. */
3691  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3692  continue;
3693  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3694  continue;
3695 
3696  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3697  if (!dst_data)
3698  exit_program(1);
3699 
3700  memcpy(dst_data, src_sd->data, src_sd->size);
3701  }
3702  }
3703 
/* Apply the per-file timestamp offset, then the per-stream ts scale. */
3704  if (pkt.dts != AV_NOPTS_VALUE)
3705  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3706  if (pkt.pts != AV_NOPTS_VALUE)
3707  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3708 
3709  if (pkt.pts != AV_NOPTS_VALUE)
3710  pkt.pts *= ist->ts_scale;
3711  if (pkt.dts != AV_NOPTS_VALUE)
3712  pkt.dts *= ist->ts_scale;
3713 
/* Inter-stream discontinuity handling for formats with AVFMT_TS_DISCONT. */
3714  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
/* NOTE(review): doxygen line 3715 missing here (extraction gap); presumably
 * the rest of this condition. */
3716  pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3717  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3718  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3719  int64_t delta = pkt_dts - ifile->last_ts;
3720  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3721  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3722  ifile->ts_offset -= delta;
/* NOTE(review): doxygen line 3723 missing here (extraction gap); presumably
 * the av_log() call these arguments belong to. */
3724  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3725  delta, ifile->ts_offset);
3726  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3727  if (pkt.pts != AV_NOPTS_VALUE)
3728  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3729  }
3730  }
3731 
/* Intra-stream discontinuity handling against the predicted next_dts. */
3732  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
/* NOTE(review): doxygen line 3733 missing here (extraction gap); presumably
 * the rest of this condition. */
3734  pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3735  !copy_ts) {
3736  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3737  int64_t delta = pkt_dts - ist->next_dts;
3738  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3739  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3740  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3741  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3742  ifile->ts_offset -= delta;
/* NOTE(review): doxygen line 3743 missing here (extraction gap); presumably
 * the av_log() call these arguments belong to. */
3744  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3745  delta, ifile->ts_offset);
3746  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3747  if (pkt.pts != AV_NOPTS_VALUE)
3748  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3749  }
3750  } else {
/* Non-discontinuous formats: drop wildly invalid dts/pts instead. */
3751  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3752  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3753  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3754  pkt.dts = AV_NOPTS_VALUE;
3755  }
3756  if (pkt.pts != AV_NOPTS_VALUE){
3757  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3758  delta = pkt_pts - ist->next_dts;
3759  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3760  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3761  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3762  pkt.pts = AV_NOPTS_VALUE;
3763  }
3764  }
3765  }
3766  }
3767 
3768  if (pkt.dts != AV_NOPTS_VALUE)
3769  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3770 
3771  if (debug_ts) {
3772  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
/* NOTE(review): doxygen line 3773 missing here (extraction gap). */
3774  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3775  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3776  av_ts2str(input_files[ist->file_index]->ts_offset),
3777  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3778  }
3779 
3780  sub2video_heartbeat(ist, pkt.pts);
3781 
3782  ret = process_input_packet(ist, &pkt);
3783  if (ret < 0) {
3784  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
3785  ist->file_index, ist->st->index, av_err2str(ret));
3786  if (exit_on_error)
3787  exit_program(1);
3788  }
3789 
3790 discard_packet:
3791  av_free_packet(&pkt);
3792 
3793  return 0;
3794 }
3795 
3796 /**
3797  * Perform a step of transcoding for the specified filter graph.
3798  *
3799  * @param[in] graph filter graph to consider
3800  * @param[out] best_ist input stream where a frame would allow to continue
3801  * @return 0 for success, <0 for error
3802  */
3803 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3804 {
3805  int i, ret;
3806  int nb_requests, nb_requests_max = 0;
3807  InputFilter *ifilter;
3808  InputStream *ist;
3809 
3810  *best_ist = NULL;
3811  ret = avfilter_graph_request_oldest(graph->graph);
3812  if (ret >= 0)
3813  return reap_filters(0);
3814 
3815  if (ret == AVERROR_EOF) {
3816  ret = reap_filters(1);
3817  for (i = 0; i < graph->nb_outputs; i++)
3818  close_output_stream(graph->outputs[i]->ost);
3819  return ret;
3820  }
3821  if (ret != AVERROR(EAGAIN))
3822  return ret;
3823 
3824  for (i = 0; i < graph->nb_inputs; i++) {
3825  ifilter = graph->inputs[i];
3826  ist = ifilter->ist;
3827  if (input_files[ist->file_index]->eagain ||
3828  input_files[ist->file_index]->eof_reached)
3829  continue;
3830  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3831  if (nb_requests > nb_requests_max) {
3832  nb_requests_max = nb_requests;
3833  *best_ist = ist;
3834  }
3835  }
3836 
3837  if (!*best_ist)
3838  for (i = 0; i < graph->nb_outputs; i++)
3839  graph->outputs[i]->ost->unavailable = 1;
3840 
3841  return 0;
3842 }
3843 
3844 /**
3845  * Run a single step of transcoding.
3846  *
3847  * @return 0 for success, <0 for error
3848  */
3849 static int transcode_step(void)
3850 {
3851  OutputStream *ost;
3852  InputStream *ist;
3853  int ret;
3854 
3855  ost = choose_output();
3856  if (!ost) {
3857  if (got_eagain()) {
3858  reset_eagain();
3859  av_usleep(10000);
3860  return 0;
3861  }
3862  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3863  return AVERROR_EOF;
3864  }
3865 
3866  if (ost->filter) {
3867  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3868  return ret;
3869  if (!ist)
3870  return 0;
3871  } else {
3872  av_assert0(ost->source_index >= 0);
3873  ist = input_streams[ost->source_index];
3874  }
3875 
3876  ret = process_input(ist->file_index);
3877  if (ret == AVERROR(EAGAIN)) {
3878  if (input_files[ist->file_index]->eagain)
3879  ost->unavailable = 1;
3880  return 0;
3881  }
3882 
3883  if (ret < 0)
3884  return ret == AVERROR_EOF ? 0 : ret;
3885 
3886  return reap_filters(0);
3887 }
3888 
3889 /*
3890  * The following code is the main loop of the file converter
3891  */
3892 static int transcode(void)
3893 {
3894  int ret, i;
3895  AVFormatContext *os;
3896  OutputStream *ost;
3897  InputStream *ist;
3898  int64_t timer_start;
3899 
3900  ret = transcode_init();
3901  if (ret < 0)
3902  goto fail;
3903 
3904  if (stdin_interaction) {
3905  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3906  }
3907 
3908  timer_start = av_gettime_relative();
3909 
3910 #if HAVE_PTHREADS
3911  if ((ret = init_input_threads()) < 0)
3912  goto fail;
3913 #endif
3914 
/* Main loop: runs until SIGTERM, user quit, end of output, or hard error. */
3915  while (!received_sigterm) {
3916  int64_t cur_time= av_gettime_relative();
3917 
3918  /* if 'q' pressed, exits */
3919  if (stdin_interaction)
3920  if (check_keyboard_interaction(cur_time) < 0)
3921  break;
3922 
3923  /* check if there's any stream where output is still needed */
3924  if (!need_output()) {
3925  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3926  break;
3927  }
3928 
3929  ret = transcode_step();
3930  if (ret < 0) {
/* EOF/EAGAIN from a single step are not fatal for the loop. */
3931  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3932  continue;
3933  } else {
3934  char errbuf[128];
3935  av_strerror(ret, errbuf, sizeof(errbuf));
3936 
3937  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3938  break;
3939  }
3940  }
3941 
3942  /* dump report by using the output first video and audio streams */
3943  print_report(0, timer_start, cur_time);
3944  }
3945 #if HAVE_PTHREADS
/* NOTE(review): doxygen line 3946 missing here (extraction gap). */
3947 #endif
3948 
3949  /* at the end of stream, we must flush the decoder buffers */
3950  for (i = 0; i < nb_input_streams; i++) {
3951  ist = input_streams[i];
3952  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3953  process_input_packet(ist, NULL);
3954  }
3955  }
3956  flush_encoders();
3957 
3958  term_exit();
3959 
3960  /* write the trailer if needed and close file */
3961  for (i = 0; i < nb_output_files; i++) {
3962  os = output_files[i]->ctx;
3963  av_write_trailer(os);
3964  }
3965 
3966  /* dump report by using the first video and audio streams */
3967  print_report(1, timer_start, av_gettime_relative());
3968 
3969  /* close each encoder */
3970  for (i = 0; i < nb_output_streams; i++) {
3971  ost = output_streams[i];
3972  if (ost->encoding_needed) {
3973  av_freep(&ost->enc_ctx->stats_in);
3974  }
3975  }
3976 
3977  /* close each decoder */
3978  for (i = 0; i < nb_input_streams; i++) {
3979  ist = input_streams[i];
3980  if (ist->decoding_needed) {
3981  avcodec_close(ist->dec_ctx);
3982  if (ist->hwaccel_uninit)
3983  ist->hwaccel_uninit(ist->dec_ctx);
3984  }
3985  }
3986 
3987  /* finished ! */
3988  ret = 0;
3989 
/* Cleanup path, reached on both success and failure. */
3990  fail:
3991 #if HAVE_PTHREADS
/* NOTE(review): doxygen line 3992 missing here (extraction gap). */
3993 #endif
3994 
3995  if (output_streams) {
3996  for (i = 0; i < nb_output_streams; i++) {
3997  ost = output_streams[i];
3998  if (ost) {
3999  if (ost->logfile) {
4000  fclose(ost->logfile);
4001  ost->logfile = NULL;
4002  }
4003  av_freep(&ost->forced_kf_pts);
4004  av_freep(&ost->apad);
4005  av_freep(&ost->disposition);
4006  av_dict_free(&ost->encoder_opts);
4007  av_dict_free(&ost->swr_opts);
4008  av_dict_free(&ost->resample_opts);
4009  av_dict_free(&ost->bsf_args);
4010  }
4011  }
4012  }
4013  return ret;
4014 }
4015 
4016 
/* CPU user time consumed by this process, in microseconds (wall clock as a
 * last-resort fallback when no per-process clock is available). */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime.tv_sec * 1000000LL + usage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    FILETIME creation_time, exit_time, kernel_time, user_time;

    GetProcessTimes(GetCurrentProcess(), &creation_time, &exit_time,
                    &kernel_time, &user_time);
    /* FILETIME counts 100 ns ticks; divide by 10 to get microseconds. */
    return ((int64_t) user_time.dwHighDateTime << 32 | user_time.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
4034 
/* Peak memory usage of this process, in bytes (0 when not measurable). */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4052 
/* Intentionally empty log callback: discards all av_log output.
 * NOTE(review): presumably installed via av_log_set_callback() when running
 * as a daemon ("-d"); the installing line was lost in extraction — confirm. */
4053 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4054 {
4055 }
4056 
/* Program entry point: parse options, open inputs/outputs, run transcode(),
 * and report benchmark/decoding statistics. */
4057 int main(int argc, char **argv)
4058 {
4059  int ret;
4060  int64_t ti;
4061 
/* NOTE(review): doxygen line 4062 missing here (extraction gap). */
4063 
4064  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4065 
/* NOTE(review): doxygen line 4066 missing here (extraction gap). */
4067  parse_loglevel(argc, argv, options);
4068 
/* A leading "-d" switches to daemon mode; the flag is consumed from argv. */
4069  if(argc>1 && !strcmp(argv[1], "-d")){
4070  run_as_daemon=1;
/* NOTE(review): doxygen line 4071 missing here (extraction gap). */
4072  argc--;
4073  argv++;
4074  }
4075 
/* Library registration.
 * NOTE(review): doxygen lines 4076, 4078, 4080, 4082 missing here
 * (extraction gaps). */
4077 #if CONFIG_AVDEVICE
4079 #endif
4081  av_register_all();
4083 
4084  show_banner(argc, argv, options);
4085 
4086  term_init();
4087 
4088  /* parse options and open all input/output files */
4089  ret = ffmpeg_parse_options(argc, argv);
4090  if (ret < 0)
4091  exit_program(1);
4092 
4093  if (nb_output_files <= 0 && nb_input_files == 0) {
4094  show_usage();
4095  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4096  exit_program(1);
4097  }
4098 
4099  /* file converter / grab */
4100  if (nb_output_files <= 0) {
4101  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4102  exit_program(1);
4103  }
4104 
4105 // if (nb_input_files == 0) {
4106 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4107 // exit_program(1);
4108 // }
4109 
/* Time the whole conversion for the optional -benchmark report. */
4110  current_time = ti = getutime();
4111  if (transcode() < 0)
4112  exit_program(1);
4113  ti = getutime() - ti;
4114  if (do_benchmark) {
4115  printf("bench: utime=%0.3fs\n", ti / 1000000.0);
4116  }
4117  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
/* NOTE(review): doxygen lines 4118-4119 missing here (extraction gap);
 * presumably the statistics arguments and the condition guarding the
 * following exit_program(69) call — confirm against upstream. */
4120  exit_program(69);
4121 
/* NOTE(review): doxygen line 4122 missing here (extraction gap). */
4123  return main_return_code;
4124 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1471
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:268
#define extra_bits(eb)
Definition: intrax8.c:152
int got_output
Definition: ffmpeg.h:293
#define AV_DISPOSITION_METADATA
Definition: avformat.h:826
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2577
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1735
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1018
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1835
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:385
const struct AVCodec * codec
Definition: avcodec.h:1250
Definition: ffmpeg.h:364
AVRational framerate
Definition: avcodec.h:3023
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:319
const char * s
Definition: avisynth_c.h:631
Bytestream IO Context.
Definition: avio.h:111
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:464
void term_init(void)
Definition: ffmpeg.c:328
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:281
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:228
int nb_outputs
Definition: ffmpeg.h:244
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3454
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:280
AVDictionary * swr_opts
Definition: ffmpeg.h:434
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:254
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2090
int resample_channels
Definition: ffmpeg.h:288
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:307
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:2934
int stream_copy
Definition: ffmpeg.h:440
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:913
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3482
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1520
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2587
AVOption.
Definition: opt.h:255
AVRational frame_rate
Definition: ffmpeg.h:405
int64_t * forced_kf_pts
Definition: ffmpeg.h:413
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:287
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2663
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:429
#define CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:737
static int process_input(int file_index)
Definition: ffmpeg.c:3571
#define CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:882
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:428
int exit_on_error
Definition: ffmpeg_opt.c:99
const char * fmt
Definition: avisynth_c.h:632
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1187
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
#define CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:736
static int run_as_daemon
Definition: ffmpeg.c:125
Memory buffer source API.
AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:2745
void av_log_set_level(int level)
Set the log level.
Definition: log.c:382
AVRational framerate
Definition: ffmpeg.h:277
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVCodecParserContext * parser
Definition: ffmpeg.h:448
static int64_t cur_time
Definition: ffserver.c:253
FILE * av_fopen_utf8(const char *path, const char *mode)
Open a file using a UTF-8 filename.
Definition: file_open.c:92
int decoding_needed
Definition: ffmpeg.h:252
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:914
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1466
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:408
int index
stream index in AVFormatContext
Definition: avformat.h:843
int size
Definition: avcodec.h:1163
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4035
const char * b
Definition: vf_curves.c:109
static int nb_frames_dup
Definition: ffmpeg.c:126
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2485
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:461
#define AV_DISPOSITION_DUB
Definition: avformat.h:798
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1623
int eagain
Definition: ffmpeg.h:340
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1130
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1444
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:395
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:564
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:810
unsigned num_rects
Definition: avcodec.h:3511
AVFrame * filter_frame
Definition: ffmpeg.h:259
static int transcode_init(void)
Definition: ffmpeg.c:2617
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2492
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2502
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:92
enum AVMediaType type
Definition: avcodec.h:3194
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:812
#define FF_ARRAY_ELEMS(a)
static int init_input_threads(void)
Definition: ffmpeg.c:3491
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:652
discard all
Definition: avcodec.h:669
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:954
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:322
int64_t input_ts_offset
Definition: ffmpeg.h:342
int do_hex_dump
Definition: ffmpeg_opt.c:93
static AVPacket pkt
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1839
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2727
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2588
int nb_input_streams
Definition: ffmpeg.c:138
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:68
const char * name
Definition: ffmpeg.h:69
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2732
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:248
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2561
Picture data structure.
Definition: avcodec.h:3452
uint64_t packets_written
Definition: ffmpeg.h:454
AVCodec.
Definition: avcodec.h:3181
#define VSYNC_VFR
Definition: ffmpeg.h:54
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:180
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2022
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:483
int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Setup the picture fields based on the specified image parameters and the provided image data buffer...
Definition: avpicture.c:34
int print_stats
Definition: ffmpeg_opt.c:100
float dts_error_threshold
Definition: ffmpeg_opt.c:84
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:465
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:235
uint64_t data_size
Definition: ffmpeg.h:452
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:419
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
Definition: ffmpeg.c:2204
#define log2(x)
Definition: libm.h:122
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:802
struct FilterGraph * graph
Definition: ffmpeg.h:220
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1369
AVSubtitleRect ** rects
Definition: avcodec.h:3512
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:2060
int encoding_needed
Definition: ffmpeg.h:384
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:569
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4053
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3329
Format I/O context.
Definition: avformat.h:1272
uint64_t samples_decoded
Definition: ffmpeg.h:334
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:219
unsigned int nb_stream_indexes
Definition: avformat.h:1210
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:71
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:746
int64_t cur_dts
Definition: avformat.h:1019
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3484
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
uint64_t frames_decoded
Definition: ffmpeg.h:333
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:238
Public dictionary API.
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:892
char * logfile_prefix
Definition: ffmpeg.h:424
static uint8_t * subtitle_out
Definition: ffmpeg.c:133
#define DEFAULT_PASS_LOGFILENAME_PREFIX
Definition: ffmpeg.c:135
static int main_return_code
Definition: ffmpeg.c:316
static int64_t start_time
Definition: ffplay.c:320
int copy_initial_nonkeyframes
Definition: ffmpeg.h:442
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:118
if()
Definition: avfilter.c:975
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1993
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:532
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
Opaque data information usually continuous.
Definition: avutil.h:196
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
static void * input_thread(void *arg)
Definition: ffmpeg.c:3429
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:186
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:210
AVOptions.
int subtitle_header_size
Definition: avcodec.h:2958
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:642
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
attribute_deprecated void(* destruct)(struct AVPacket *)
Definition: avcodec.h:1183
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3453
int stdin_interaction
Definition: ffmpeg_opt.c:102
FILE * logfile
Definition: ffmpeg.h:425
AVDictionary * opts
Definition: ffmpeg.h:462
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
#define media_type_string
Definition: cmdutils.h:577
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1032
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
libavcodec/libavfilter gluing utilities
#define ECHO(name, type, min, max)
Definition: af_aecho.c:185
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2381
static int need_output(void)
Definition: ffmpeg.c:3281
int last_droped
Definition: ffmpeg.h:401
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:363
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:257
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:958
static double psnr(double d)
Definition: ffmpeg.c:1194
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1355
int do_benchmark
Definition: ffmpeg_opt.c:91
int audio_sync_method
Definition: ffmpeg_opt.c:87
int shortest
Definition: ffmpeg.h:468
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1340
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:2103
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:4017
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:111
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:94
const char * name
Definition: avcodec.h:5082
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int nb_streams
Definition: ffmpeg.h:348
pthread_t thread
Definition: ffmpeg.h:356
uint8_t * data
Definition: avcodec.h:1162
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * resample_opts
Definition: ffmpeg.h:435
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2498
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:67
AVFilterContext * filter
Definition: ffmpeg.h:225
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4123
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:84
int nb_input_files
Definition: ffmpeg.c:140
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:364
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1254
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:819
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1199
int resample_sample_rate
Definition: ffmpeg.h:287
uint8_t * data
Definition: avcodec.h:1112
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:367
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:759
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:494
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:283
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3485
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:365
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2720
AVCodec * dec
Definition: ffmpeg.h:257
static int64_t duration
Definition: ffplay.c:321
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1208
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2494
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:177
int top_field_first
Definition: ffmpeg.h:278
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1400
int nb_output_streams
Definition: ffmpeg.c:143
int file_index
Definition: ffmpeg.h:248
int duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1180
const OptionDef options[]
Definition: ffserver.c:3798
struct AVBitStreamFilterContext * next
Definition: avcodec.h:5077
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1967
unsigned int * stream_index
Definition: avformat.h:1209
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:284
int resample_height
Definition: ffmpeg.h:282
int wrap_correction_done
Definition: ffmpeg.h:269
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:271
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:258
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:824
unsigned m
Definition: audioconvert.c:187
int av_buffersrc_add_ref(AVFilterContext *buffer_src, AVFilterBufferRef *picref, int flags)
Add buffer data in picref to buffer_src.
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:117
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1291
int64_t next_dts
Definition: ffmpeg.h:264
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1208
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:140
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:477
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:50
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2845
static volatile int transcode_init_done
Definition: ffmpeg.c:315
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3195
int rate_emu
Definition: ffmpeg.h:351
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1533
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1235
static void reset_eagain(void)
Definition: ffmpeg.c:3555
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:595
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2354
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:324
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1549
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:594
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:2815
FilterGraph ** filtergraphs
Definition: ffmpeg.c:147
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:424
AVFilterContext * filter
Definition: ffmpeg.h:218
#define CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:874
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:763
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:321
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:474
int64_t start
Definition: ffmpeg.h:261
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:824
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3483
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:331
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:394
int video_sync_method
Definition: ffmpeg_opt.c:88
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:128
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:175
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
char * sdp_filename
Definition: ffmpeg_opt.c:80
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
int last_nb0_frames[3]
Definition: ffmpeg.h:402
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2140
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:107
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:112
int capabilities
Codec capabilities.
Definition: avcodec.h:3200
int initial_padding
Audio only.
Definition: avcodec.h:3015
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:122
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
Release bitstream filter context.
unsigned int nb_programs
Definition: avformat.h:1421
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:199
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:421
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1145
const char * arg
Definition: jacosubdec.c:66
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1335
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:491
AVChapter ** chapters
Definition: avformat.h:1472
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:342
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2327
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:123
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1114
int av_log_get_level(void)
Get the current log level.
Definition: log.c:377
const char * name
Name of the codec implementation.
Definition: avcodec.h:3188
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:746
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:70
int side_data_elems
Definition: avcodec.h:1174
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:546
int force_fps
Definition: ffmpeg.h:406
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:925
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1205
#define FFMAX(a, b)
Definition: common.h:64
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:126
int qp_hist
Definition: ffmpeg_opt.c:101
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
float frame_drop_threshold
Definition: ffmpeg_opt.c:89
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1168
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2891
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2046
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:145
uint32_t end_display_time
Definition: avcodec.h:3510
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3513
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:861
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2304
OutputFilter * filter
Definition: ffmpeg.h:427
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:427
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational frame_aspect_ratio
Definition: ffmpeg.h:410
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:801
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1484
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:809
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:630
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1328
static int nb_frames_drop
Definition: ffmpeg.c:127
A bitmap, pict will be set.
Definition: avcodec.h:3464
int nb_output_files
Definition: ffmpeg.c:145
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:160
int bit_rate
the average bitrate
Definition: avcodec.h:1305
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:197
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
static int transcode(void)
Definition: ffmpeg.c:3892
char filename[1024]
input or output filename
Definition: avformat.h:1348
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3492
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:127
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:247
#define FFMIN(a, b)
Definition: common.h:66
float y
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:397
int saw_first_ts
Definition: ffmpeg.h:274
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1855
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:78
#define FFSIGN(a)
Definition: common.h:62
struct OutputStream * ost
Definition: ffmpeg.h:226
ret
Definition: avfilter.c:974
int width
picture width / height.
Definition: avcodec.h:1414
PVOID HANDLE
char * apad
Definition: ffmpeg.h:437
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:418
const char * name
Definition: avformat.h:513
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:221
int nb_filtergraphs
Definition: ffmpeg.c:148
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:80
int64_t last_ts
Definition: ffmpeg.h:344
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3518
#define FFABS(a)
Definition: common.h:61
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:283
Keep a reference to the frame.
Definition: buffersrc.h:62
int do_pkt_dump
Definition: ffmpeg_opt.c:94
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2500
int64_t max_frames
Definition: ffmpeg.h:398
#define AV_RL32
Definition: intreadwrite.h:146
#define CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:756
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:323
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:299
float u
int audio_channels_mapped
Definition: ffmpeg.h:422
int n
Definition: avisynth_c.h:547
AVDictionary * metadata
Definition: avformat.h:916
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1378
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:963
Usually treated as AVMEDIA_TYPE_DATA.
Definition: avutil.h:193
Opaque data information usually sparse.
Definition: avutil.h:198
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:107
static int restore_tty
Definition: ffmpeg.c:154
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
static int got_eagain(void)
Definition: ffmpeg.c:3546
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:223
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:141
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:2972
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:123
static void flush(AVCodecContext *avctx)
Definition: aacdec.c:514
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:404
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:811
int ret
Definition: ffmpeg.h:294
int audio_volume
Definition: ffmpeg_opt.c:86
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:842
#define av_dlog(pctx,...)
av_dlog macros
Definition: log.h:330
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:472
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
InputFilter ** filters
Definition: ffmpeg.h:309
int fix_sub_duration
Definition: ffmpeg.h:291
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:347
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4136
Definition: ffmpeg.h:68
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2005
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:64
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:797
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:166
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2576
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Filter bitstream.
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: frame.h:351
int frame_size
Definition: mxfenc.c:1803
attribute_deprecated void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:50
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:825
int ost_index
Definition: ffmpeg.h:463
struct InputStream * sync_ist
Definition: ffmpeg.h:388
AVS_Value src
Definition: avisynth_c.h:482
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:721
enum AVMediaType codec_type
Definition: avcodec.h:1249
double ts_scale
Definition: ffmpeg.h:273
int unavailable
Definition: ffmpeg.h:439
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: avcodec.h:3201
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:162
Immediately push the frame to the output.
Definition: buffersrc.h:55
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2438
enum AVCodecID codec_id
Definition: avcodec.h:1258
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:312
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:253
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1479
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:181
float max_error_rate
Definition: ffmpeg_opt.c:104
int sample_rate
samples per second
Definition: avcodec.h:1985
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
uint64_t frames_encoded
Definition: ffmpeg.h:456
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1992
AVIOContext * pb
I/O context.
Definition: avformat.h:1314
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:166
int ist_index
Definition: ffmpeg.h:341
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:538
uint8_t flags
Definition: pixdesc.h:90
int debug
debug
Definition: avcodec.h:2565
static void print_sdp(void)
Definition: ffmpeg.c:2346
const char * graph_desc
Definition: ffmpeg.h:236
int guess_layout_max
Definition: ffmpeg.h:279
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
int64_t start_time
Definition: ffmpeg.h:345
#define AVFMT_RAWPICTURE
Format wants AVPicture structure for raw picture data.
Definition: avformat.h:468
main external API structure.
Definition: avcodec.h:1241
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:341
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:426
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:765
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2826
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:252
const char * attachment_filename
Definition: ffmpeg.h:441
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1273
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1718
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:555
AVCodecContext * enc_ctx
Definition: ffmpeg.h:396
void * buf
Definition: avisynth_c.h:553
AVFrame * decoded_frame
Definition: ffmpeg.h:258
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1356
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
Replacements for frequently missing libm functions.
struct AVBitStreamFilter * filter
Definition: avcodec.h:5075
AVCodecContext * dec_ctx
Definition: ffmpeg.h:256
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:3803
AVStream * st
Definition: ffmpeg.h:249
int * audio_channels_map
Definition: ffmpeg.h:421
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:422
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1495
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:142
int index
Definition: gxfenc.c:89
rational number numerator/denominator
Definition: rational.h:43
int file_index
Definition: ffmpeg.h:380
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:48
static int current_time
Definition: ffmpeg.c:130
int64_t sync_opts
Definition: ffmpeg.h:389
char * vstats_filename
Definition: ffmpeg_opt.c:79
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:152
char * disposition
Definition: ffmpeg.h:444
#define mid_pred
Definition: mathops.h:96
AVMediaType
Definition: avutil.h:192
discard useless packets like 0 size packets in avi
Definition: avcodec.h:664
static av_always_inline av_const long int lrint(double x)
Definition: libm.h:148
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:917
int nb_streams_warn
Definition: ffmpeg.h:350
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2246
AVDictionary * decoder_opts
Definition: ffmpeg.h:276
int autorotate
Definition: ffmpeg.h:281
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:568
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1332
int showed_multi_packet_warning
Definition: ffmpeg.h:275
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:103
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:3609
int64_t ts_offset
Definition: ffmpeg.h:343
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:328
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:808
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3849
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:430
static void free_input_threads(void)
Definition: ffmpeg.c:3471
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3338
misc parsing utilities
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1475
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:358
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:560
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:262
AVFrame * filtered_frame
Definition: ffmpeg.h:399
int source_index
Definition: ffmpeg.h:382
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:265
static volatile int received_nb_signals
Definition: ffmpeg.c:314
int copy_prior_start
Definition: ffmpeg.h:443
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:462
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
Read the file with name filename, and put its content in a newly allocated 0-terminated buffer...
Definition: cmdutils.c:1878
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1321
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:586
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:72
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:84
int nb_filters
Definition: ffmpeg.h:310
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:133
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2390
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1357
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
uint8_t level
Definition: svq3.c:150
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:417
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:265
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:68
int resample_sample_fmt
Definition: ffmpeg.h:286
int forced_kf_count
Definition: ffmpeg.h:414
int64_t start
Definition: avformat.h:1238
OSTFinished finished
Definition: ffmpeg.h:438
char * forced_keyframes
Definition: ffmpeg.h:416
uint64_t data_size
Definition: ffmpeg.h:329
#define CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time...
Definition: avcodec.h:847
int resample_width
Definition: ffmpeg.h:283
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:267
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1032
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1173
struct FilterGraph * graph
Definition: ffmpeg.h:227
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
uint64_t limit_filesize
Definition: ffmpeg.h:466
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1349
AVIOContext * progress_avio
Definition: ffmpeg.c:131
AVThreadMessageQueue * in_thread_queue
Definition: ffmpeg.h:355
int main(int argc, char **argv)
Definition: ffmpeg.c:4057
int reinit_filters
Definition: ffmpeg.h:312
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:465
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:261
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:894
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:477
AVStream * st
Definition: muxing.c:54
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:905
static AVCodecContext * dec_ctx
uint32_t start_display_time
Definition: avcodec.h:3509
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1008
uint64_t samples_encoded
Definition: ffmpeg.h:457
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1237
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:206
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:2782
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:903
char * key
Definition: dict.h:87
static FILE * vstats_file
Definition: ffmpeg.c:110
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:82
AVFrame * last_frame
Definition: ffmpeg.h:400
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int copy_ts
Definition: ffmpeg_opt.c:95
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1284
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3644
AVFormatContext * ctx
Definition: ffmpeg.h:338
static struct termios oldtty
Definition: ffmpeg.c:153
AVCodec * enc
Definition: ffmpeg.h:397
AVSubtitle subtitle
Definition: ffmpeg.h:295
int eof_reached
Definition: ffmpeg.h:339
int forced_kf_index
Definition: ffmpeg.h:415
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:759
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:475
char * avfilter
Definition: ffmpeg.h:428
uint8_t * name
Definition: ffmpeg.h:221
char * value
Definition: dict.h:88
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:372
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:83
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:707
int channels
number of audio channels
Definition: avcodec.h:1986
#define av_log2
Definition: intmath.h:105
int top_field_first
Definition: ffmpeg.h:407
OutputFilter ** outputs
Definition: ffmpeg.h:243
InputFile ** input_files
Definition: ffmpeg.c:139
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2428
void av_log_set_flags(int arg)
Definition: log.c:387
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:237
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:220
AVDictionary * bsf_args
Definition: ffmpeg.h:436
AVFormatContext * ctx
Definition: ffmpeg.h:461
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:800
void show_usage(void)
Definition: ffmpeg_opt.c:2765
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> out
An instance of a filter.
Definition: avfilter.h:633
#define LIBAVCODEC_IDENT
Definition: version.h:43
char * hwaccel_device
Definition: ffmpeg.h:316
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1161
AVDictionary * encoder_opts
Definition: ffmpeg.h:433
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:967
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:108
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4265
int height
Definition: frame.h:220
InputFilter ** inputs
Definition: ffmpeg.h:241
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1982
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:325
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:628
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:324
OutputFile ** output_files
Definition: ffmpeg.c:144
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
#define av_malloc_array(a, b)
static void flush_encoders(void)
Definition: ffmpeg.c:1640
int copy_tb
Definition: ffmpeg_opt.c:97
static volatile int received_sigterm
Definition: ffmpeg.c:313
#define FFSWAP(type, a, b)
Definition: common.h:69
int discard
Definition: ffmpeg.h:250
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3526
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2011
int thread_queue_size
Definition: ffmpeg.h:359
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:299
int stream_index
Definition: avcodec.h:1164
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:884
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:315
enum AVSubtitleType type
Definition: avcodec.h:3493
int64_t first_pts
Definition: ffmpeg.h:392
int nb_inputs
Definition: ffmpeg.h:242
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:907
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:223
#define DECODING_FOR_OST
Definition: ffmpeg.h:253
int index
Definition: ffmpeg.h:381
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1062
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
uint64_t resample_channel_layout
Definition: ffmpeg.h:289
OSTFinished
Definition: ffmpeg.h:374
This structure stores compressed data.
Definition: avcodec.h:1139
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:958
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:2189
int non_blocking
Definition: ffmpeg.h:357
int delay
Codec delay.
Definition: avcodec.h:1402
int debug_ts
Definition: ffmpeg_opt.c:98
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3311
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:225
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
static void sigterm_handler(int sig)
Definition: ffmpeg.c:319
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1155
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:117
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1298
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:65
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:241
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1422
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:799
int joined
Definition: ffmpeg.h:358
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
InputStream ** input_streams
Definition: ffmpeg.c:137
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:66
Definition: ffmpeg.h:368
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:735
struct InputStream::@25 prev_sub
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:2957