FFmpeg  2.8.15
ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_ISATTY
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 #endif
43 
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
47 #include "libavutil/opt.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
66 
67 # include "libavfilter/avcodec.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
114 
115 static FILE *vstats_file;
116 
117 const char *const forced_keyframes_const_names[] = {
118  "n",
119  "n_forced",
120  "prev_forced_n",
121  "prev_forced_t",
122  "t",
123  NULL
124 };
125 
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 
130 static int run_as_daemon = 0;
131 static int nb_frames_dup = 0;
132 static int nb_frames_drop = 0;
133 static int64_t decode_error_stat[2];
134 
135 static int current_time;
137 
139 
144 
149 
152 
153 #if HAVE_TERMIOS_H
154 
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
158 #endif
159 
160 #if HAVE_PTHREADS
161 static void free_input_threads(void);
162 #endif
163 
164 /* sub2video hack:
165  Convert subtitles to video with alpha to insert them in filter graphs.
166  This is a temporary solution until libavfilter gets real subtitles support.
167  */
168 
 169 static int sub2video_get_blank_frame(InputStream *ist)
 170 {
171  int ret;
172  AVFrame *frame = ist->sub2video.frame;
173 
174  av_frame_unref(frame);
175  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
 177  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
 178  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
179  return ret;
180  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181  return 0;
182 }
183 
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
185  AVSubtitleRect *r)
186 {
187  uint32_t *pal, *dst2;
188  uint8_t *src, *src2;
189  int x, y;
190 
191  if (r->type != SUBTITLE_BITMAP) {
192  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
193  return;
194  }
195  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197  r->x, r->y, r->w, r->h, w, h
198  );
199  return;
200  }
201 
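     /* the sub2video canvas is a 32-bit RGBA picture, so each pixel is 4 bytes
        wide; the palette lookup below expands every 8-bit source index into
        one such pixel */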
202  dst += r->y * dst_linesize + r->x * 4;
203  src = r->pict.data[0];
204  pal = (uint32_t *)r->pict.data[1];
205  for (y = 0; y < r->h; y++) {
206  dst2 = (uint32_t *)dst;
207  src2 = src;
208  for (x = 0; x < r->w; x++)
209  *(dst2++) = pal[*(src2++)];
210  dst += dst_linesize;
211  src += r->pict.linesize[0];
212  }
213 }
214 
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 {
217  AVFrame *frame = ist->sub2video.frame;
218  int i;
219 
220  av_assert1(frame->data[0]);
221  ist->sub2video.last_pts = frame->pts = pts;
222  for (i = 0; i < ist->nb_filters; i++)
 223  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
 224  AV_BUFFERSRC_FLAG_KEEP_REF |
 225  AV_BUFFERSRC_FLAG_PUSH);
 226 }
227 
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 {
230  AVFrame *frame = ist->sub2video.frame;
231  int8_t *dst;
232  int dst_linesize;
233  int num_rects, i;
234  int64_t pts, end_pts;
235 
236  if (!frame)
237  return;
238  if (sub) {
239  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240  AV_TIME_BASE_Q, ist->st->time_base);
241  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242  AV_TIME_BASE_Q, ist->st->time_base);
243  num_rects = sub->num_rects;
244  } else {
245  pts = ist->sub2video.end_pts;
246  end_pts = INT64_MAX;
247  num_rects = 0;
248  }
249  if (sub2video_get_blank_frame(ist) < 0) {
 250  av_log(ist->dec_ctx, AV_LOG_ERROR,
 251  "Impossible to get a blank canvas.\n");
252  return;
253  }
254  dst = frame->data [0];
255  dst_linesize = frame->linesize[0];
256  for (i = 0; i < num_rects; i++)
257  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258  sub2video_push_ref(ist, pts);
259  ist->sub2video.end_pts = end_pts;
260 }
261 
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 {
264  InputFile *infile = input_files[ist->file_index];
265  int i, j, nb_reqs;
266  int64_t pts2;
267 
268  /* When a frame is read from a file, examine all sub2video streams in
269  the same file and send the sub2video frame again. Otherwise, decoded
270  video frames could be accumulating in the filter graph while a filter
271  (possibly overlay) is desperately waiting for a subtitle frame. */
272  for (i = 0; i < infile->nb_streams; i++) {
273  InputStream *ist2 = input_streams[infile->ist_index + i];
274  if (!ist2->sub2video.frame)
275  continue;
276  /* subtitles seem to be usually muxed ahead of other streams;
277  if not, subtracting a larger time here is necessary */
278  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279  /* do not send the heartbeat frame if the subtitle is already ahead */
280  if (pts2 <= ist2->sub2video.last_pts)
281  continue;
282  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283  sub2video_update(ist2, NULL);
284  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286  if (nb_reqs)
287  sub2video_push_ref(ist2, pts2);
288  }
289 }
290 
291 static void sub2video_flush(InputStream *ist)
292 {
293  int i;
294 
295  if (ist->sub2video.end_pts < INT64_MAX)
296  sub2video_update(ist, NULL);
297  for (i = 0; i < ist->nb_filters; i++)
299 }
300 
301 /* end of sub2video hack */
302 
303 static void term_exit_sigsafe(void)
304 {
305 #if HAVE_TERMIOS_H
306  if(restore_tty)
307  tcsetattr (0, TCSANOW, &oldtty);
308 #endif
309 }
310 
311 void term_exit(void)
312 {
313  av_log(NULL, AV_LOG_QUIET, "%s", "");
 314  term_exit_sigsafe();
 315 }
316 
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
322 
 323 static void
 324 sigterm_handler(int sig)
 325 {
326  received_sigterm = sig;
 327  received_nb_signals++;
 328  term_exit_sigsafe();
 329  if(received_nb_signals > 3) {
330  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331  strlen("Received > 3 system signals, hard exiting\n"));
332 
333  exit(123);
334  }
335 }
336 
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339 {
340  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
341 
342  switch (fdwCtrlType)
343  {
344  case CTRL_C_EVENT:
345  case CTRL_BREAK_EVENT:
346  sigterm_handler(SIGINT);
347  return TRUE;
348 
349  case CTRL_CLOSE_EVENT:
350  case CTRL_LOGOFF_EVENT:
351  case CTRL_SHUTDOWN_EVENT:
352  sigterm_handler(SIGTERM);
 353  /* Basically, with these 3 events, when we return from this method the
 354  process is hard terminated, so stall as long as necessary
 355  to let the main thread(s) clean up and gracefully terminate
 356  (we have at most 5 seconds, but should be done far before that). */
357  while (!ffmpeg_exited) {
358  Sleep(0);
359  }
360  return TRUE;
361 
362  default:
363  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
364  return FALSE;
365  }
366 }
367 #endif
368 
369 void term_init(void)
370 {
371 #if HAVE_TERMIOS_H
372  if(!run_as_daemon){
373  struct termios tty;
374  int istty = 1;
375 #if HAVE_ISATTY
376  istty = isatty(0) && isatty(2);
377 #endif
378  if (istty && tcgetattr (0, &tty) == 0) {
379  oldtty = tty;
380  restore_tty = 1;
381 
382  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383  |INLCR|IGNCR|ICRNL|IXON);
384  tty.c_oflag |= OPOST;
385  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386  tty.c_cflag &= ~(CSIZE|PARENB);
387  tty.c_cflag |= CS8;
388  tty.c_cc[VMIN] = 1;
389  tty.c_cc[VTIME] = 0;
390 
391  tcsetattr (0, TCSANOW, &tty);
392  }
393  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394  }
395 #endif
396 
397  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 #ifdef SIGXCPU
400  signal(SIGXCPU, sigterm_handler);
401 #endif
402 #if HAVE_SETCONSOLECTRLHANDLER
403  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 #endif
405 }
406 
407 /* read a key without blocking */
408 static int read_key(void)
409 {
410  unsigned char ch;
411 #if HAVE_TERMIOS_H
412  int n = 1;
413  struct timeval tv;
414  fd_set rfds;
415 
416  FD_ZERO(&rfds);
417  FD_SET(0, &rfds);
418  tv.tv_sec = 0;
419  tv.tv_usec = 0;
420  n = select(1, &rfds, NULL, NULL, &tv);
421  if (n > 0) {
422  n = read(0, &ch, 1);
423  if (n == 1)
424  return ch;
425 
426  return n;
427  }
428 #elif HAVE_KBHIT
429 # if HAVE_PEEKNAMEDPIPE
430  static int is_pipe;
431  static HANDLE input_handle;
432  DWORD dw, nchars;
433  if(!input_handle){
434  input_handle = GetStdHandle(STD_INPUT_HANDLE);
435  is_pipe = !GetConsoleMode(input_handle, &dw);
436  }
437 
438  if (is_pipe) {
 439  /* When running under a GUI, you will end up here. */
440  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441  // input pipe may have been closed by the program that ran ffmpeg
442  return -1;
443  }
444  //Read it
445  if(nchars != 0) {
446  read(0, &ch, 1);
447  return ch;
448  }else{
449  return -1;
450  }
451  }
452 # endif
453  if(kbhit())
454  return(getch());
455 #endif
456  return -1;
457 }
458 
459 static int decode_interrupt_cb(void *ctx)
460 {
 461  return received_nb_signals > transcode_init_done;
 462 }
463 
 464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
 465 
466 static void ffmpeg_cleanup(int ret)
467 {
468  int i, j;
469 
470  if (do_benchmark) {
471  int maxrss = getmaxrss() / 1024;
472  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
473  }
474 
475  for (i = 0; i < nb_filtergraphs; i++) {
476  FilterGraph *fg = filtergraphs[i];
478  for (j = 0; j < fg->nb_inputs; j++) {
479  av_freep(&fg->inputs[j]->name);
480  av_freep(&fg->inputs[j]);
481  }
482  av_freep(&fg->inputs);
483  for (j = 0; j < fg->nb_outputs; j++) {
484  av_freep(&fg->outputs[j]->name);
485  av_freep(&fg->outputs[j]);
486  }
487  av_freep(&fg->outputs);
488  av_freep(&fg->graph_desc);
489 
490  av_freep(&filtergraphs[i]);
491  }
492  av_freep(&filtergraphs);
493 
495 
496  /* close files */
497  for (i = 0; i < nb_output_files; i++) {
498  OutputFile *of = output_files[i];
 499  AVFormatContext *s;
 500  if (!of)
501  continue;
502  s = of->ctx;
503  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504  avio_closep(&s->pb);
506  av_dict_free(&of->opts);
507 
508  av_freep(&output_files[i]);
509  }
510  for (i = 0; i < nb_output_streams; i++) {
511  OutputStream *ost = output_streams[i];
513 
514  if (!ost)
515  continue;
516 
517  bsfc = ost->bitstream_filters;
518  while (bsfc) {
519  AVBitStreamFilterContext *next = bsfc->next;
521  bsfc = next;
522  }
523  ost->bitstream_filters = NULL;
525  av_frame_free(&ost->last_frame);
526 
527  av_parser_close(ost->parser);
528 
529  av_freep(&ost->forced_keyframes);
531  av_freep(&ost->avfilter);
532  av_freep(&ost->logfile_prefix);
533 
535  ost->audio_channels_mapped = 0;
536 
538 
539  av_freep(&output_streams[i]);
540  }
541 #if HAVE_PTHREADS
 542  free_input_threads();
 543 #endif
544  for (i = 0; i < nb_input_files; i++) {
545  avformat_close_input(&input_files[i]->ctx);
546  av_freep(&input_files[i]);
547  }
548  for (i = 0; i < nb_input_streams; i++) {
549  InputStream *ist = input_streams[i];
550 
553  av_dict_free(&ist->decoder_opts);
556  av_freep(&ist->filters);
557  av_freep(&ist->hwaccel_device);
558 
560 
561  av_freep(&input_streams[i]);
562  }
563 
564  if (vstats_file)
565  fclose(vstats_file);
567 
568  av_freep(&input_streams);
569  av_freep(&input_files);
570  av_freep(&output_streams);
571  av_freep(&output_files);
572 
573  uninit_opts();
574 
576 
577  if (received_sigterm) {
578  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579  (int) received_sigterm);
580  } else if (ret && transcode_init_done) {
581  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
582  }
583  term_exit();
584  ffmpeg_exited = 1;
585 }
586 
 587 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
 588 {
589  AVDictionaryEntry *t = NULL;
590 
591  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
 592  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
 593  }
594 }
595 
 596 static void assert_avoptions(AVDictionary *m)
 597 {
 598  AVDictionaryEntry *t;
 599  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
601  exit_program(1);
602  }
603 }
604 
605 static void abort_codec_experimental(AVCodec *c, int encoder)
606 {
607  exit_program(1);
608 }
609 
610 static void update_benchmark(const char *fmt, ...)
611 {
612  if (do_benchmark_all) {
613  int64_t t = getutime();
614  va_list va;
615  char buf[1024];
616 
617  if (fmt) {
618  va_start(va, fmt);
619  vsnprintf(buf, sizeof(buf), fmt, va);
620  va_end(va);
621  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
622  }
623  current_time = t;
624  }
625 }
626 
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
628 {
629  int i;
630  for (i = 0; i < nb_output_streams; i++) {
631  OutputStream *ost2 = output_streams[i];
632  ost2->finished |= ost == ost2 ? this_stream : others;
633  }
634 }
635 
 636 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
 637 {
 638  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
 639  AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
640  int ret;
641 
642  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
 643  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
 644  if (ost->st->codec->extradata) {
 645  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
 646  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
 647  }
648  }
649 
652  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
653 
654  /*
655  * Audio encoders may split the packets -- #frames in != #packets out.
656  * But there is no reordering, so we can limit the number of output packets
657  * by simply dropping them here.
658  * Counting encoded video frames needs to be done separately because of
659  * reordering, see do_video_out()
660  */
661  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
662  if (ost->frame_number >= ost->max_frames) {
663  av_free_packet(pkt);
664  return;
665  }
666  ost->frame_number++;
667  }
668  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669  int i;
671  NULL);
672  ost->quality = sd ? AV_RL32(sd) : -1;
673  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 
675  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676  if (sd && i < sd[5])
677  ost->error[i] = AV_RL64(sd + 8 + 8*i);
678  else
679  ost->error[i] = -1;
680  }
681  }
682 
683  if (bsfc)
685 
686  while (bsfc) {
687  AVPacket new_pkt = *pkt;
688  AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
689  bsfc->filter->name,
690  NULL, 0);
691  int a = av_bitstream_filter_filter(bsfc, avctx,
692  bsf_arg ? bsf_arg->value : NULL,
693  &new_pkt.data, &new_pkt.size,
694  pkt->data, pkt->size,
695  pkt->flags & AV_PKT_FLAG_KEY);
697  if(a == 0 && new_pkt.data != pkt->data
 698 #if FF_API_DESTRUCT_PACKET
 699  && new_pkt.destruct
700 #endif
701  ) {
 703  uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); // the new packet should be a subset of the old one, so it cannot overflow
704  if(t) {
705  memcpy(t, new_pkt.data, new_pkt.size);
706  memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
707  new_pkt.data = t;
708  new_pkt.buf = NULL;
709  a = 1;
710  } else
711  a = AVERROR(ENOMEM);
712  }
713  if (a > 0) {
714  pkt->side_data = NULL;
715  pkt->side_data_elems = 0;
716  av_free_packet(pkt);
717  new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
718  av_buffer_default_free, NULL, 0);
719  if (!new_pkt.buf)
720  exit_program(1);
721  } else if (a < 0) {
722  new_pkt = *pkt;
723  av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
724  bsfc->filter->name, pkt->stream_index,
725  avctx->codec ? avctx->codec->name : "copy");
726  print_error("", a);
727  if (exit_on_error)
728  exit_program(1);
729  }
730  *pkt = new_pkt;
731 
732  bsfc = bsfc->next;
733  }
734 
735  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
736  if (pkt->dts != AV_NOPTS_VALUE &&
737  pkt->pts != AV_NOPTS_VALUE &&
738  pkt->dts > pkt->pts) {
739  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
740  pkt->dts, pkt->pts,
741  ost->file_index, ost->st->index);
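     /* pick the middle value of pkt->pts, pkt->dts and last_mux_dts+1:
        summing all three and subtracting the minimum and the maximum
        leaves the median, which becomes the replacement timestamp */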
742  pkt->pts =
743  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
744  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
745  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
746  }
747  if(
748  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
749  pkt->dts != AV_NOPTS_VALUE &&
750  ost->last_mux_dts != AV_NOPTS_VALUE) {
751  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
752  if (pkt->dts < max) {
753  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
754  av_log(s, loglevel, "Non-monotonous DTS in output stream "
755  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
756  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
757  if (exit_on_error) {
758  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
759  exit_program(1);
760  }
761  av_log(s, loglevel, "changing to %"PRId64". This may result "
762  "in incorrect timestamps in the output file.\n",
763  max);
764  if(pkt->pts >= pkt->dts)
765  pkt->pts = FFMAX(pkt->pts, max);
766  pkt->dts = max;
767  }
768  }
769  }
770  ost->last_mux_dts = pkt->dts;
771 
772  ost->data_size += pkt->size;
773  ost->packets_written++;
774 
775  pkt->stream_index = ost->index;
776 
777  if (debug_ts) {
778  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
779  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
781  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
782  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
783  pkt->size
784  );
785  }
786 
787  ret = av_interleaved_write_frame(s, pkt);
788  if (ret < 0) {
789  print_error("av_interleaved_write_frame()", ret);
790  main_return_code = 1;
 791  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
 792  }
793  av_free_packet(pkt);
794 }
795 
 796 static void close_output_stream(OutputStream *ost)
 797 {
798  OutputFile *of = output_files[ost->file_index];
799 
800  ost->finished |= ENCODER_FINISHED;
801  if (of->shortest) {
802  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
803  of->recording_time = FFMIN(of->recording_time, end);
804  }
805 }
806 
 807 static int check_recording_time(OutputStream *ost)
 808 {
809  OutputFile *of = output_files[ost->file_index];
810 
811  if (of->recording_time != INT64_MAX &&
 812  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
 813  AV_TIME_BASE_Q) >= 0) {
814  close_output_stream(ost);
815  return 0;
816  }
817  return 1;
818 }
819 
 820 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
 821  AVFrame *frame)
822 {
823  AVCodecContext *enc = ost->enc_ctx;
824  AVPacket pkt;
825  int got_packet = 0;
826 
827  av_init_packet(&pkt);
828  pkt.data = NULL;
829  pkt.size = 0;
830 
831  if (!check_recording_time(ost))
832  return;
833 
834  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
835  frame->pts = ost->sync_opts;
836  ost->sync_opts = frame->pts + frame->nb_samples;
837  ost->samples_encoded += frame->nb_samples;
838  ost->frames_encoded++;
839 
840  av_assert0(pkt.size || !pkt.data);
842  if (debug_ts) {
843  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
844  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
845  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
846  enc->time_base.num, enc->time_base.den);
847  }
848 
849  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
850  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
851  exit_program(1);
852  }
853  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
854 
855  if (got_packet) {
856  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
857 
858  if (debug_ts) {
859  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
860  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
861  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
862  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
863  }
864 
865  write_frame(s, &pkt, ost);
866  }
867 }
868 
 869 static void do_subtitle_out(AVFormatContext *s,
 870  OutputStream *ost,
871  InputStream *ist,
872  AVSubtitle *sub)
873 {
874  int subtitle_out_max_size = 1024 * 1024;
875  int subtitle_out_size, nb, i;
876  AVCodecContext *enc;
877  AVPacket pkt;
878  int64_t pts;
879 
880  if (sub->pts == AV_NOPTS_VALUE) {
881  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
882  if (exit_on_error)
883  exit_program(1);
884  return;
885  }
886 
887  enc = ost->enc_ctx;
888 
889  if (!subtitle_out) {
890  subtitle_out = av_malloc(subtitle_out_max_size);
891  if (!subtitle_out) {
892  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
893  exit_program(1);
894  }
895  }
896 
 897  /* Note: DVB subtitles need one packet to draw them and another
 898  packet to clear them */
899  /* XXX: signal it in the codec context ? */
 900  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
 901  nb = 2;
902  else
903  nb = 1;
904 
905  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
906  pts = sub->pts;
907  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
908  pts -= output_files[ost->file_index]->start_time;
909  for (i = 0; i < nb; i++) {
910  unsigned save_num_rects = sub->num_rects;
911 
912  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
913  if (!check_recording_time(ost))
914  return;
915 
916  sub->pts = pts;
917  // start_display_time is required to be 0
918  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
 919  sub->end_display_time -= sub->start_display_time;
 920  sub->start_display_time = 0;
921  if (i == 1)
922  sub->num_rects = 0;
923 
924  ost->frames_encoded++;
925 
926  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
927  subtitle_out_max_size, sub);
928  if (i == 1)
929  sub->num_rects = save_num_rects;
930  if (subtitle_out_size < 0) {
931  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
932  exit_program(1);
933  }
934 
935  av_init_packet(&pkt);
936  pkt.data = subtitle_out;
937  pkt.size = subtitle_out_size;
938  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
939  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
940  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
941  /* XXX: the pts correction is handled here. Maybe handling
942  it in the codec would be better */
943  if (i == 0)
944  pkt.pts += 90 * sub->start_display_time;
945  else
946  pkt.pts += 90 * sub->end_display_time;
947  }
948  pkt.dts = pkt.pts;
949  write_frame(s, &pkt, ost);
950  }
951 }
952 
 953 static void do_video_out(AVFormatContext *s,
 954  OutputStream *ost,
955  AVFrame *next_picture,
956  double sync_ipts)
957 {
958  int ret, format_video_sync;
959  AVPacket pkt;
960  AVCodecContext *enc = ost->enc_ctx;
961  AVCodecContext *mux_enc = ost->st->codec;
962  int nb_frames, nb0_frames, i;
963  double delta, delta0;
964  double duration = 0;
965  int frame_size = 0;
966  InputStream *ist = NULL;
 967  AVFilterContext *filter = ost->filter->filter;
 968 
969  if (ost->source_index >= 0)
970  ist = input_streams[ost->source_index];
971 
972  if (filter->inputs[0]->frame_rate.num > 0 &&
973  filter->inputs[0]->frame_rate.den > 0)
974  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
975 
976  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
977  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
978 
979  if (!ost->filters_script &&
980  !ost->filters &&
981  next_picture &&
982  ist &&
983  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
984  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
985  }
986 
987  if (!next_picture) {
988  //end, flushing
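     // mid_pred() picks the median of its three arguments, so on flush the
     // last frame is repeated roughly as often as recent frames were duplicated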
989  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
990  ost->last_nb0_frames[1],
991  ost->last_nb0_frames[2]);
992  } else {
993  delta0 = sync_ipts - ost->sync_opts;
994  delta = delta0 + duration;
995 
996  /* by default, we output a single frame */
997  nb0_frames = 0;
998  nb_frames = 1;
999 
1000  format_video_sync = video_sync_method;
1001  if (format_video_sync == VSYNC_AUTO) {
1002  if(!strcmp(s->oformat->name, "avi")) {
1003  format_video_sync = VSYNC_VFR;
1004  } else
1006  if ( ist
1007  && format_video_sync == VSYNC_CFR
1008  && input_files[ist->file_index]->ctx->nb_streams == 1
1009  && input_files[ist->file_index]->input_ts_offset == 0) {
1010  format_video_sync = VSYNC_VSCFR;
1011  }
1012  if (format_video_sync == VSYNC_CFR && copy_ts) {
1013  format_video_sync = VSYNC_VSCFR;
1014  }
1015  }
1016 
1017  if (delta0 < 0 &&
1018  delta > 0 &&
1019  format_video_sync != VSYNC_PASSTHROUGH &&
1020  format_video_sync != VSYNC_DROP) {
1021  double cor = FFMIN(-delta0, duration);
1022  if (delta0 < -0.6) {
1023  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1024  } else
 1025  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1026  sync_ipts += cor;
1027  duration -= cor;
1028  delta0 += cor;
1029  }
1030 
1031  switch (format_video_sync) {
1032  case VSYNC_VSCFR:
1033  if (ost->frame_number == 0 && delta - duration >= 0.5) {
1034  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1035  delta = duration;
1036  delta0 = 0;
1037  ost->sync_opts = lrint(sync_ipts);
1038  }
1039  case VSYNC_CFR:
1040  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1041  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1042  nb_frames = 0;
1043  } else if (delta < -1.1)
1044  nb_frames = 0;
1045  else if (delta > 1.1) {
1046  nb_frames = lrintf(delta);
1047  if (delta0 > 1.1)
1048  nb0_frames = lrintf(delta0 - 0.6);
1049  }
1050  break;
1051  case VSYNC_VFR:
1052  if (delta <= -0.6)
1053  nb_frames = 0;
1054  else if (delta > 0.6)
1055  ost->sync_opts = lrint(sync_ipts);
1056  break;
1057  case VSYNC_DROP:
1058  case VSYNC_PASSTHROUGH:
1059  ost->sync_opts = lrint(sync_ipts);
1060  break;
1061  default:
1062  av_assert0(0);
1063  }
1064  }
1065 
1066  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1067  nb0_frames = FFMIN(nb0_frames, nb_frames);
1068 
1069  memmove(ost->last_nb0_frames + 1,
1070  ost->last_nb0_frames,
1071  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1072  ost->last_nb0_frames[0] = nb0_frames;
1073 
1074  if (nb0_frames == 0 && ost->last_droped) {
1075  nb_frames_drop++;
1077  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1078  ost->frame_number, ost->st->index, ost->last_frame->pts);
1079  }
1080  if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1081  if (nb_frames > dts_error_threshold * 30) {
1082  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1083  nb_frames_drop++;
1084  return;
1085  }
1086  nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1087  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1088  }
1089  ost->last_droped = nb_frames == nb0_frames && next_picture;
1090 
1091  /* duplicates frame if needed */
1092  for (i = 0; i < nb_frames; i++) {
1093  AVFrame *in_picture;
1094  av_init_packet(&pkt);
1095  pkt.data = NULL;
1096  pkt.size = 0;
1097 
1098  if (i < nb0_frames && ost->last_frame) {
1099  in_picture = ost->last_frame;
1100  } else
1101  in_picture = next_picture;
1102 
1103  if (!in_picture)
1104  return;
1105 
1106  in_picture->pts = ost->sync_opts;
1107 
1108 #if 1
1109  if (!check_recording_time(ost))
1110 #else
1111  if (ost->frame_number >= ost->max_frames)
1112 #endif
1113  return;
1114 
1115  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1116  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
 1117  /* raw pictures are written as an AVPicture structure to
 1118  avoid any copies. We temporarily support the older
 1119  method. */
1120  if (in_picture->interlaced_frame)
1121  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1122  else
1123  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1124  pkt.data = (uint8_t *)in_picture;
1125  pkt.size = sizeof(AVPicture);
1126  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1127  pkt.flags |= AV_PKT_FLAG_KEY;
1128 
1129  write_frame(s, &pkt, ost);
1130  } else {
1131  int got_packet, forced_keyframe = 0;
1132  double pts_time;
1133 
1135  ost->top_field_first >= 0)
1136  in_picture->top_field_first = !!ost->top_field_first;
1137 
1138  if (in_picture->interlaced_frame) {
1139  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1140  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1141  else
1142  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1143  } else
1144  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1145 
1146  in_picture->quality = enc->global_quality;
1147  in_picture->pict_type = 0;
1148 
1149  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1150  in_picture->pts * av_q2d(enc->time_base) : NAN;
1151  if (ost->forced_kf_index < ost->forced_kf_count &&
1152  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1153  ost->forced_kf_index++;
1154  forced_keyframe = 1;
1155  } else if (ost->forced_keyframes_pexpr) {
1156  double res;
1157  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1160  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1166  res);
1167  if (res) {
1168  forced_keyframe = 1;
1174  }
1175 
1177  } else if ( ost->forced_keyframes
1178  && !strncmp(ost->forced_keyframes, "source", 6)
1179  && in_picture->key_frame==1) {
1180  forced_keyframe = 1;
1181  }
1182 
1183  if (forced_keyframe) {
1184  in_picture->pict_type = AV_PICTURE_TYPE_I;
1185  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1186  }
1187 
1189  if (debug_ts) {
1190  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1191  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1192  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1193  enc->time_base.num, enc->time_base.den);
1194  }
1195 
1196  ost->frames_encoded++;
1197 
1198  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1199  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1200  if (ret < 0) {
1201  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1202  exit_program(1);
1203  }
1204 
1205  if (got_packet) {
1206  if (debug_ts) {
1207  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1208  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1209  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1210  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1211  }
1212 
1213  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1214  pkt.pts = ost->sync_opts;
1215 
1216  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1217 
1218  if (debug_ts) {
1219  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1220  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1221  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1222  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1223  }
1224 
1225  frame_size = pkt.size;
1226  write_frame(s, &pkt, ost);
1227 
1228  /* if two pass, output log */
1229  if (ost->logfile && enc->stats_out) {
1230  fprintf(ost->logfile, "%s", enc->stats_out);
1231  }
1232  }
1233  }
1234  ost->sync_opts++;
1235  /*
1236  * For video, number of frames in == number of packets out.
1237  * But there may be reordering, so we can't throw away frames on encoder
1238  * flush, we need to limit them here, before they go into encoder.
1239  */
1240  ost->frame_number++;
1241 
1242  if (vstats_filename && frame_size)
1243  do_video_stats(ost, frame_size);
1244  }
1245 
1246  if (!ost->last_frame)
1247  ost->last_frame = av_frame_alloc();
1248  av_frame_unref(ost->last_frame);
1249  if (next_picture && ost->last_frame)
1250  av_frame_ref(ost->last_frame, next_picture);
1251  else
1252  av_frame_free(&ost->last_frame);
1253 }
1254 
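 /* convert a normalized mean squared error (0..1) into a PSNR value in dB */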
1255 static double psnr(double d)
1256 {
1257  return -10.0 * log(d) / log(10.0);
1258 }
1259 
 1260 static void do_video_stats(OutputStream *ost, int frame_size)
 1261 {
1262  AVCodecContext *enc;
1263  int frame_number;
1264  double ti1, bitrate, avg_bitrate;
1265 
1266  /* this is executed just the first time do_video_stats is called */
1267  if (!vstats_file) {
1268  vstats_file = fopen(vstats_filename, "w");
1269  if (!vstats_file) {
1270  perror("fopen");
1271  exit_program(1);
1272  }
1273  }
1274 
1275  enc = ost->enc_ctx;
1276  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1277  frame_number = ost->st->nb_frames;
1278  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1279  ost->quality / (float)FF_QP2LAMBDA);
1280 
1281  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1282  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1283 
1284  fprintf(vstats_file,"f_size= %6d ", frame_size);
1285  /* compute pts value */
1286  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1287  if (ti1 < 0.01)
1288  ti1 = 0.01;
1289 
1290  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1291  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1292  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1293  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1294  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1295  }
1296 }
1297 
 1298 static void finish_output_stream(OutputStream *ost)
 1299 {
1300  OutputFile *of = output_files[ost->file_index];
1301  int i;
1302 
 1303  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
 1304 
1305  if (of->shortest) {
1306  for (i = 0; i < of->ctx->nb_streams; i++)
1307  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1308  }
1309 }
1310 
1311 /**
1312  * Get and encode new output from any of the filtergraphs, without causing
1313  * activity.
1314  *
1315  * @return 0 for success, <0 for severe errors
1316  */
1317 static int reap_filters(int flush)
1318 {
1319  AVFrame *filtered_frame = NULL;
1320  int i;
1321 
1322  /* Reap all buffers present in the buffer sinks */
1323  for (i = 0; i < nb_output_streams; i++) {
1324  OutputStream *ost = output_streams[i];
1325  OutputFile *of = output_files[ost->file_index];
 1326  AVFilterContext *filter;
 1327  AVCodecContext *enc = ost->enc_ctx;
1328  int ret = 0;
1329 
1330  if (!ost->filter)
1331  continue;
1332  filter = ost->filter->filter;
1333 
1334  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1335  return AVERROR(ENOMEM);
1336  }
1337  filtered_frame = ost->filtered_frame;
1338 
1339  while (1) {
1340  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1341  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
 1342  AV_BUFFERSINK_FLAG_NO_REQUEST);
 1343  if (ret < 0) {
1344  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1346  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1347  } else if (flush && ret == AVERROR_EOF) {
1348  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1349  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1350  }
1351  break;
1352  }
1353  if (ost->finished) {
1354  av_frame_unref(filtered_frame);
1355  continue;
1356  }
1357  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1358  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1359  AVRational tb = enc->time_base;
1360  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1361 
1362  tb.den <<= extra_bits;
1363  float_pts =
1364  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1365  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1366  float_pts /= 1 << extra_bits;
 1367  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1368  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1369 
1370  filtered_frame->pts =
1371  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1372  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1373  }
1374  //if (ost->source_index >= 0)
1375  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1376 
1377  switch (filter->inputs[0]->type) {
1378  case AVMEDIA_TYPE_VIDEO:
1379  if (!ost->frame_aspect_ratio.num)
1380  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1381 
1382  if (debug_ts) {
1383  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1384  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1385  float_pts,
1386  enc->time_base.num, enc->time_base.den);
1387  }
1388 
1389  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1390  break;
1391  case AVMEDIA_TYPE_AUDIO:
1392  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1393  enc->channels != av_frame_get_channels(filtered_frame)) {
1395  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1396  break;
1397  }
1398  do_audio_out(of->ctx, ost, filtered_frame);
1399  break;
1400  default:
1401  // TODO support subtitle filters
1402  av_assert0(0);
1403  }
1404 
1405  av_frame_unref(filtered_frame);
1406  }
1407  }
1408 
1409  return 0;
1410 }
1411 
1412 static void print_final_stats(int64_t total_size)
1413 {
1414  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1415  uint64_t subtitle_size = 0;
1416  uint64_t data_size = 0;
1417  float percent = -1.0;
1418  int i, j;
1419  int pass1_used = 1;
1420 
1421  for (i = 0; i < nb_output_streams; i++) {
1422  OutputStream *ost = output_streams[i];
1423  switch (ost->enc_ctx->codec_type) {
1424  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1425  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1426  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1427  default: other_size += ost->data_size; break;
1428  }
1429  extra_size += ost->enc_ctx->extradata_size;
1430  data_size += ost->data_size;
1433  pass1_used = 0;
1434  }
1435 
1436  if (data_size && total_size>0 && total_size >= data_size)
1437  percent = 100.0 * (total_size - data_size) / data_size;
1438 
1439  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1440  video_size / 1024.0,
1441  audio_size / 1024.0,
1442  subtitle_size / 1024.0,
1443  other_size / 1024.0,
1444  extra_size / 1024.0);
1445  if (percent >= 0.0)
1446  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1447  else
1448  av_log(NULL, AV_LOG_INFO, "unknown");
1449  av_log(NULL, AV_LOG_INFO, "\n");
1450 
1451  /* print verbose per-stream stats */
1452  for (i = 0; i < nb_input_files; i++) {
1453  InputFile *f = input_files[i];
1454  uint64_t total_packets = 0, total_size = 0;
1455 
1456  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1457  i, f->ctx->filename);
1458 
1459  for (j = 0; j < f->nb_streams; j++) {
1460  InputStream *ist = input_streams[f->ist_index + j];
1461  enum AVMediaType type = ist->dec_ctx->codec_type;
1462 
1463  total_size += ist->data_size;
1464  total_packets += ist->nb_packets;
1465 
1466  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1467  i, j, media_type_string(type));
1468  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1469  ist->nb_packets, ist->data_size);
1470 
1471  if (ist->decoding_needed) {
1472  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1473  ist->frames_decoded);
1474  if (type == AVMEDIA_TYPE_AUDIO)
1475  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1476  av_log(NULL, AV_LOG_VERBOSE, "; ");
1477  }
1478 
1479  av_log(NULL, AV_LOG_VERBOSE, "\n");
1480  }
1481 
1482  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1483  total_packets, total_size);
1484  }
1485 
1486  for (i = 0; i < nb_output_files; i++) {
1487  OutputFile *of = output_files[i];
1488  uint64_t total_packets = 0, total_size = 0;
1489 
1490  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1491  i, of->ctx->filename);
1492 
1493  for (j = 0; j < of->ctx->nb_streams; j++) {
1494  OutputStream *ost = output_streams[of->ost_index + j];
1495  enum AVMediaType type = ost->enc_ctx->codec_type;
1496 
1497  total_size += ost->data_size;
1498  total_packets += ost->packets_written;
1499 
1500  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1501  i, j, media_type_string(type));
1502  if (ost->encoding_needed) {
1503  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1504  ost->frames_encoded);
1505  if (type == AVMEDIA_TYPE_AUDIO)
1506  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1507  av_log(NULL, AV_LOG_VERBOSE, "; ");
1508  }
1509 
1510  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1511  ost->packets_written, ost->data_size);
1512 
1513  av_log(NULL, AV_LOG_VERBOSE, "\n");
1514  }
1515 
1516  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1517  total_packets, total_size);
1518  }
1519  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1520  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1521  if (pass1_used) {
1522  av_log(NULL, AV_LOG_WARNING, "\n");
1523  } else {
1524  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1525  }
1526  }
1527 }
1528 
1529 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1530 {
1531  char buf[1024];
1532  AVBPrint buf_script;
1533  OutputStream *ost;
1534  AVFormatContext *oc;
1535  int64_t total_size;
1536  AVCodecContext *enc;
1537  int frame_number, vid, i;
1538  double bitrate;
1539  int64_t pts = INT64_MIN;
1540  static int64_t last_time = -1;
1541  static int qp_histogram[52];
1542  int hours, mins, secs, us;
1543 
1544  if (!print_stats && !is_last_report && !progress_avio)
1545  return;
1546 
1547  if (!is_last_report) {
1548  if (last_time == -1) {
1549  last_time = cur_time;
1550  return;
1551  }
1552  if ((cur_time - last_time) < 500000)
1553  return;
1554  last_time = cur_time;
1555  }
1556 
1557 
1558  oc = output_files[0]->ctx;
1559 
1560  total_size = avio_size(oc->pb);
1561  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1562  total_size = avio_tell(oc->pb);
1563 
1564  buf[0] = '\0';
1565  vid = 0;
1566  av_bprint_init(&buf_script, 0, 1);
1567  for (i = 0; i < nb_output_streams; i++) {
1568  float q = -1;
1569  ost = output_streams[i];
1570  enc = ost->enc_ctx;
1571  if (!ost->stream_copy)
1572  q = ost->quality / (float) FF_QP2LAMBDA;
1573 
1574  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1575  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1576  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1577  ost->file_index, ost->index, q);
1578  }
1579  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1580  float fps, t = (cur_time-timer_start) / 1000000.0;
1581 
1582  frame_number = ost->frame_number;
1583  fps = t > 1 ? frame_number / t : 0;
1584  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1585  frame_number, fps < 9.95, fps, q);
1586  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1587  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1588  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1589  ost->file_index, ost->index, q);
1590  if (is_last_report)
1591  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1592  if (qp_hist) {
1593  int j;
1594  int qp = lrintf(q);
1595  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1596  qp_histogram[qp]++;
1597  for (j = 0; j < 32; j++)
1598  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1599  }
1600 
1601  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1602  int j;
1603  double error, error_sum = 0;
1604  double scale, scale_sum = 0;
1605  double p;
1606  char type[3] = { 'Y','U','V' };
1607  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1608  for (j = 0; j < 3; j++) {
1609  if (is_last_report) {
1610  error = enc->error[j];
1611  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1612  } else {
1613  error = ost->error[j];
1614  scale = enc->width * enc->height * 255.0 * 255.0;
1615  }
1616  if (j)
1617  scale /= 4;
1618  error_sum += error;
1619  scale_sum += scale;
1620  p = psnr(error / scale);
1621  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1622  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1623  ost->file_index, ost->index, type[j] | 32, p);
1624  }
1625  p = psnr(error_sum / scale_sum);
1626  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1627  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1628  ost->file_index, ost->index, p);
1629  }
1630  vid = 1;
1631  }
1632  /* compute min output value */
 1633  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
 1634  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1635  ost->st->time_base, AV_TIME_BASE_Q));
1636  if (is_last_report)
1637  nb_frames_drop += ost->last_droped;
1638  }
1639 
1640  secs = FFABS(pts) / AV_TIME_BASE;
1641  us = FFABS(pts) % AV_TIME_BASE;
1642  mins = secs / 60;
1643  secs %= 60;
1644  hours = mins / 60;
1645  mins %= 60;
1646 
1647  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1648 
1649  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1650  "size=N/A time=");
1651  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1652  "size=%8.0fkB time=", total_size / 1024.0);
1653  if (pts < 0)
1654  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1655  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1656  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1657  (100 * us) / AV_TIME_BASE);
1658 
1659  if (bitrate < 0) {
1660  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1661  av_bprintf(&buf_script, "bitrate=N/A\n");
1662  }else{
1663  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1664  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1665  }
1666 
1667  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1668  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1669  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1670  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1671  hours, mins, secs, us);
1672 
 1673  if (nb_frames_dup || nb_frames_drop)
 1674  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
 1675  nb_frames_dup, nb_frames_drop);
1676  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1677  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1678 
1679  if (print_stats || is_last_report) {
1680  const char end = is_last_report ? '\n' : '\r';
1681  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1682  fprintf(stderr, "%s %c", buf, end);
1683  } else
1684  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1685 
1686  fflush(stderr);
1687  }
1688 
1689  if (progress_avio) {
1690  av_bprintf(&buf_script, "progress=%s\n",
1691  is_last_report ? "end" : "continue");
1692  avio_write(progress_avio, buf_script.str,
1693  FFMIN(buf_script.len, buf_script.size - 1));
1694  avio_flush(progress_avio);
1695  av_bprint_finalize(&buf_script, NULL);
1696  if (is_last_report) {
1697  avio_closep(&progress_avio);
1698  }
1699  }
1700 
1701  if (is_last_report)
1702  print_final_stats(total_size);
1703 }
1704 
1705 static void flush_encoders(void)
1706 {
1707  int i, ret;
1708 
1709  for (i = 0; i < nb_output_streams; i++) {
1710  OutputStream *ost = output_streams[i];
1711  AVCodecContext *enc = ost->enc_ctx;
1712  AVFormatContext *os = output_files[ost->file_index]->ctx;
1713  int stop_encoding = 0;
1714 
1715  if (!ost->encoding_needed)
1716  continue;
1717 
1718  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1719  continue;
1721  continue;
1722 
1723  for (;;) {
1724  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1725  const char *desc;
1726 
1727  switch (enc->codec_type) {
1728  case AVMEDIA_TYPE_AUDIO:
1729  encode = avcodec_encode_audio2;
1730  desc = "Audio";
1731  break;
1732  case AVMEDIA_TYPE_VIDEO:
1733  encode = avcodec_encode_video2;
1734  desc = "Video";
1735  break;
1736  default:
1737  stop_encoding = 1;
1738  }
1739 
1740  if (encode) {
1741  AVPacket pkt;
1742  int pkt_size;
1743  int got_packet;
1744  av_init_packet(&pkt);
1745  pkt.data = NULL;
1746  pkt.size = 0;
1747 
1749  ret = encode(enc, &pkt, NULL, &got_packet);
1750  update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1751  if (ret < 0) {
1752  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1753  desc,
1754  av_err2str(ret));
1755  exit_program(1);
1756  }
1757  if (ost->logfile && enc->stats_out) {
1758  fprintf(ost->logfile, "%s", enc->stats_out);
1759  }
1760  if (!got_packet) {
1761  stop_encoding = 1;
1762  break;
1763  }
1764  if (ost->finished & MUXER_FINISHED) {
1765  av_free_packet(&pkt);
1766  continue;
1767  }
1768  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1769  pkt_size = pkt.size;
1770  write_frame(os, &pkt, ost);
 1771  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename)
 1772  do_video_stats(ost, pkt_size);
1773  }
1774  }
1775 
1776  if (stop_encoding)
1777  break;
1778  }
1779  }
1780 }
1781 
1782 /*
1783  * Check whether a packet from ist should be written into ost at this time
1784  */
 1785 static int check_output_constraints(InputStream *ist, OutputStream *ost)
 1786 {
1787  OutputFile *of = output_files[ost->file_index];
1788  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1789 
1790  if (ost->source_index != ist_index)
1791  return 0;
1792 
1793  if (ost->finished)
1794  return 0;
1795 
1796  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1797  return 0;
1798 
1799  return 1;
1800 }
1801 
1802 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1803 {
1804  OutputFile *of = output_files[ost->file_index];
1805  InputFile *f = input_files [ist->file_index];
1806  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1807  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1808  int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1809  AVPicture pict;
1810  AVPacket opkt;
1811 
1812  av_init_packet(&opkt);
1813 
1814  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
 1815  !ost->copy_initial_nonkeyframes)
 1816  return;
1817 
1818  if (pkt->pts == AV_NOPTS_VALUE) {
1819  if (!ost->frame_number && ist->pts < start_time &&
1820  !ost->copy_prior_start)
1821  return;
1822  } else {
1823  if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1824  !ost->copy_prior_start)
1825  return;
1826  }
1827 
1828  if (of->recording_time != INT64_MAX &&
1829  ist->pts >= of->recording_time + start_time) {
1830  close_output_stream(ost);
1831  return;
1832  }
1833 
1834  if (f->recording_time != INT64_MAX) {
1835  start_time = f->ctx->start_time;
1836  if (f->start_time != AV_NOPTS_VALUE)
1837  start_time += f->start_time;
1838  if (ist->pts >= f->recording_time + start_time) {
1839  close_output_stream(ost);
1840  return;
1841  }
1842  }
1843 
1844  /* force the input stream PTS */
1845  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1846  ost->sync_opts++;
1847 
1848  if (pkt->pts != AV_NOPTS_VALUE)
1849  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1850  else
1851  opkt.pts = AV_NOPTS_VALUE;
1852 
1853  if (pkt->dts == AV_NOPTS_VALUE)
1854  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1855  else
1856  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1857  opkt.dts -= ost_tb_start_time;
1858 
1859  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
 1860  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
 1861  if(!duration)
1862  duration = ist->dec_ctx->frame_size;
1863  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1864  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1865  ost->st->time_base) - ost_tb_start_time;
1866  }
1867 
1868  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1869  opkt.flags = pkt->flags;
1870  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1871  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
 1872  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
 1873  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
 1874  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1875  ) {
1876  int ret = av_parser_change(ost->parser, ost->st->codec,
1877  &opkt.data, &opkt.size,
1878  pkt->data, pkt->size,
1879  pkt->flags & AV_PKT_FLAG_KEY);
1880  if (ret < 0) {
1881  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1882  av_err2str(ret));
1883  exit_program(1);
1884  }
1885  if (ret) {
1886  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1887  if (!opkt.buf)
1888  exit_program(1);
1889  }
1890  } else {
1891  opkt.data = pkt->data;
1892  opkt.size = pkt->size;
1893  }
1894  av_copy_packet_side_data(&opkt, pkt);
1895 
1896  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1897  ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1898  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1899  /* store AVPicture in AVPacket, as expected by the output format */
1900  int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1901  if (ret < 0) {
1902  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1903  av_err2str(ret));
1904  exit_program(1);
1905  }
1906  opkt.data = (uint8_t *)&pict;
1907  opkt.size = sizeof(AVPicture);
1908  opkt.flags |= AV_PKT_FLAG_KEY;
1909  }
1910 
1911  write_frame(of->ctx, &opkt, ost);
1912 }
1913 
 1914 int guess_input_channel_layout(InputStream *ist)
 1915 {
1916  AVCodecContext *dec = ist->dec_ctx;
1917 
1918  if (!dec->channel_layout) {
1919  char layout_name[256];
1920 
1921  if (dec->channels > ist->guess_layout_max)
1922  return 0;
 1923  dec->channel_layout = av_get_default_channel_layout(dec->channels);
 1924  if (!dec->channel_layout)
1925  return 0;
1926  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1927  dec->channels, dec->channel_layout);
1928  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1929  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1930  }
1931  return 1;
1932 }
1933 
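 /* decode_audio(): decodes one audio packet into ist->decoded_frame,
  * advances next_pts/next_dts by the decoded duration, reconfigures any
  * filtergraph fed by this stream when the sample format, rate, channel
  * count or layout changes, and finally pushes the frame into every
  * attached buffer source. */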
1934 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1935 {
1936  AVFrame *decoded_frame, *f;
1937  AVCodecContext *avctx = ist->dec_ctx;
1938  int i, ret, err = 0, resample_changed;
1939  AVRational decoded_frame_tb;
1940 
1941  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1942  return AVERROR(ENOMEM);
1943  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1944  return AVERROR(ENOMEM);
1945  decoded_frame = ist->decoded_frame;
1946 
1947  update_benchmark(NULL);
1948  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1949  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1950 
1951  if (ret >= 0 && avctx->sample_rate <= 0) {
1952  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1953  ret = AVERROR_INVALIDDATA;
1954  }
1955 
1956  if (*got_output || ret<0)
1957  decode_error_stat[ret<0] ++;
1958 
1959  if (ret < 0 && exit_on_error)
1960  exit_program(1);
1961 
1962  if (!*got_output || ret < 0)
1963  return ret;
1964 
1965  ist->samples_decoded += decoded_frame->nb_samples;
1966  ist->frames_decoded++;
1967 
1968 #if 1
1969  /* increment next_dts to use for the case where the input stream does not
1970  have timestamps or there are multiple frames in the packet */
1971  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1972  avctx->sample_rate;
1973  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1974  avctx->sample_rate;
1975 #endif
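 /* Example of the increment above: a 1024-sample frame decoded at 48 kHz
  * advances next_pts/next_dts by 1024 * 1000000 / 48000 = 21333 us,
  * since AV_TIME_BASE is 1000000 (timestamps here are in microseconds). */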
1976 
1977  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1978  ist->resample_channels != avctx->channels ||
1979  ist->resample_channel_layout != decoded_frame->channel_layout ||
1980  ist->resample_sample_rate != decoded_frame->sample_rate;
1981  if (resample_changed) {
1982  char layout1[64], layout2[64];
1983 
1984  if (!guess_input_channel_layout(ist)) {
1985  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1986  "layout for Input Stream #%d.%d\n", ist->file_index,
1987  ist->st->index);
1988  exit_program(1);
1989  }
1990  decoded_frame->channel_layout = avctx->channel_layout;
1991 
1992  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1993  ist->resample_channel_layout);
1994  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1995  decoded_frame->channel_layout);
1996 
1998  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1999  ist->file_index, ist->st->index,
2001  ist->resample_channels, layout1,
2002  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2003  avctx->channels, layout2);
2004 
2005  ist->resample_sample_fmt = decoded_frame->format;
2006  ist->resample_sample_rate = decoded_frame->sample_rate;
2007  ist->resample_channel_layout = decoded_frame->channel_layout;
2008  ist->resample_channels = avctx->channels;
2009 
2010  for (i = 0; i < nb_filtergraphs; i++)
2011  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2012  FilterGraph *fg = filtergraphs[i];
2013  if (configure_filtergraph(fg) < 0) {
2014  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2015  exit_program(1);
2016  }
2017  }
2018  }
2019 
2020  /* if the decoder provides a pts, use it instead of the last packet pts.
2021  the decoder could be delaying output by a packet or more. */
2022  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2023  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2024  decoded_frame_tb = avctx->time_base;
2025  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2026  decoded_frame->pts = decoded_frame->pkt_pts;
2027  decoded_frame_tb = ist->st->time_base;
2028  } else if (pkt->pts != AV_NOPTS_VALUE) {
2029  decoded_frame->pts = pkt->pts;
2030  decoded_frame_tb = ist->st->time_base;
2031  }else {
2032  decoded_frame->pts = ist->dts;
2033  decoded_frame_tb = AV_TIME_BASE_Q;
2034  }
2035  pkt->pts = AV_NOPTS_VALUE;
2036  if (decoded_frame->pts != AV_NOPTS_VALUE)
2037  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2038  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2039  (AVRational){1, avctx->sample_rate});
2040  for (i = 0; i < ist->nb_filters; i++) {
2041  if (i < ist->nb_filters - 1) {
2042  f = ist->filter_frame;
2043  err = av_frame_ref(f, decoded_frame);
2044  if (err < 0)
2045  break;
2046  } else
2047  f = decoded_frame;
2048  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2049  AV_BUFFERSRC_FLAG_PUSH);
2050  if (err == AVERROR_EOF)
2051  err = 0; /* ignore */
2052  if (err < 0)
2053  break;
2054  }
2055  decoded_frame->pts = AV_NOPTS_VALUE;
2056 
2057  av_frame_unref(ist->filter_frame);
2058  av_frame_unref(decoded_frame);
2059  return err < 0 ? err : ret;
2060 }
2061 
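 /* decode_video(): decodes one video packet, transfers hwaccel frames back
  * to system memory when needed, stamps the frame with its best-effort
  * timestamp, reinitializes filtergraphs on a size/pixel-format change and
  * injects the frame into every attached buffer source. */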
2062 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2063 {
2064  AVFrame *decoded_frame, *f;
2065  int i, ret = 0, err = 0, resample_changed;
2066  int64_t best_effort_timestamp;
2067  AVRational *frame_sample_aspect;
2068 
2069  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2070  return AVERROR(ENOMEM);
2071  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2072  return AVERROR(ENOMEM);
2073  decoded_frame = ist->decoded_frame;
2074  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2075 
2076  update_benchmark(NULL);
2077  ret = avcodec_decode_video2(ist->dec_ctx,
2078  decoded_frame, got_output, pkt);
2079  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2080 
2081  // The following line may be required in some cases where there is no parser
2082  // or the parser does not set has_b_frames correctly
2083  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2084  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2085  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2086  } else
2087  av_log(ist->dec_ctx, AV_LOG_WARNING,
2088  "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2089  "If you want to help, upload a sample "
2090  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2091  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2092  ist->dec_ctx->has_b_frames,
2093  ist->st->codec->has_b_frames);
2094  }
2095 
2096  if (*got_output || ret<0)
2097  decode_error_stat[ret<0] ++;
2098 
2099  if (ret < 0 && exit_on_error)
2100  exit_program(1);
2101 
2102  if (*got_output && ret >= 0) {
2103  if (ist->dec_ctx->width != decoded_frame->width ||
2104  ist->dec_ctx->height != decoded_frame->height ||
2105  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2106  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2107  decoded_frame->width,
2108  decoded_frame->height,
2109  decoded_frame->format,
2110  ist->dec_ctx->width,
2111  ist->dec_ctx->height,
2112  ist->dec_ctx->pix_fmt);
2113  }
2114  }
2115 
2116  if (!*got_output || ret < 0)
2117  return ret;
2118 
2119  if(ist->top_field_first>=0)
2120  decoded_frame->top_field_first = ist->top_field_first;
2121 
2122  ist->frames_decoded++;
2123 
2124  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2125  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2126  if (err < 0)
2127  goto fail;
2128  }
2129  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2130 
2131  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2132  if(best_effort_timestamp != AV_NOPTS_VALUE)
2133  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
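 /* av_frame_get_best_effort_timestamp() picks the most plausible timestamp
  * from the frame's pts/dts fields using libavcodec heuristics; it is then
  * rescaled from the stream time base to AV_TIME_BASE_Q (microseconds)
  * to drive ist->pts / ist->next_pts. */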
2134 
2135  if (debug_ts) {
2136  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2137  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2138  ist->st->index, av_ts2str(decoded_frame->pts),
2139  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2140  best_effort_timestamp,
2141  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2142  decoded_frame->key_frame, decoded_frame->pict_type,
2143  ist->st->time_base.num, ist->st->time_base.den);
2144  }
2145 
2146  pkt->size = 0;
2147 
2148  if (ist->st->sample_aspect_ratio.num)
2149  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2150 
2151  resample_changed = ist->resample_width != decoded_frame->width ||
2152  ist->resample_height != decoded_frame->height ||
2153  ist->resample_pix_fmt != decoded_frame->format;
2154  if (resample_changed) {
2155  av_log(NULL, AV_LOG_INFO,
2156  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2157  ist->file_index, ist->st->index,
2158  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2159  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2160 
2161  ist->resample_width = decoded_frame->width;
2162  ist->resample_height = decoded_frame->height;
2163  ist->resample_pix_fmt = decoded_frame->format;
2164 
2165  for (i = 0; i < nb_filtergraphs; i++) {
2166  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2167  configure_filtergraph(filtergraphs[i]) < 0) {
2168  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2169  exit_program(1);
2170  }
2171  }
2172  }
2173 
2174  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2175  for (i = 0; i < ist->nb_filters; i++) {
2176  if (!frame_sample_aspect->num)
2177  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2178 
2179  if (i < ist->nb_filters - 1) {
2180  f = ist->filter_frame;
2181  err = av_frame_ref(f, decoded_frame);
2182  if (err < 0)
2183  break;
2184  } else
2185  f = decoded_frame;
2186  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2187  if (ret == AVERROR_EOF) {
2188  ret = 0; /* ignore */
2189  } else if (ret < 0) {
2190  av_log(NULL, AV_LOG_FATAL,
2191  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2192  exit_program(1);
2193  }
2194  }
2195 
2196 fail:
2197  av_frame_unref(ist->filter_frame);
2198  av_frame_unref(decoded_frame);
2199  return err < 0 ? err : ret;
2200 }
2201 
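 /* transcode_subtitles(): decodes one subtitle packet; with -fix_sub_duration
  * the previous subtitle's display time is clipped to the gap between the two
  * events. The decoded subtitle feeds the sub2video machinery and is then
  * sent to every subtitle encoder fed by this input stream. */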
2202 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2203 {
2204  AVSubtitle subtitle;
2205  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2206  &subtitle, got_output, pkt);
2207 
2208  if (*got_output || ret<0)
2209  decode_error_stat[ret<0] ++;
2210 
2211  if (ret < 0 && exit_on_error)
2212  exit_program(1);
2213 
2214  if (ret < 0 || !*got_output) {
2215  if (!pkt->size)
2216  sub2video_flush(ist);
2217  return ret;
2218  }
2219 
2220  if (ist->fix_sub_duration) {
2221  int end = 1;
2222  if (ist->prev_sub.got_output) {
2223  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2224  1000, AV_TIME_BASE);
2225  if (end < ist->prev_sub.subtitle.end_display_time) {
2226  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2227  "Subtitle duration reduced from %d to %d%s\n",
2229  end <= 0 ? ", dropping it" : "");
2231  }
2232  }
2233  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2234  FFSWAP(int, ret, ist->prev_sub.ret);
2235  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2236  if (end <= 0)
2237  goto out;
2238  }
2239 
2240  if (!*got_output)
2241  return ret;
2242 
2243  sub2video_update(ist, &subtitle);
2244 
2245  if (!subtitle.num_rects)
2246  goto out;
2247 
2248  ist->frames_decoded++;
2249 
2250  for (i = 0; i < nb_output_streams; i++) {
2251  OutputStream *ost = output_streams[i];
2252 
2253  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2254  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2255  continue;
2256 
2257  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2258  }
2259 
2260 out:
2261  avsubtitle_free(&subtitle);
2262  return ret;
2263 }
2264 
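 /* Signal EOF to the filtergraph inputs fed by this stream by pushing a
  * NULL frame into each buffer source; used once the decoder is drained. */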
2265 static int send_filter_eof(InputStream *ist)
2266 {
2267  int i, ret;
2268  for (i = 0; i < ist->nb_filters; i++) {
2269  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2270  if (ret < 0)
2271  return ret;
2272  }
2273  return 0;
2274 }
2275 
2276 /* pkt = NULL means EOF (needed to flush decoder buffers) */
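 /* Returns got_output, i.e. non-zero while a decoder still produced a frame;
  * the EOF path in process_input() keeps calling this with pkt == NULL until
  * the return value drops to 0 and the decoder is fully drained. */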
2277 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2278 {
2279  int ret = 0, i;
2280  int got_output = 0;
2281 
2282  AVPacket avpkt;
2283  if (!ist->saw_first_ts) {
2284  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2285  ist->pts = 0;
2286  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2287  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2288  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2289  }
2290  ist->saw_first_ts = 1;
2291  }
2292 
2293  if (ist->next_dts == AV_NOPTS_VALUE)
2294  ist->next_dts = ist->dts;
2295  if (ist->next_pts == AV_NOPTS_VALUE)
2296  ist->next_pts = ist->pts;
2297 
2298  if (!pkt) {
2299  /* EOF handling */
2300  av_init_packet(&avpkt);
2301  avpkt.data = NULL;
2302  avpkt.size = 0;
2303  goto handle_eof;
2304  } else {
2305  avpkt = *pkt;
2306  }
2307 
2308  if (pkt->dts != AV_NOPTS_VALUE) {
2309  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2310  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2311  ist->next_pts = ist->pts = ist->dts;
2312  }
2313 
2314  // while we have more to decode or while the decoder did output something on EOF
2315  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2316  int duration;
2317  handle_eof:
2318 
2319  ist->pts = ist->next_pts;
2320  ist->dts = ist->next_dts;
2321 
2322  if (avpkt.size && avpkt.size != pkt->size &&
2325  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2326  ist->showed_multi_packet_warning = 1;
2327  }
2328 
2329  switch (ist->dec_ctx->codec_type) {
2330  case AVMEDIA_TYPE_AUDIO:
2331  ret = decode_audio (ist, &avpkt, &got_output);
2332  break;
2333  case AVMEDIA_TYPE_VIDEO:
2334  ret = decode_video (ist, &avpkt, &got_output);
2335  if (avpkt.duration) {
2336  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2337  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2338  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2339  duration = ((int64_t)AV_TIME_BASE *
2340  ist->dec_ctx->framerate.den * ticks) /
2341  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2342  } else
2343  duration = 0;
2344 
2345  if(ist->dts != AV_NOPTS_VALUE && duration) {
2346  ist->next_dts += duration;
2347  }else
2348  ist->next_dts = AV_NOPTS_VALUE;
2349 
2350  if (got_output)
2351  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2352  break;
2353  case AVMEDIA_TYPE_SUBTITLE:
2354  ret = transcode_subtitles(ist, &avpkt, &got_output);
2355  break;
2356  default:
2357  return -1;
2358  }
2359 
2360  if (ret < 0) {
2361  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2362  ist->file_index, ist->st->index, av_err2str(ret));
2363  if (exit_on_error)
2364  exit_program(1);
2365  break;
2366  }
2367 
2368  avpkt.dts=
2369  avpkt.pts= AV_NOPTS_VALUE;
2370 
2371  // touch data and size only if not EOF
2372  if (pkt) {
2373  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2374  ret = avpkt.size;
2375  avpkt.data += ret;
2376  avpkt.size -= ret;
2377  }
2378  if (!got_output) {
2379  continue;
2380  }
2381  if (got_output && !pkt)
2382  break;
2383  }
2384 
2385  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2386  if (!pkt && ist->decoding_needed && !got_output) {
2387  int ret = send_filter_eof(ist);
2388  if (ret < 0) {
2389  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2390  exit_program(1);
2391  }
2392  }
2393 
2394  /* handle stream copy */
2395  if (!ist->decoding_needed) {
2396  ist->dts = ist->next_dts;
2397  switch (ist->dec_ctx->codec_type) {
2398  case AVMEDIA_TYPE_AUDIO:
2399  if (ist->dec_ctx->sample_rate) {
2400  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2401  ist->dec_ctx->sample_rate;
2402  } else {
2403  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2404  }
2405  break;
2406  case AVMEDIA_TYPE_VIDEO:
2407  if (ist->framerate.num) {
2408  // TODO: Remove work-around for c99-to-c89 issue 7
2409  AVRational time_base_q = AV_TIME_BASE_Q;
2410  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2411  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2412  } else if (pkt->duration) {
2413  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2414  } else if(ist->dec_ctx->framerate.num != 0) {
2415  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2416  ist->next_dts += ((int64_t)AV_TIME_BASE *
2417  ist->dec_ctx->framerate.den * ticks) /
2418  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2419  }
2420  break;
2421  }
2422  ist->pts = ist->dts;
2423  ist->next_pts = ist->next_dts;
2424  }
2425  for (i = 0; pkt && i < nb_output_streams; i++) {
2426  OutputStream *ost = output_streams[i];
2427 
2428  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2429  continue;
2430 
2431  do_streamcopy(ist, ost, pkt);
2432  }
2433 
2434  return got_output;
2435 }
2436 
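 /* print_sdp(): gathers the AVFormatContext of every RTP output, generates a
  * session description with av_sdp_create() and either prints it to stdout
  * or writes it to the file given with -sdp_file. */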
2437 static void print_sdp(void)
2438 {
2439  char sdp[16384];
2440  int i;
2441  int j;
2442  AVIOContext *sdp_pb;
2443  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2444 
2445  if (!avc)
2446  exit_program(1);
2447  for (i = 0, j = 0; i < nb_output_files; i++) {
2448  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2449  avc[j] = output_files[i]->ctx;
2450  j++;
2451  }
2452  }
2453 
2454  if (!j)
2455  goto fail;
2456 
2457  av_sdp_create(avc, j, sdp, sizeof(sdp));
2458 
2459  if (!sdp_filename) {
2460  printf("SDP:\n%s\n", sdp);
2461  fflush(stdout);
2462  } else {
2463  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2464  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2465  } else {
2466  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2467  avio_closep(&sdp_pb);
2469  }
2470  }
2471 
2472 fail:
2473  av_freep(&avc);
2474 }
2475 
2476 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2477 {
2478  int i;
2479  for (i = 0; hwaccels[i].name; i++)
2480  if (hwaccels[i].pix_fmt == pix_fmt)
2481  return &hwaccels[i];
2482  return NULL;
2483 }
2484 
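 /* get_format(): the AVCodecContext.get_format callback. The decoder offers
  * pix_fmts ordered by preference; the loop stops at the first software
  * format, and for hwaccel formats it tries to initialize the matching
  * HWAccel entry, falling back to the next candidate on failure. */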
2485 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2486 {
2487  InputStream *ist = s->opaque;
2488  const enum AVPixelFormat *p;
2489  int ret;
2490 
2491  for (p = pix_fmts; *p != -1; p++) {
2492  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2493  const HWAccel *hwaccel;
2494 
2495  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2496  break;
2497 
2498  hwaccel = get_hwaccel(*p);
2499  if (!hwaccel ||
2500  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2501  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2502  continue;
2503 
2504  ret = hwaccel->init(s);
2505  if (ret < 0) {
2506  if (ist->hwaccel_id == hwaccel->id) {
2507  av_log(NULL, AV_LOG_FATAL,
2508  "%s hwaccel requested for input stream #%d:%d, "
2509  "but cannot be initialized.\n", hwaccel->name,
2510  ist->file_index, ist->st->index);
2511  return AV_PIX_FMT_NONE;
2512  }
2513  continue;
2514  }
2515  ist->active_hwaccel_id = hwaccel->id;
2516  ist->hwaccel_pix_fmt = *p;
2517  break;
2518  }
2519 
2520  return *p;
2521 }
2522 
2523 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2524 {
2525  InputStream *ist = s->opaque;
2526 
2527  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2528  return ist->hwaccel_get_buffer(s, frame, flags);
2529 
2530  return avcodec_default_get_buffer2(s, frame, flags);
2531 }
2532 
2533 static int init_input_stream(int ist_index, char *error, int error_len)
2534 {
2535  int ret;
2536  InputStream *ist = input_streams[ist_index];
2537 
2538  if (ist->decoding_needed) {
2539  AVCodec *codec = ist->dec;
2540  if (!codec) {
2541  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2542  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2543  return AVERROR(EINVAL);
2544  }
2545 
2546  ist->dec_ctx->opaque = ist;
2547  ist->dec_ctx->get_format = get_format;
2548  ist->dec_ctx->get_buffer2 = get_buffer;
2549  ist->dec_ctx->thread_safe_callbacks = 1;
2550 
2551  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2552  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2553  (ist->decoding_needed & DECODING_FOR_OST)) {
2554  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2555  if (ist->decoding_needed & DECODING_FOR_FILTER)
2556  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2557  }
2558 
2559  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2560  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2561  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2562  if (ret == AVERROR_EXPERIMENTAL)
2563  abort_codec_experimental(codec, 0);
2564 
2565  snprintf(error, error_len,
2566  "Error while opening decoder for input stream "
2567  "#%d:%d : %s",
2568  ist->file_index, ist->st->index, av_err2str(ret));
2569  return ret;
2570  }
2571  assert_avoptions(ist->decoder_opts);
2572  }
2573 
2574  ist->next_pts = AV_NOPTS_VALUE;
2575  ist->next_dts = AV_NOPTS_VALUE;
2576 
2577  return 0;
2578 }
2579 
2580 static InputStream *get_input_stream(OutputStream *ost)
2581 {
2582  if (ost->source_index >= 0)
2583  return input_streams[ost->source_index];
2584  return NULL;
2585 }
2586 
2587 static int compare_int64(const void *a, const void *b)
2588 {
2589  int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2590  return va < vb ? -1 : va > vb ? +1 : 0;
2591 }
2592 
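 /* init_output_stream(): for encoded streams, copies the decoder's subtitle
  * header (needed by the ASS code), applies a 128k default audio bitrate when
  * none is given, opens the encoder and mirrors the resulting parameters into
  * ost->st->codec for the muxer; for stream copy it only applies the
  * user-supplied codec options. */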
2593 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2594 {
2595  int ret = 0;
2596 
2597  if (ost->encoding_needed) {
2598  AVCodec *codec = ost->enc;
2599  AVCodecContext *dec = NULL;
2600  InputStream *ist;
2601 
2602  if ((ist = get_input_stream(ost)))
2603  dec = ist->dec_ctx;
2604  if (dec && dec->subtitle_header) {
2605  /* ASS code assumes this buffer is null terminated so add extra byte. */
2606  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2607  if (!ost->enc_ctx->subtitle_header)
2608  return AVERROR(ENOMEM);
2609  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2610  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2611  }
2612  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2613  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2614  av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
2615  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2616  !codec->defaults &&
2617  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2618  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2619  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2620 
2621  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2622  if (ret == AVERROR_EXPERIMENTAL)
2623  abort_codec_experimental(codec, 1);
2624  snprintf(error, error_len,
2625  "Error while opening encoder for output stream #%d:%d - "
2626  "maybe incorrect parameters such as bit_rate, rate, width or height",
2627  ost->file_index, ost->index);
2628  return ret;
2629  }
2630  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2631  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2632  av_buffersink_set_frame_size(ost->filter->filter,
2633  ost->enc_ctx->frame_size);
2634  assert_avoptions(ost->encoder_opts);
2635  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2636  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2637  " It takes bits/s as argument, not kbits/s\n");
2638 
2639  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2640  if (ret < 0) {
2641  av_log(NULL, AV_LOG_FATAL,
2642  "Error initializing the output stream codec context.\n");
2643  exit_program(1);
2644  }
2645 
2646  // copy timebase while removing common factors
2647  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2648  ost->st->codec->codec= ost->enc_ctx->codec;
2649  } else {
2650  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2651  if (ret < 0) {
2652  av_log(NULL, AV_LOG_FATAL,
2653  "Error setting up codec context options.\n");
2654  return ret;
2655  }
2656  // copy timebase while removing common factors
2657  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2658  }
2659 
2660  return ret;
2661 }
2662 
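 /* parse_forced_key_frames(): splits the -force_key_frames argument on ','.
  * Each entry is either a time (parsed with parse_time_or_die) or the word
  * "chapters" optionally followed by a time offset, which expands to one
  * forced keyframe per chapter start. The resulting pts values are rescaled
  * to the encoder time base and sorted. */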
2663 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2664  AVCodecContext *avctx)
2665 {
2666  char *p;
2667  int n = 1, i, size, index = 0;
2668  int64_t t, *pts;
2669 
2670  for (p = kf; *p; p++)
2671  if (*p == ',')
2672  n++;
2673  size = n;
2674  pts = av_malloc_array(size, sizeof(*pts));
2675  if (!pts) {
2676  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2677  exit_program(1);
2678  }
2679 
2680  p = kf;
2681  for (i = 0; i < n; i++) {
2682  char *next = strchr(p, ',');
2683 
2684  if (next)
2685  *next++ = 0;
2686 
2687  if (!memcmp(p, "chapters", 8)) {
2688 
2689  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2690  int j;
2691 
2692  if (avf->nb_chapters > INT_MAX - size ||
2693  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2694  sizeof(*pts)))) {
2695  av_log(NULL, AV_LOG_FATAL,
2696  "Could not allocate forced key frames array.\n");
2697  exit_program(1);
2698  }
2699  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2700  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2701 
2702  for (j = 0; j < avf->nb_chapters; j++) {
2703  AVChapter *c = avf->chapters[j];
2704  av_assert1(index < size);
2705  pts[index++] = av_rescale_q(c->start, c->time_base,
2706  avctx->time_base) + t;
2707  }
2708 
2709  } else {
2710 
2711  t = parse_time_or_die("force_key_frames", p, 1);
2712  av_assert1(index < size);
2713  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2714 
2715  }
2716 
2717  p = next;
2718  }
2719 
2720  av_assert0(index == size);
2721  qsort(pts, size, sizeof(*pts), compare_int64);
2722  ost->forced_kf_count = size;
2723  ost->forced_kf_pts = pts;
2724 }
2725 
2726 static void report_new_stream(int input_index, AVPacket *pkt)
2727 {
2728  InputFile *file = input_files[input_index];
2729  AVStream *st = file->ctx->streams[pkt->stream_index];
2730 
2731  if (pkt->stream_index < file->nb_streams_warn)
2732  return;
2733  av_log(file->ctx, AV_LOG_WARNING,
2734  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2736  input_index, pkt->stream_index,
2737  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2738  file->nb_streams_warn = pkt->stream_index + 1;
2739 }
2740 
2741 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2742 {
2743  AVDictionaryEntry *e;
2744 
2745  uint8_t *encoder_string;
2746  int encoder_string_len;
2747  int format_flags = 0;
2748  int codec_flags = 0;
2749 
2750  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2751  return;
2752 
2753  e = av_dict_get(of->opts, "fflags", NULL, 0);
2754  if (e) {
2755  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2756  if (!o)
2757  return;
2758  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2759  }
2760  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2761  if (e) {
2762  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2763  if (!o)
2764  return;
2765  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2766  }
2767 
2768  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2769  encoder_string = av_mallocz(encoder_string_len);
2770  if (!encoder_string)
2771  exit_program(1);
2772 
2773  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2774  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2775  else
2776  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2777  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2778  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2779  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2780 }
2781 
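 /* transcode_init(): binds complex-filtergraph outputs to their source
  * streams, fills in codec parameters for every output stream (copying them
  * from the input for stream copy, or taking them from the filtergraph for
  * encoding), opens all encoders and decoders, writes the output headers and
  * prints the stream mapping. */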
2782 static int transcode_init(void)
2783 {
2784  int ret = 0, i, j, k;
2785  AVFormatContext *oc;
2786  OutputStream *ost;
2787  InputStream *ist;
2788  char error[1024] = {0};
2789  int want_sdp = 1;
2790 
2791  for (i = 0; i < nb_filtergraphs; i++) {
2792  FilterGraph *fg = filtergraphs[i];
2793  for (j = 0; j < fg->nb_outputs; j++) {
2794  OutputFilter *ofilter = fg->outputs[j];
2795  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2796  continue;
2797  if (fg->nb_inputs != 1)
2798  continue;
2799  for (k = nb_input_streams-1; k >= 0 ; k--)
2800  if (fg->inputs[0]->ist == input_streams[k])
2801  break;
2802  ofilter->ost->source_index = k;
2803  }
2804  }
2805 
2806  /* init framerate emulation */
2807  for (i = 0; i < nb_input_files; i++) {
2808  InputFile *ifile = input_files[i];
2809  if (ifile->rate_emu)
2810  for (j = 0; j < ifile->nb_streams; j++)
2811  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2812  }
2813 
2814  /* for each output stream, we compute the right encoding parameters */
2815  for (i = 0; i < nb_output_streams; i++) {
2816  AVCodecContext *enc_ctx;
2817  AVCodecContext *dec_ctx = NULL;
2818  ost = output_streams[i];
2819  oc = output_files[ost->file_index]->ctx;
2820  ist = get_input_stream(ost);
2821 
2822  if (ost->attachment_filename)
2823  continue;
2824 
2825  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2826 
2827  if (ist) {
2828  dec_ctx = ist->dec_ctx;
2829 
2830  ost->st->disposition = ist->st->disposition;
2831  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2832  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2833  } else {
2834  for (j=0; j<oc->nb_streams; j++) {
2835  AVStream *st = oc->streams[j];
2836  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2837  break;
2838  }
2839  if (j == oc->nb_streams)
2840  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2842  }
2843 
2844  if (ost->stream_copy) {
2845  AVRational sar;
2846  uint64_t extra_size;
2847 
2848  av_assert0(ist && !ost->filter);
2849 
2850  extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2851 
2852  if (extra_size > INT_MAX) {
2853  return AVERROR(EINVAL);
2854  }
2855 
2856  /* if stream_copy is selected, no need to decode or encode */
2857  enc_ctx->codec_id = dec_ctx->codec_id;
2858  enc_ctx->codec_type = dec_ctx->codec_type;
2859 
2860  if (!enc_ctx->codec_tag) {
2861  unsigned int codec_tag;
2862  if (!oc->oformat->codec_tag ||
2863  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2864  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2865  enc_ctx->codec_tag = dec_ctx->codec_tag;
2866  }
2867 
2868  enc_ctx->bit_rate = dec_ctx->bit_rate;
2869  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2870  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2871  enc_ctx->field_order = dec_ctx->field_order;
2872  if (dec_ctx->extradata_size) {
2873  enc_ctx->extradata = av_mallocz(extra_size);
2874  if (!enc_ctx->extradata) {
2875  return AVERROR(ENOMEM);
2876  }
2877  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2878  }
2879  enc_ctx->extradata_size= dec_ctx->extradata_size;
2880  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2881 
2882  enc_ctx->time_base = ist->st->time_base;
2883  /*
2884  * Avi is a special case here because it supports variable fps but
2885  * having the fps and timebase differ significantly adds quite some
2886  * overhead
2887  */
2888  if(!strcmp(oc->oformat->name, "avi")) {
2889  if ( copy_tb<0 && ist->st->r_frame_rate.num
2890  && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2891  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2892  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2893  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2894  || copy_tb==2){
2895  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2896  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2897  enc_ctx->ticks_per_frame = 2;
2898  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2899  && av_q2d(ist->st->time_base) < 1.0/500
2900  || copy_tb==0){
2901  enc_ctx->time_base = dec_ctx->time_base;
2902  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2903  enc_ctx->time_base.den *= 2;
2904  enc_ctx->ticks_per_frame = 2;
2905  }
2906  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2907  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2908  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2909  && strcmp(oc->oformat->name, "f4v")
2910  ) {
2911  if( copy_tb<0 && dec_ctx->time_base.den
2912  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2913  && av_q2d(ist->st->time_base) < 1.0/500
2914  || copy_tb==0){
2915  enc_ctx->time_base = dec_ctx->time_base;
2916  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2917  }
2918  }
2919  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2920  && dec_ctx->time_base.num < dec_ctx->time_base.den
2921  && dec_ctx->time_base.num > 0
2922  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2923  enc_ctx->time_base = dec_ctx->time_base;
2924  }
2925 
2926  if (!ost->frame_rate.num)
2927  ost->frame_rate = ist->framerate;
2928  if(ost->frame_rate.num)
2929  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2930 
2931  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2932  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2933 
2934  if (ist->st->nb_side_data) {
2935  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2936  sizeof(*ist->st->side_data));
2937  if (!ost->st->side_data)
2938  return AVERROR(ENOMEM);
2939 
2940  ost->st->nb_side_data = 0;
2941  for (j = 0; j < ist->st->nb_side_data; j++) {
2942  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2943  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2944 
2945  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2946  continue;
2947 
2948  sd_dst->data = av_malloc(sd_src->size);
2949  if (!sd_dst->data)
2950  return AVERROR(ENOMEM);
2951  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2952  sd_dst->size = sd_src->size;
2953  sd_dst->type = sd_src->type;
2954  ost->st->nb_side_data++;
2955  }
2956  }
2957 
2958  ost->parser = av_parser_init(enc_ctx->codec_id);
2959 
2960  switch (enc_ctx->codec_type) {
2961  case AVMEDIA_TYPE_AUDIO:
2962  if (audio_volume != 256) {
2963  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2964  exit_program(1);
2965  }
2966  enc_ctx->channel_layout = dec_ctx->channel_layout;
2967  enc_ctx->sample_rate = dec_ctx->sample_rate;
2968  enc_ctx->channels = dec_ctx->channels;
2969  enc_ctx->frame_size = dec_ctx->frame_size;
2970  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2971  enc_ctx->block_align = dec_ctx->block_align;
2972  enc_ctx->initial_padding = dec_ctx->delay;
2973 #if FF_API_AUDIOENC_DELAY
2974  enc_ctx->delay = dec_ctx->delay;
2975 #endif
2976  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2977  enc_ctx->block_align= 0;
2978  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2979  enc_ctx->block_align= 0;
2980  break;
2981  case AVMEDIA_TYPE_VIDEO:
2982  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2983  enc_ctx->width = dec_ctx->width;
2984  enc_ctx->height = dec_ctx->height;
2985  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2986  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2987  sar =
2988  av_mul_q(ost->frame_aspect_ratio,
2989  (AVRational){ enc_ctx->height, enc_ctx->width });
2990  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2991  "with stream copy may produce invalid files\n");
2992  }
2993  else if (ist->st->sample_aspect_ratio.num)
2994  sar = ist->st->sample_aspect_ratio;
2995  else
2996  sar = dec_ctx->sample_aspect_ratio;
2997  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2998  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2999  ost->st->r_frame_rate = ist->st->r_frame_rate;
3000  break;
3001  case AVMEDIA_TYPE_SUBTITLE:
3002  enc_ctx->width = dec_ctx->width;
3003  enc_ctx->height = dec_ctx->height;
3004  break;
3005  case AVMEDIA_TYPE_UNKNOWN:
3006  case AVMEDIA_TYPE_DATA:
3007  case AVMEDIA_TYPE_ATTACHMENT:
3008  break;
3009  default:
3010  abort();
3011  }
3012  } else {
3013  if (!ost->enc)
3014  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3015  if (!ost->enc) {
3016  /* should only happen when a default codec is not present. */
3017  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3018  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3019  ret = AVERROR(EINVAL);
3020  goto dump_format;
3021  }
3022 
3023  set_encoder_id(output_files[ost->file_index], ost);
3024 
3025  if (!ost->filter &&
3026  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3027  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3028  FilterGraph *fg;
3029  fg = init_simple_filtergraph(ist, ost);
3030  if (configure_filtergraph(fg)) {
3031  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3032  exit_program(1);
3033  }
3034  }
3035 
3036  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3037  if (!ost->frame_rate.num)
3038  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3039  if (ist && !ost->frame_rate.num)
3040  ost->frame_rate = ist->framerate;
3041  if (ist && !ost->frame_rate.num)
3042  ost->frame_rate = ist->st->r_frame_rate;
3043  if (ist && !ost->frame_rate.num) {
3044  ost->frame_rate = (AVRational){25, 1};
3045  av_log(NULL, AV_LOG_WARNING,
3046  "No information "
3047  "about the input framerate is available. Falling "
3048  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3049  "if you want a different framerate.\n",
3050  ost->file_index, ost->index);
3051  }
3052 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3053  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3054  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3055  ost->frame_rate = ost->enc->supported_framerates[idx];
3056  }
3057  // reduce frame rate for mpeg4 to be within the spec limits
3058  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3059  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3060  ost->frame_rate.num, ost->frame_rate.den, 65535);
3061  }
3062  }
3063 
3064  switch (enc_ctx->codec_type) {
3065  case AVMEDIA_TYPE_AUDIO:
3066  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3067  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3068  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3069  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3070  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3071  break;
3072  case AVMEDIA_TYPE_VIDEO:
3073  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3074  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3075  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3076  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3078  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3079  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3080  }
3081  for (j = 0; j < ost->forced_kf_count; j++)
3082  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3083  AV_TIME_BASE_Q,
3084  enc_ctx->time_base);
3085 
3086  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3087  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3088  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3089  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3090  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3091  ost->filter->filter->inputs[0]->sample_aspect_ratio;
3092  if (!strncmp(ost->enc->name, "libx264", 7) &&
3093  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3094  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3095  av_log(NULL, AV_LOG_WARNING,
3096  "No pixel format specified, %s for H.264 encoding chosen.\n"
3097  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3098  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3099  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3100  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3101  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3102  av_log(NULL, AV_LOG_WARNING,
3103  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3104  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3105  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3106  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3107 
3108  ost->st->avg_frame_rate = ost->frame_rate;
3109 
3110  if (!dec_ctx ||
3111  enc_ctx->width != dec_ctx->width ||
3112  enc_ctx->height != dec_ctx->height ||
3113  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3115  }
3116 
3117  if (ost->forced_keyframes) {
3118  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3119  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3120  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3121  if (ret < 0) {
3122  av_log(NULL, AV_LOG_ERROR,
3123  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3124  return ret;
3125  }
3126  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3127  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3128  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3129  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3130 
3131  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3132  // parse it only for static kf timings
3133  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3134  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3135  }
3136  }
3137  break;
3138  case AVMEDIA_TYPE_SUBTITLE:
3139  enc_ctx->time_base = (AVRational){1, 1000};
3140  if (!enc_ctx->width) {
3141  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3142  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3143  }
3144  break;
3145  case AVMEDIA_TYPE_DATA:
3146  break;
3147  default:
3148  abort();
3149  break;
3150  }
3151  }
3152 
3153  if (ost->disposition) {
3154  static const AVOption opts[] = {
3155  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3156  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3157  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3158  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3159  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3160  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3161  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3162  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3163  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3164  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3165  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3166  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3167  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3168  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3169  { NULL },
3170  };
3171  static const AVClass class = {
3172  .class_name = "",
3173  .item_name = av_default_item_name,
3174  .option = opts,
3175  .version = LIBAVUTIL_VERSION_INT,
3176  };
3177  const AVClass *pclass = &class;
3178 
3179  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3180  if (ret < 0)
3181  goto dump_format;
3182  }
3183  }
3184 
3185  /* open each encoder */
3186  for (i = 0; i < nb_output_streams; i++) {
3187  ret = init_output_stream(output_streams[i], error, sizeof(error));
3188  if (ret < 0)
3189  goto dump_format;
3190  }
3191 
3192  /* init input streams */
3193  for (i = 0; i < nb_input_streams; i++)
3194  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3195  for (i = 0; i < nb_output_streams; i++) {
3196  ost = output_streams[i];
3197  avcodec_close(ost->enc_ctx);
3198  }
3199  goto dump_format;
3200  }
3201 
3202  /* discard unused programs */
3203  for (i = 0; i < nb_input_files; i++) {
3204  InputFile *ifile = input_files[i];
3205  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3206  AVProgram *p = ifile->ctx->programs[j];
3207  int discard = AVDISCARD_ALL;
3208 
3209  for (k = 0; k < p->nb_stream_indexes; k++)
3210  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3211  discard = AVDISCARD_DEFAULT;
3212  break;
3213  }
3214  p->discard = discard;
3215  }
3216  }
3217 
3218  /* open files and write file headers */
3219  for (i = 0; i < nb_output_files; i++) {
3220  oc = output_files[i]->ctx;
3221  oc->interrupt_callback = int_cb;
3222  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3223  snprintf(error, sizeof(error),
3224  "Could not write header for output file #%d "
3225  "(incorrect codec parameters ?): %s",
3226  i, av_err2str(ret));
3227  ret = AVERROR(EINVAL);
3228  goto dump_format;
3229  }
3230 // assert_avoptions(output_files[i]->opts);
3231  if (strcmp(oc->oformat->name, "rtp")) {
3232  want_sdp = 0;
3233  }
3234  }
3235 
3236  dump_format:
3237  /* dump the file output parameters - cannot be done before in case
3238  of stream copy */
3239  for (i = 0; i < nb_output_files; i++) {
3240  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3241  }
3242 
3243  /* dump the stream mapping */
3244  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3245  for (i = 0; i < nb_input_streams; i++) {
3246  ist = input_streams[i];
3247 
3248  for (j = 0; j < ist->nb_filters; j++) {
3249  if (ist->filters[j]->graph->graph_desc) {
3250  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3251  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3252  ist->filters[j]->name);
3253  if (nb_filtergraphs > 1)
3254  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3255  av_log(NULL, AV_LOG_INFO, "\n");
3256  }
3257  }
3258  }
3259 
3260  for (i = 0; i < nb_output_streams; i++) {
3261  ost = output_streams[i];
3262 
3263  if (ost->attachment_filename) {
3264  /* an attached file */
3265  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3266  ost->attachment_filename, ost->file_index, ost->index);
3267  continue;
3268  }
3269 
3270  if (ost->filter && ost->filter->graph->graph_desc) {
3271  /* output from a complex graph */
3272  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3273  if (nb_filtergraphs > 1)
3274  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3275 
3276  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3277  ost->index, ost->enc ? ost->enc->name : "?");
3278  continue;
3279  }
3280 
3281  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3282  input_streams[ost->source_index]->file_index,
3283  input_streams[ost->source_index]->st->index,
3284  ost->file_index,
3285  ost->index);
3286  if (ost->sync_ist != input_streams[ost->source_index])
3287  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3288  ost->sync_ist->file_index,
3289  ost->sync_ist->st->index);
3290  if (ost->stream_copy)
3291  av_log(NULL, AV_LOG_INFO, " (copy)");
3292  else {
3293  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3294  const AVCodec *out_codec = ost->enc;
3295  const char *decoder_name = "?";
3296  const char *in_codec_name = "?";
3297  const char *encoder_name = "?";
3298  const char *out_codec_name = "?";
3299  const AVCodecDescriptor *desc;
3300 
3301  if (in_codec) {
3302  decoder_name = in_codec->name;
3303  desc = avcodec_descriptor_get(in_codec->id);
3304  if (desc)
3305  in_codec_name = desc->name;
3306  if (!strcmp(decoder_name, in_codec_name))
3307  decoder_name = "native";
3308  }
3309 
3310  if (out_codec) {
3311  encoder_name = out_codec->name;
3312  desc = avcodec_descriptor_get(out_codec->id);
3313  if (desc)
3314  out_codec_name = desc->name;
3315  if (!strcmp(encoder_name, out_codec_name))
3316  encoder_name = "native";
3317  }
3318 
3319  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3320  in_codec_name, decoder_name,
3321  out_codec_name, encoder_name);
3322  }
3323  av_log(NULL, AV_LOG_INFO, "\n");
3324  }
3325 
3326  if (ret) {
3327  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3328  return ret;
3329  }
3330 
3331  if (sdp_filename || want_sdp) {
3332  print_sdp();
3333  }
3334 
3335  transcode_init_done = 1;
3336 
3337  return 0;
3338 }
3339 
3340 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3341 static int need_output(void)
3342 {
3343  int i;
3344 
3345  for (i = 0; i < nb_output_streams; i++) {
3346  OutputStream *ost = output_streams[i];
3347  OutputFile *of = output_files[ost->file_index];
3348  AVFormatContext *os = output_files[ost->file_index]->ctx;
3349 
3350  if (ost->finished ||
3351  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3352  continue;
3353  if (ost->frame_number >= ost->max_frames) {
3354  int j;
3355  for (j = 0; j < of->ctx->nb_streams; j++)
3356  close_output_stream(output_streams[of->ost_index + j]);
3357  continue;
3358  }
3359 
3360  return 1;
3361  }
3362 
3363  return 0;
3364 }
3365 
3366 /**
3367  * Select the output stream to process.
3368  *
3369  * @return selected output stream, or NULL if none available
3370  */
3371 static OutputStream *choose_output(void)
3372 {
3373  int i;
3374  int64_t opts_min = INT64_MAX;
3375  OutputStream *ost_min = NULL;
3376 
3377  for (i = 0; i < nb_output_streams; i++) {
3378  OutputStream *ost = output_streams[i];
3379  int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3380  AV_TIME_BASE_Q);
3381  if (!ost->finished && opts < opts_min) {
3382  opts_min = opts;
3383  ost_min = ost->unavailable ? NULL : ost;
3384  }
3385  }
3386  return ost_min;
3387 }
3388 
3389 static int check_keyboard_interaction(int64_t cur_time)
3390 {
3391  int i, ret, key;
3392  static int64_t last_time;
3393  if (received_nb_signals)
3394  return AVERROR_EXIT;
3395  /* read_key() returns 0 on EOF */
3396  if(cur_time - last_time >= 100000 && !run_as_daemon){
3397  key = read_key();
3398  last_time = cur_time;
3399  }else
3400  key = -1;
3401  if (key == 'q')
3402  return AVERROR_EXIT;
3403  if (key == '+') av_log_set_level(av_log_get_level()+10);
3404  if (key == '-') av_log_set_level(av_log_get_level()-10);
3405  if (key == 's') qp_hist ^= 1;
3406  if (key == 'h'){
3407  if (do_hex_dump){
3408  do_hex_dump = do_pkt_dump = 0;
3409  } else if(do_pkt_dump){
3410  do_hex_dump = 1;
3411  } else
3412  do_pkt_dump = 1;
3414  }
3415  if (key == 'c' || key == 'C'){
3416  char buf[4096], target[64], command[256], arg[256] = {0};
3417  double time;
3418  int k, n = 0;
3419  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3420  i = 0;
3421  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3422  if (k > 0)
3423  buf[i++] = k;
3424  buf[i] = 0;
3425  if (k > 0 &&
3426  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3427  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3428  target, time, command, arg);
3429  for (i = 0; i < nb_filtergraphs; i++) {
3430  FilterGraph *fg = filtergraphs[i];
3431  if (fg->graph) {
3432  if (time < 0) {
3433  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3434  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3435  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3436  } else if (key == 'c') {
3437  fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
3438  ret = AVERROR_PATCHWELCOME;
3439  } else {
3440  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3441  if (ret < 0)
3442  fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
3443  }
3444  }
3445  }
3446  } else {
3447  av_log(NULL, AV_LOG_ERROR,
3448  "Parse error, at least 3 arguments were expected, "
3449  "only %d given in string '%s'\n", n, buf);
3450  }
3451  }
3452  if (key == 'd' || key == 'D'){
3453  int debug=0;
3454  if(key == 'D') {
3455  debug = input_streams[0]->st->codec->debug<<1;
3456  if(!debug) debug = 1;
3457  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3458  debug += debug;
3459  }else{
3460  char buf[32];
3461  int k = 0;
3462  i = 0;
3463  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3464  if (k > 0)
3465  buf[i++] = k;
3466  buf[i] = 0;
3467  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3468  fprintf(stderr,"error parsing debug value\n");
3469  }
3470  for(i=0;i<nb_input_streams;i++) {
3471  input_streams[i]->st->codec->debug = debug;
3472  }
3473  for(i=0;i<nb_output_streams;i++) {
3474  OutputStream *ost = output_streams[i];
3475  ost->enc_ctx->debug = debug;
3476  }
3477  if(debug) av_log_set_level(AV_LOG_DEBUG);
3478  fprintf(stderr,"debug=%d\n", debug);
3479  }
3480  if (key == '?'){
3481  fprintf(stderr, "key function\n"
3482  "? show this help\n"
3483  "+ increase verbosity\n"
3484  "- decrease verbosity\n"
3485  "c Send command to first matching filter supporting it\n"
3486  "C Send/Que command to all matching filters\n"
3487  "D cycle through available debug modes\n"
3488  "h dump packets/hex press to cycle through the 3 states\n"
3489  "q quit\n"
3490  "s Show QP histogram\n"
3491  );
3492  }
3493  return 0;
3494 }
3495 
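/* input_thread(): one reader thread per input file (used when several inputs
 * are open). It reads packets with av_read_frame() and hands them to the main
 * thread through a thread message queue; if a non-blocking send would
 * overflow the queue it falls back to a blocking send and warns that
 * -thread_queue_size may need to be raised. */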
3496 #if HAVE_PTHREADS
3497 static void *input_thread(void *arg)
3498 {
3499  InputFile *f = arg;
3500  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3501  int ret = 0;
3502 
3503  while (1) {
3504  AVPacket pkt;
3505  ret = av_read_frame(f->ctx, &pkt);
3506 
3507  if (ret == AVERROR(EAGAIN)) {
3508  av_usleep(10000);
3509  continue;
3510  }
3511  if (ret < 0) {
3512  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3513  break;
3514  }
3515  av_dup_packet(&pkt);
3516  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3517  if (flags && ret == AVERROR(EAGAIN)) {
3518  flags = 0;
3519  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3520  av_log(f->ctx, AV_LOG_WARNING,
3521  "Thread message queue blocking; consider raising the "
3522  "thread_queue_size option (current value: %d)\n",
3523  f->thread_queue_size);
3524  }
3525  if (ret < 0) {
3526  if (ret != AVERROR_EOF)
3527  av_log(f->ctx, AV_LOG_ERROR,
3528  "Unable to send packet to main thread: %s\n",
3529  av_err2str(ret));
3530  av_free_packet(&pkt);
3531  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3532  break;
3533  }
3534  }
3535 
3536  return NULL;
3537 }
3538 
3539 static void free_input_threads(void)
3540 {
3541  int i;
3542 
3543  for (i = 0; i < nb_input_files; i++) {
3544  InputFile *f = input_files[i];
3545  AVPacket pkt;
3546 
3547  if (!f || !f->in_thread_queue)
3548  continue;
3549  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3550  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3551  av_free_packet(&pkt);
3552 
3553  pthread_join(f->thread, NULL);
3554  f->joined = 1;
3555  av_thread_message_queue_free(&f->in_thread_queue);
3556  }
3557 }
3558 
3559 static int init_input_threads(void)
3560 {
3561  int i, ret;
3562 
3563  if (nb_input_files == 1)
3564  return 0;
3565 
3566  for (i = 0; i < nb_input_files; i++) {
3567  InputFile *f = input_files[i];
3568 
3569  if (f->ctx->pb ? !f->ctx->pb->seekable :
3570  strcmp(f->ctx->iformat->name, "lavfi"))
3571  f->non_blocking = 1;
3572  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3573  f->thread_queue_size, sizeof(AVPacket));
3574  if (ret < 0)
3575  return ret;
3576 
3577  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3578  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3579  av_thread_message_queue_free(&f->in_thread_queue);
3580  return AVERROR(ret);
3581  }
3582  }
3583  return 0;
3584 }
3585 
3586 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3587 {
3588  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3589  f->non_blocking ?
3590  AV_THREAD_MESSAGE_NONBLOCK : 0);
3591 }
3592 #endif
3593 
3594 static int get_input_packet(InputFile *f, AVPacket *pkt)
3595 {
3596  if (f->rate_emu) {
3597  int i;
3598  for (i = 0; i < f->nb_streams; i++) {
3599  InputStream *ist = input_streams[f->ist_index + i];
3600  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3601  int64_t now = av_gettime_relative() - ist->start;
3602  if (pts > now)
3603  return AVERROR(EAGAIN);
3604  }
3605  }
3606 
3607 #if HAVE_PTHREADS
3608  if (nb_input_files > 1)
3609  return get_input_packet_mt(f, pkt);
3610 #endif
3611  return av_read_frame(f->ctx, pkt);
3612 }
3613 
3614 static int got_eagain(void)
3615 {
3616  int i;
3617  for (i = 0; i < nb_output_streams; i++)
3618  if (output_streams[i]->unavailable)
3619  return 1;
3620  return 0;
3621 }
3622 
3623 static void reset_eagain(void)
3624 {
3625  int i;
3626  for (i = 0; i < nb_input_files; i++)
3627  input_files[i]->eagain = 0;
3628  for (i = 0; i < nb_output_streams; i++)
3629  output_streams[i]->unavailable = 0;
3630 }
3631 
3632 /*
3633  * Return
3634  * - 0 -- one packet was read and processed
3635  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3636  * this function should be called again
3637  * - AVERROR_EOF -- this function should not be called again
3638  */
3639 static int process_input(int file_index)
3640 {
3641  InputFile *ifile = input_files[file_index];
3642  AVFormatContext *is;
3643  InputStream *ist;
3644  AVPacket pkt;
3645  int ret, i, j;
3646 
3647  is = ifile->ctx;
3648  ret = get_input_packet(ifile, &pkt);
3649 
3650  if (ret == AVERROR(EAGAIN)) {
3651  ifile->eagain = 1;
3652  return ret;
3653  }
3654  if (ret < 0) {
3655  if (ret != AVERROR_EOF) {
3656  print_error(is->filename, ret);
3657  if (exit_on_error)
3658  exit_program(1);
3659  }
3660 
3661  for (i = 0; i < ifile->nb_streams; i++) {
3662  ist = input_streams[ifile->ist_index + i];
3663  if (ist->decoding_needed) {
3664  ret = process_input_packet(ist, NULL);
3665  if (ret>0)
3666  return 0;
3667  }
3668 
3669  /* mark all outputs that don't go through lavfi as finished */
3670  for (j = 0; j < nb_output_streams; j++) {
3671  OutputStream *ost = output_streams[j];
3672 
3673  if (ost->source_index == ifile->ist_index + i &&
3674  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3675  finish_output_stream(ost);
3676  }
3677  }
3678 
3679  ifile->eof_reached = 1;
3680  return AVERROR(EAGAIN);
3681  }
3682 
3683  reset_eagain();
3684 
3685  if (do_pkt_dump) {
3686  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3687  is->streams[pkt.stream_index]);
3688  }
3689  /* the following test is needed in case new streams appear
3690  dynamically in the stream: we ignore them */
3691  if (pkt.stream_index >= ifile->nb_streams) {
3692  report_new_stream(file_index, &pkt);
3693  goto discard_packet;
3694  }
3695 
3696  ist = input_streams[ifile->ist_index + pkt.stream_index];
3697 
3698  ist->data_size += pkt.size;
3699  ist->nb_packets++;
3700 
3701  if (ist->discard)
3702  goto discard_packet;
3703 
3704  if (debug_ts) {
3705  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3706  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3707  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3708  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3709  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3710  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3711  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3712  av_ts2str(input_files[ist->file_index]->ts_offset),
3713  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3714  }
3715 
3716  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3717  int64_t stime, stime2;
3718  // Correct the start time based on the enabled streams.
3719  // FIXME: ideally this should be done before the first use of the start time, but we do not know which streams are enabled at that point,
3720  // so we do it here as part of discontinuity handling instead.
3721  if ( ist->next_dts == AV_NOPTS_VALUE
3722  && ifile->ts_offset == -is->start_time
3723  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3724  int64_t new_start_time = INT64_MAX;
3725  for (i=0; i<is->nb_streams; i++) {
3726  AVStream *st = is->streams[i];
3727  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3728  continue;
3729  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3730  }
3731  if (new_start_time > is->start_time) {
3732  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3733  ifile->ts_offset = -new_start_time;
3734  }
3735  }
3736 
3737  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3738  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3739  ist->wrap_correction_done = 1;
3740 
3741  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3742  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3743  ist->wrap_correction_done = 0;
3744  }
3745  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3746  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3747  ist->wrap_correction_done = 0;
3748  }
3749  }
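/*
 * Editor's note: worked example for the wrap correction above, assuming an
 * MPEG-TS input with 33-bit 90 kHz timestamps (pts_wrap_bits = 33, wrap period
 * 2^33 = 8589934592 ticks, roughly 26.5 hours). If capture starts just after the
 * counter wraps, stime is small (say 1000 ticks) while a few straggler packets
 * still carry pre-wrap values near 2^33. Any dts above stime + 2^32 is then
 * treated as pre-wrap and shifted down by 2^33, so e.g. dts = 8589930000 becomes
 * 8589930000 - 8589934592 = -4592, i.e. just before the new origin and monotonic
 * with the post-wrap packets.
 */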
3750 
3751  /* add the stream-global side data to the first packet */
3752  if (ist->nb_packets == 1) {
3753  if (ist->st->nb_side_data)
3754  av_packet_split_side_data(&pkt);
3755  for (i = 0; i < ist->st->nb_side_data; i++) {
3756  AVPacketSideData *src_sd = &ist->st->side_data[i];
3757  uint8_t *dst_data;
3758 
3759  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3760  continue;
3761  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3762  continue;
3763 
3764  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3765  if (!dst_data)
3766  exit_program(1);
3767 
3768  memcpy(dst_data, src_sd->data, src_sd->size);
3769  }
3770  }
3771 
3772  if (pkt.dts != AV_NOPTS_VALUE)
3773  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3774  if (pkt.pts != AV_NOPTS_VALUE)
3775  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3776 
3777  if (pkt.pts != AV_NOPTS_VALUE)
3778  pkt.pts *= ist->ts_scale;
3779  if (pkt.dts != AV_NOPTS_VALUE)
3780  pkt.dts *= ist->ts_scale;
3781 
3782  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3783  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3784  pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3785  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3786  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3787  int64_t delta = pkt_dts - ifile->last_ts;
3788  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3789  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3790  ifile->ts_offset -= delta;
3791  av_log(NULL, AV_LOG_DEBUG,
3792  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3793  delta, ifile->ts_offset);
3794  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3795  if (pkt.pts != AV_NOPTS_VALUE)
3796  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3797  }
3798  }
3799 
3800  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3801  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3802  pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3803  !copy_ts) {
3804  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3805  int64_t delta = pkt_dts - ist->next_dts;
3806  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3807  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3808  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3809  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3810  ifile->ts_offset -= delta;
3811  av_log(NULL, AV_LOG_DEBUG,
3812  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3813  delta, ifile->ts_offset);
3814  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3815  if (pkt.pts != AV_NOPTS_VALUE)
3816  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3817  }
3818  } else {
3819  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3820  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3821  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3822  pkt.dts = AV_NOPTS_VALUE;
3823  }
3824  if (pkt.pts != AV_NOPTS_VALUE){
3825  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3826  delta = pkt_pts - ist->next_dts;
3827  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3828  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3829  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3830  pkt.pts = AV_NOPTS_VALUE;
3831  }
3832  }
3833  }
3834  }
3835 
3836  if (pkt.dts != AV_NOPTS_VALUE)
3837  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3838 
3839  if (debug_ts) {
3840  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3841  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3842  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3843  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3844  av_ts2str(input_files[ist->file_index]->ts_offset),
3845  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3846  }
3847 
3848  sub2video_heartbeat(ist, pkt.pts);
3849 
3850  process_input_packet(ist, &pkt);
3851 
3852 discard_packet:
3853  av_free_packet(&pkt);
3854 
3855  return 0;
3856 }
3857 
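/*
 * Editor's note: worked example for the discontinuity handling in
 * process_input() above. With the default dts_delta_threshold of 10 seconds,
 * suppose the expected next dts (ist->next_dts) corresponds to 12.0 s and the
 * incoming packet suddenly carries a dts of 95.0 s: delta = 83 s exceeds the
 * threshold, so ifile->ts_offset is reduced by 83 s and the same amount is
 * subtracted from pkt.dts/pkt.pts, making the packet continue at ~12.0 s instead
 * of jumping. For formats without AVFMT_TS_DISCONT the timestamps are instead
 * dropped (set to AV_NOPTS_VALUE) once delta exceeds dts_error_threshold.
 */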
3858 /**
3859  * Perform a step of transcoding for the specified filter graph.
3860  *
3861  * @param[in] graph filter graph to consider
3862  * @param[out] best_ist input stream from which reading a frame would allow transcoding to continue
3863  * @return 0 for success, <0 for error
3864  */
3865 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3866 {
3867  int i, ret;
3868  int nb_requests, nb_requests_max = 0;
3869  InputFilter *ifilter;
3870  InputStream *ist;
3871 
3872  *best_ist = NULL;
3873  ret = avfilter_graph_request_oldest(graph->graph);
3874  if (ret >= 0)
3875  return reap_filters(0);
3876 
3877  if (ret == AVERROR_EOF) {
3878  ret = reap_filters(1);
3879  for (i = 0; i < graph->nb_outputs; i++)
3880  close_output_stream(graph->outputs[i]->ost);
3881  return ret;
3882  }
3883  if (ret != AVERROR(EAGAIN))
3884  return ret;
3885 
3886  for (i = 0; i < graph->nb_inputs; i++) {
3887  ifilter = graph->inputs[i];
3888  ist = ifilter->ist;
3889  if (input_files[ist->file_index]->eagain ||
3890  input_files[ist->file_index]->eof_reached)
3891  continue;
3892  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3893  if (nb_requests > nb_requests_max) {
3894  nb_requests_max = nb_requests;
3895  *best_ist = ist;
3896  }
3897  }
3898 
3899  if (!*best_ist)
3900  for (i = 0; i < graph->nb_outputs; i++)
3901  graph->outputs[i]->ost->unavailable = 1;
3902 
3903  return 0;
3904 }
3905 
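/*
 * Editor's note: a minimal sketch (not part of ffmpeg.c) of the pull model that
 * transcode_from_filter() relies on, assuming an already configured graph whose
 * only sink is "sink_ctx". All functions shown are public libavfilter API also
 * used elsewhere in this file. Guarded with #if 0 so it is never compiled.
 */
#if 0
static void drain_graph_example(AVFilterGraph *graph, AVFilterContext *sink_ctx)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return;
    /* keep asking the graph for output until it reports EOF or needs more input */
    while (avfilter_graph_request_oldest(graph) >= 0) {
        while (av_buffersink_get_frame_flags(sink_ctx, frame,
                                             AV_BUFFERSINK_FLAG_NO_REQUEST) >= 0) {
            /* ... consume the filtered frame here ... */
            av_frame_unref(frame);
        }
    }
    av_frame_free(&frame);
}
#endif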
3906 /**
3907  * Run a single step of transcoding.
3908  *
3909  * @return 0 for success, <0 for error
3910  */
3911 static int transcode_step(void)
3912 {
3913  OutputStream *ost;
3914  InputStream *ist;
3915  int ret;
3916 
3917  ost = choose_output();
3918  if (!ost) {
3919  if (got_eagain()) {
3920  reset_eagain();
3921  av_usleep(10000);
3922  return 0;
3923  }
3924  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3925  return AVERROR_EOF;
3926  }
3927 
3928  if (ost->filter) {
3929  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3930  return ret;
3931  if (!ist)
3932  return 0;
3933  } else {
3934  av_assert0(ost->source_index >= 0);
3935  ist = input_streams[ost->source_index];
3936  }
3937 
3938  ret = process_input(ist->file_index);
3939  if (ret == AVERROR(EAGAIN)) {
3940  if (input_files[ist->file_index]->eagain)
3941  ost->unavailable = 1;
3942  return 0;
3943  }
3944 
3945  if (ret < 0)
3946  return ret == AVERROR_EOF ? 0 : ret;
3947 
3948  return reap_filters(0);
3949 }
3950 
3951 /*
3952  * The following code is the main loop of the file converter
3953  */
3954 static int transcode(void)
3955 {
3956  int ret, i;
3957  AVFormatContext *os;
3958  OutputStream *ost;
3959  InputStream *ist;
3960  int64_t timer_start;
3961 
3962  ret = transcode_init();
3963  if (ret < 0)
3964  goto fail;
3965 
3966  if (stdin_interaction) {
3967  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3968  }
3969 
3970  timer_start = av_gettime_relative();
3971 
3972 #if HAVE_PTHREADS
3973  if ((ret = init_input_threads()) < 0)
3974  goto fail;
3975 #endif
3976 
3977  while (!received_sigterm) {
3978  int64_t cur_time= av_gettime_relative();
3979 
3980  /* if 'q' was pressed, exit */
3981  if (stdin_interaction)
3982  if (check_keyboard_interaction(cur_time) < 0)
3983  break;
3984 
3985  /* check if there's any stream where output is still needed */
3986  if (!need_output()) {
3987  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3988  break;
3989  }
3990 
3991  ret = transcode_step();
3992  if (ret < 0) {
3993  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3994  continue;
3995  } else {
3996  char errbuf[128];
3997  av_strerror(ret, errbuf, sizeof(errbuf));
3998 
3999  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4000  break;
4001  }
4002  }
4003 
4004  /* dump a report using the first output video and audio streams */
4005  print_report(0, timer_start, cur_time);
4006  }
4007 #if HAVE_PTHREADS
4008  free_input_threads();
4009 #endif
4010 
4011  /* at the end of stream, we must flush the decoder buffers */
4012  for (i = 0; i < nb_input_streams; i++) {
4013  ist = input_streams[i];
4014  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4015  process_input_packet(ist, NULL);
4016  }
4017  }
4018  flush_encoders();
4019 
4020  term_exit();
4021 
4022  /* write the trailer if needed and close file */
4023  for (i = 0; i < nb_output_files; i++) {
4024  os = output_files[i]->ctx;
4025  av_write_trailer(os);
4026  }
4027 
4028  /* dump report by using the first video and audio streams */
4029  print_report(1, timer_start, av_gettime_relative());
4030 
4031  /* close each encoder */
4032  for (i = 0; i < nb_output_streams; i++) {
4033  ost = output_streams[i];
4034  if (ost->encoding_needed) {
4035  av_freep(&ost->enc_ctx->stats_in);
4036  }
4037  }
4038 
4039  /* close each decoder */
4040  for (i = 0; i < nb_input_streams; i++) {
4041  ist = input_streams[i];
4042  if (ist->decoding_needed) {
4043  avcodec_close(ist->dec_ctx);
4044  if (ist->hwaccel_uninit)
4045  ist->hwaccel_uninit(ist->dec_ctx);
4046  }
4047  }
4048 
4049  /* finished ! */
4050  ret = 0;
4051 
4052  fail:
4053 #if HAVE_PTHREADS
4054  free_input_threads();
4055 #endif
4056 
4057  if (output_streams) {
4058  for (i = 0; i < nb_output_streams; i++) {
4059  ost = output_streams[i];
4060  if (ost) {
4061  if (ost->logfile) {
4062  fclose(ost->logfile);
4063  ost->logfile = NULL;
4064  }
4065  av_freep(&ost->forced_kf_pts);
4066  av_freep(&ost->apad);
4067  av_freep(&ost->disposition);
4068  av_dict_free(&ost->encoder_opts);
4069  av_dict_free(&ost->sws_dict);
4070  av_dict_free(&ost->swr_opts);
4071  av_dict_free(&ost->resample_opts);
4072  av_dict_free(&ost->bsf_args);
4073  }
4074  }
4075  }
4076  return ret;
4077 }
4078 
4079 
4080 static int64_t getutime(void)
4081 {
4082 #if HAVE_GETRUSAGE
4083  struct rusage rusage;
4084 
4085  getrusage(RUSAGE_SELF, &rusage);
4086  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4087 #elif HAVE_GETPROCESSTIMES
4088  HANDLE proc;
4089  FILETIME c, e, k, u;
4090  proc = GetCurrentProcess();
4091  GetProcessTimes(proc, &c, &e, &k, &u);
4092  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4093 #else
4094  return av_gettime_relative();
4095 #endif
4096 }
4097 
4098 static int64_t getmaxrss(void)
4099 {
4100 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4101  struct rusage rusage;
4102  getrusage(RUSAGE_SELF, &rusage);
4103  return (int64_t)rusage.ru_maxrss * 1024;
4104 #elif HAVE_GETPROCESSMEMORYINFO
4105  HANDLE proc;
4106  PROCESS_MEMORY_COUNTERS memcounters;
4107  proc = GetCurrentProcess();
4108  memcounters.cb = sizeof(memcounters);
4109  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4110  return memcounters.PeakPagefileUsage;
4111 #else
4112  return 0;
4113 #endif
4114 }
4115 
4116 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4117 {
4118 }
4119 
4120 int main(int argc, char **argv)
4121 {
4122  int ret;
4123  int64_t ti;
4124 
4125  init_dynload();
4126 
4127  register_exit(ffmpeg_cleanup);
4128 
4129  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4130 
4131  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4132  parse_loglevel(argc, argv, options);
4133 
4134  if(argc>1 && !strcmp(argv[1], "-d")){
4135  run_as_daemon=1;
4136  av_log_set_callback(log_callback_null);
4137  argc--;
4138  argv++;
4139  }
4140 
4141  avcodec_register_all();
4142 #if CONFIG_AVDEVICE
4143  avdevice_register_all();
4144 #endif
4145  avfilter_register_all();
4146  av_register_all();
4147  avformat_network_init();
4148 
4149  show_banner(argc, argv, options);
4150 
4151  term_init();
4152 
4153  /* parse options and open all input/output files */
4154  ret = ffmpeg_parse_options(argc, argv);
4155  if (ret < 0)
4156  exit_program(1);
4157 
4158  if (nb_output_files <= 0 && nb_input_files == 0) {
4159  show_usage();
4160  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4161  exit_program(1);
4162  }
4163 
4164  /* file converter / grab */
4165  if (nb_output_files <= 0) {
4166  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4167  exit_program(1);
4168  }
4169 
4170 // if (nb_input_files == 0) {
4171 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4172 // exit_program(1);
4173 // }
4174 
4175  current_time = ti = getutime();
4176  if (transcode() < 0)
4177  exit_program(1);
4178  ti = getutime() - ti;
4179  if (do_benchmark) {
4180  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4181  }
4182  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4183  decode_error_stat[0], decode_error_stat[1]);
4184  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4185  exit_program(69);
4186 
4187  exit_program(received_nb_signals ? 255 : main_return_code);
4188  return main_return_code;
4189 }
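/*
 * Editor's note: a few illustrative invocations of the tool built from this file
 * (file names and addresses are placeholders):
 *
 *     ffmpeg -i input.mp4 output.mkv
 *         plain transcode driven by the loop in transcode() above
 *     ffmpeg -re -i input.mp4 -f mpegts udp://127.0.0.1:1234
 *         -re enables the rate_emu pacing checked in get_input_packet()
 *     ffmpeg -thread_queue_size 512 -i input0.ts -i input1.ts output.mkv
 *         raises the per-input queue whose "blocking" warning is logged by
 *         input_thread()
 *
 * During an interactive run, pressing 'q' stops the conversion (see
 * check_keyboard_interaction()).
 */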
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1486
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:271
#define extra_bits(eb)
Definition: intrax8.c:152
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:797
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:112
int got_output
Definition: ffmpeg.h:296
#define AV_DISPOSITION_METADATA
Definition: avformat.h:838
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2864
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1802
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1030
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1914
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:388
const struct AVCodec * codec
Definition: avcodec.h:1521
Definition: ffmpeg.h:367
AVRational framerate
Definition: avcodec.h:3312
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:322
const char * s
Definition: avisynth_c.h:631
Bytestream IO Context.
Definition: avio.h:111
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:476
void term_init(void)
Definition: ffmpeg.c:369
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:284
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:230
int nb_outputs
Definition: ffmpeg.h:247
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3756
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:284
AVDictionary * swr_opts
Definition: ffmpeg.h:437
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:257
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2129
int resample_channels
Definition: ffmpeg.h:291
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:311
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:3009
int stream_copy
Definition: ffmpeg.h:443
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:932
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3784
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1535
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2874
AVOption.
Definition: opt.h:255
AVRational frame_rate
Definition: ffmpeg.h:408
int64_t * forced_kf_pts
Definition: ffmpeg.h:416
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:291
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2951
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:432
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:776
#define CODEC_FLAG_PASS2
Definition: avcodec.h:978
static int process_input(int file_index)
Definition: ffmpeg.c:3639
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:447
int exit_on_error
Definition: ffmpeg_opt.c:105
const char * fmt
Definition: avisynth_c.h:632
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:2593
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1458
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
static int run_as_daemon
Definition: ffmpeg.c:130
Memory buffer source API.
void av_log_set_level(int level)
Set the log level.
Definition: log.c:382
AVRational framerate
Definition: ffmpeg.h:280
AVCodecParserContext * parser
Definition: ffmpeg.h:451
static int64_t cur_time
Definition: ffserver.c:255
int decoding_needed
Definition: ffmpeg.h:255
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:926
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1529
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:411
int index
stream index in AVFormatContext
Definition: avformat.h:855
int size
Definition: avcodec.h:1434
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4098
const char * b
Definition: vf_curves.c:109
static int nb_frames_dup
Definition: ffmpeg.c:131
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2580
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:486
#define AV_DISPOSITION_DUB
Definition: avformat.h:810
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1912
int eagain
Definition: ffmpeg.h:343
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1140
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1732
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:398
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:605
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:822
int quality
Definition: ffmpeg.h:463
unsigned num_rects
Definition: avcodec.h:3813
AVFrame * filter_frame
Definition: ffmpeg.h:262
static int transcode_init(void)
Definition: ffmpeg.c:2782
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2587
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2789
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:98
enum AVMediaType type
Definition: avcodec.h:3495
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:824
static int init_input_threads(void)
Definition: ffmpeg.c:3559
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:653
discard all
Definition: avcodec.h:689
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:966
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:325
int64_t input_ts_offset
Definition: ffmpeg.h:345
int do_hex_dump
Definition: ffmpeg_opt.c:99
static AVPacket pkt
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1895
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3013
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2875
int nb_input_streams
Definition: ffmpeg.c:141
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:68
const char * name
Definition: ffmpeg.h:70
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2801
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:252
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2726
Picture data structure.
Definition: avcodec.h:3754
uint64_t packets_written
Definition: ffmpeg.h:457
AVCodec.
Definition: avcodec.h:3482
#define VSYNC_VFR
Definition: ffmpeg.h:54
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:199
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2309
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:495
int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Setup the picture fields based on the specified image parameters and the provided image data buffer...
Definition: avpicture.c:34
int print_stats
Definition: ffmpeg_opt.c:106
float dts_error_threshold
Definition: ffmpeg_opt.c:90
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:477
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:238
uint64_t data_size
Definition: ffmpeg.h:455
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:459
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
Definition: ffmpeg.c:2277
#define log2(x)
Definition: libm.h:122
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:814
struct FilterGraph * graph
Definition: ffmpeg.h:222
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1641
Undefined.
Definition: avutil.h:265
AVSubtitleRect ** rects
Definition: avcodec.h:3814
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:2347
int encoding_needed
Definition: ffmpeg.h:387
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:610
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4116
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3389
Format I/O context.
Definition: avformat.h:1285
uint64_t samples_decoded
Definition: ffmpeg.h:337
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:221
#define AV_RL64
Definition: intreadwrite.h:173
unsigned int nb_stream_indexes
Definition: avformat.h:1223
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:72
int64_t cur_dts
Definition: avformat.h:1031
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3786
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:882
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
uint64_t frames_decoded
Definition: ffmpeg.h:336
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:241
Public dictionary API.
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:953
char * logfile_prefix
Definition: ffmpeg.h:427
static uint8_t * subtitle_out
Definition: ffmpeg.c:138
static int main_return_code
Definition: ffmpeg.c:321
static int64_t start_time
Definition: ffplay.c:325
int copy_initial_nonkeyframes
Definition: ffmpeg.h:445
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:123
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2280
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:544
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
AVDictionary * sws_dict
Definition: ffmpeg.h:436
Opaque data information usually continuous.
Definition: avutil.h:195
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
static void * input_thread(void *arg)
Definition: ffmpeg.c:3497
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:186
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:215
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3247
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:654
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
attribute_deprecated void(* destruct)(struct AVPacket *)
Definition: avcodec.h:1454
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3755
int stdin_interaction
Definition: ffmpeg_opt.c:108
FILE * logfile
Definition: ffmpeg.h:428
AVDictionary * opts
Definition: ffmpeg.h:474
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:570
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1279
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
libavcodec/libavfilter gluing utilities
#define ECHO(name, type, min, max)
Definition: af_aecho.c:185
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2476
static int need_output(void)
Definition: ffmpeg.c:3341
int last_droped
Definition: ffmpeg.h:404
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:366
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:257
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:970
static double psnr(double d)
Definition: ffmpeg.c:1255
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1627
int do_benchmark
Definition: ffmpeg_opt.c:97
Keep a reference to the frame.
Definition: buffersrc.h:62
int audio_sync_method
Definition: ffmpeg_opt.c:93
int shortest
Definition: ffmpeg.h:480
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1353
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:2172
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:4080
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:111
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:94
const char * name
Definition: avcodec.h:5384
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int nb_streams
Definition: ffmpeg.h:351
pthread_t thread
Definition: ffmpeg.h:359
uint8_t * data
Definition: avcodec.h:1433
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * resample_opts
Definition: ffmpeg.h:438
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2663
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:93
AVFilterContext * filter
Definition: ffmpeg.h:227
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4210
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:143
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:408
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1317
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:320
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:819
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1260
int resample_sample_rate
Definition: ffmpeg.h:290
uint8_t * data
Definition: avcodec.h:1383
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:367
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:499
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:294
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3787
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:390
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:3006
AVCodec * dec
Definition: ffmpeg.h:260
static int64_t duration
Definition: ffplay.c:326
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1221
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2781
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:178
int top_field_first
Definition: ffmpeg.h:281
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1413
int nb_output_streams
Definition: ffmpeg.c:146
int file_index
Definition: ffmpeg.h:251
int duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1451
const OptionDef options[]
Definition: ffserver.c:3810
struct AVBitStreamFilterContext * next
Definition: avcodec.h:5379
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2254
unsigned int * stream_index
Definition: avformat.h:1222
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:287
int resample_height
Definition: ffmpeg.h:285
int wrap_correction_done
Definition: ffmpeg.h:272
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:274
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:262
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:836
unsigned m
Definition: audioconvert.c:187
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:117
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1304
int64_t next_dts
Definition: ffmpeg.h:267
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1479
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:147
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:480
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:50
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2914
static volatile int transcode_init_done
Definition: ffmpeg.c:319
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3496
#define isatty(fd)
Definition: checkasm.c:52
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3543
int rate_emu
Definition: ffmpeg.h:354
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1822
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1298
static void reset_eagain(void)
Definition: ffmpeg.c:3623
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:636
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2420
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:327
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1547
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:606
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:2923
FilterGraph ** filtergraphs
Definition: ffmpeg.c:150
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:464
AVFilterContext * filter
Definition: ffmpeg.h:220
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:324
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:486
int64_t start
Definition: ffmpeg.h:264
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3785
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:334
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:397
int video_sync_method
Definition: ffmpeg_opt.c:94
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:133
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:175
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
char * sdp_filename
Definition: ffmpeg_opt.c:86
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:405
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2202
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:107
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:117
int capabilities
Codec capabilities.
Definition: avcodec.h:3501
int initial_padding
Audio only.
Definition: avcodec.h:3304
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:125
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
Release bitstream filter context.
unsigned int nb_programs
Definition: avformat.h:1436
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:199
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:425
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1416
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1607
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:491
AVChapter ** chapters
Definition: avformat.h:1487
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:342
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2614
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:123
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1385
int av_log_get_level(void)
Get the current log level.
Definition: log.c:377
const char * name
Name of the codec implementation.
Definition: avcodec.h:3489
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:807
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:71
int side_data_elems
Definition: avcodec.h:1445
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:587
int force_fps
Definition: ffmpeg.h:409
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:937
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1218
#define FFMAX(a, b)
Definition: common.h:90
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:128
int qp_hist
Definition: ffmpeg_opt.c:107
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:57
float frame_drop_threshold
Definition: ffmpeg_opt.c:95
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:936
int64_t error[4]
Definition: ffmpeg.h:469
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1439
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2935
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2333
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:152
uint32_t end_display_time
Definition: avcodec.h:3812
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3815
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:873
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2591
OutputFilter * filter
Definition: ffmpeg.h:430
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:427
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational frame_aspect_ratio
Definition: ffmpeg.h:413
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:813
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1482
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:821
common internal API header
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1341
static int nb_frames_drop
Definition: ffmpeg.c:132
A bitmap, pict will be set.
Definition: avcodec.h:3766
int nb_output_files
Definition: ffmpeg.c:148
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:160
int bit_rate
the average bitrate
Definition: avcodec.h:1577
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:198
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
static int transcode(void)
Definition: ffmpeg.c:3954
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:788
char filename[1024]
input or output filename
Definition: avformat.h:1361
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3794
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:134
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:246
#define FFMIN(a, b)
Definition: common.h:92
float y
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:397
int saw_first_ts
Definition: ffmpeg.h:277
This side data contains quality related information from the encoder.
Definition: avcodec.h:1303
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1934
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:78
#define FFSIGN(a)
Definition: common.h:69
struct OutputStream * ost
Definition: ffmpeg.h:228
int width
picture width / height.
Definition: avcodec.h:1691
PVOID HANDLE
char * apad
Definition: ffmpeg.h:440
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:421
const char * name
Definition: avformat.h:525
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:221
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:767
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:751
int nb_filtergraphs
Definition: ffmpeg.c:151
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:80
int64_t last_ts
Definition: ffmpeg.h:347
#define TRUE
Definition: windows2linux.h:33
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3586
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:68
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:283
int do_pkt_dump
Definition: ffmpeg_opt.c:100
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2570
int64_t max_frames
Definition: ffmpeg.h:401
#define AV_RL32
Definition: intreadwrite.h:146
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:326
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:303
float u
int audio_channels_mapped
Definition: ffmpeg.h:425
int n
Definition: avisynth_c.h:547
AVDictionary * metadata
Definition: avformat.h:928
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1650
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:967
Usually treated as AVMEDIA_TYPE_DATA.
Definition: avutil.h:192
Opaque data information usually sparse.
Definition: avutil.h:197
#define FF_API_DESTRUCT_PACKET
Definition: version.h:83
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:112
static int restore_tty
Definition: ffmpeg.c:157
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
static int got_eagain(void)
Definition: ffmpeg.c:3614
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:228
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:141
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:3047
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:128
#define av_log2
Definition: intmath.h:100
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:411
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:823
int ret
Definition: ffmpeg.h:297
int audio_volume
Definition: ffmpeg_opt.c:92
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:854
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:484
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
InputFilter ** filters
Definition: ffmpeg.h:312
int fix_sub_duration
Definition: ffmpeg.h:294
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:350
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4223
Definition: ffmpeg.h:69
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2292
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:64
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:809
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:169
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2741
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Filter bitstream.
int frame_size
Definition: mxfenc.c:1819
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:50
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:837
int ost_index
Definition: ffmpeg.h:475
struct InputStream * sync_ist
Definition: ffmpeg.h:391
AVS_Value src
Definition: avisynth_c.h:482
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:721
enum AVMediaType codec_type
Definition: avcodec.h:1520
double ts_scale
Definition: ffmpeg.h:276
int unavailable
Definition: ffmpeg.h:442
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: avcodec.h:3502
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:164
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2533
enum AVCodecID codec_id
Definition: avcodec.h:1529
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:313
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:252
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1477
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:184
float max_error_rate
Definition: ffmpeg_opt.c:110
int sample_rate
samples per second
Definition: avcodec.h:2272
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
uint64_t frames_encoded
Definition: ffmpeg.h:459
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2062
AVIOContext * pb
I/O context.
Definition: avformat.h:1327
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:173
int ist_index
Definition: ffmpeg.h:344
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:550
uint8_t flags
Definition: pixdesc.h:90
int debug
debug
Definition: avcodec.h:2852
static void print_sdp(void)
Definition: ffmpeg.c:2437
const char * graph_desc
Definition: ffmpeg.h:239
int guess_layout_max
Definition: ffmpeg.h:282
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
int64_t start_time
Definition: ffmpeg.h:348
#define AVFMT_RAWPICTURE
Format wants AVPicture structure for raw picture data.
Definition: avformat.h:480
main external API structure.
Definition: avcodec.h:1512
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:357
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:466
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:765
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2895
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:252
const char * attachment_filename
Definition: ffmpeg.h:444
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1544
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1785
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:596
AVCodecContext * enc_ctx
Definition: ffmpeg.h:399
void * buf
Definition: avisynth_c.h:553
AVFrame * decoded_frame
Definition: ffmpeg.h:261
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1628
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
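A small, hedged sketch of typical av_dict_set() usage (string keys and values only; the function name and tag values are illustrative): setting output-file metadata while keeping any value the caller already provided.

#include "libavformat/avformat.h"
#include "libavutil/dict.h"

/* oc is assumed to be an output AVFormatContext created by the caller. */
static void tag_output(AVFormatContext *oc)
{
    /* keep an existing "encoder" tag if one is already set */
    av_dict_set(&oc->metadata, "encoder", LIBAVFORMAT_IDENT, AV_DICT_DONT_OVERWRITE);
    av_dict_set(&oc->metadata, "title",   "transcoded clip", 0);
}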
Replacements for frequently missing libm functions.
struct AVBitStreamFilter * filter
Definition: avcodec.h:5377
AVCodecContext * dec_ctx
Definition: ffmpeg.h:259
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:3865
AVStream * st
Definition: ffmpeg.h:252
int * audio_channels_map
Definition: ffmpeg.h:424
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:422
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixel format
Definition: avcodec.h:1782
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:145
int index
Definition: gxfenc.c:89
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:907
rational number numerator/denominator
Definition: rational.h:43
int file_index
Definition: ffmpeg.h:383
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:48
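For orientation only, a minimal sketch of the producer/consumer pattern these message-queue calls support (loosely mirroring the per-input packet queue ffmpeg.c sets up when built with pthreads); the queue size, error code and helper names here are arbitrary assumptions.

#include "libavcodec/avcodec.h"           /* AVPacket */
#include "libavutil/threadmessage.h"
#include "libavutil/error.h"

static AVThreadMessageQueue *queue;

static int setup_queue(void)
{
    /* room for 8 messages, each one AVPacket wide */
    return av_thread_message_queue_alloc(&queue, 8, sizeof(AVPacket));
}

static void teardown_queue(void)
{
    /* wake up a blocked sender, then release the queue */
    av_thread_message_queue_set_err_send(queue, AVERROR_EOF);
    av_thread_message_queue_free(&queue);
}
/* reader thread:  av_thread_message_queue_send(queue, &pkt, 0);
 * main thread:    av_thread_message_queue_recv(queue, &pkt, AV_THREAD_MESSAGE_NONBLOCK); */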
static int current_time
Definition: ffmpeg.c:135
int64_t sync_opts
Definition: ffmpeg.h:392
char * vstats_filename
Definition: ffmpeg_opt.c:85
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:157
char * disposition
Definition: ffmpeg.h:447
#define mid_pred
Definition: mathops.h:95
AVMediaType
Definition: avutil.h:191
discard useless packets, such as 0-size packets in AVI
Definition: avcodec.h:684
static av_always_inline av_const long int lrint(double x)
Definition: libm.h:148
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize an AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:922
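A hedged sketch of avio_open2() and avio_closep() used as a pair, in the spirit of how print_sdp() writes its description out; the function name, filename and text buffer are placeholders.

#include "libavformat/avio.h"

static int dump_text(const char *filename, const char *text)
{
    AVIOContext *out = NULL;
    int ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, NULL, NULL);
    if (ret < 0)
        return ret;
    avio_printf(out, "%s", text);
    return avio_closep(&out);   /* flushes, closes and NULLs the pointer */
}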
int nb_streams_warn
Definition: ffmpeg.h:353
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2533
AVDictionary * decoder_opts
Definition: ffmpeg.h:279
int autorotate
Definition: ffmpeg.h:284
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:574
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1361
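Purely as an illustrative sketch of the 2.8-era decoder-open pattern around this call (the stream index i and format context ic are assumed to exist; error handling is abbreviated):

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"

static int open_decoder(AVFormatContext *ic, int i)
{
    AVCodecContext *dec_ctx = ic->streams[i]->codec;        /* per-stream context (2.8 API) */
    AVCodec *dec            = avcodec_find_decoder(dec_ctx->codec_id);
    AVDictionary *opts      = NULL;
    int ret;

    if (!dec)
        return AVERROR_DECODER_NOT_FOUND;
    av_dict_set(&opts, "refcounted_frames", "1", 0);         /* keep decoded frames refcounted */
    ret = avcodec_open2(dec_ctx, dec, &opts);
    av_dict_free(&opts);
    return ret;
}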
int showed_multi_packet_warning
Definition: ffmpeg.h:278
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:109
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:3693
int64_t ts_offset
Definition: ffmpeg.h:346
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:328
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:869
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3911
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:433
static void free_input_threads(void)
Definition: ffmpeg.c:3539
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3422
misc parsing utilities
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
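A brief, hedged note in code form: the buffer source accepts one decoded frame per call and a NULL frame marks end of stream; the ifilter and feed_filter names below stand in for a configured buffersrc instance and are not taken from ffmpeg.c.

#include "libavfilter/buffersrc.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"

/* ifilter: a configured buffersrc AVFilterContext; frame: a decoded AVFrame, or NULL for EOF */
static void feed_filter(AVFilterContext *ifilter, AVFrame *frame)
{
    /* the buffer source takes ownership of the frame's data references */
    if (av_buffersrc_add_frame(ifilter, frame) < 0)
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
}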
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1480
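For context, the canonical 2.8-era demuxing loop built around this call might look like the following hedged sketch (fmt_ctx and video_index are assumed to have been set up elsewhere):

#include "libavformat/avformat.h"

static void demux_loop(AVFormatContext *fmt_ctx, int video_index)
{
    AVPacket pkt;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {   /* returns AVERROR_EOF at the end */
        if (pkt.stream_index == video_index) {
            /* hand pkt to the decoder here */
        }
        av_free_packet(&pkt);                     /* 2.8-era way to release the payload */
    }
}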
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:245
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:358
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:566
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:262
AVFrame * filtered_frame
Definition: ffmpeg.h:402
int source_index
Definition: ffmpeg.h:385
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:265
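A minimal, hedged sketch of allocating a writable video frame with this helper; the dimensions, pixel format and alignment are arbitrary choices for illustration.

#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"

static AVFrame *alloc_video_frame(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;
    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 1280;
    frame->height = 720;
    if (av_frame_get_buffer(frame, 32) < 0) {   /* 32-byte alignment is a common choice */
        av_frame_free(&frame);
        return NULL;
    }
    return frame;   /* later: av_frame_unref() drops the buffers, av_frame_free() the frame */
}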
static volatile int received_nb_signals
Definition: ffmpeg.c:318
int copy_prior_start
Definition: ffmpeg.h:446
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:465
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1593
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:627
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:84
int nb_filters
Definition: ffmpeg.h:313
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:133
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2485
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1370
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
uint8_t level
Definition: svq3.c:150
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:420
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:268
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:68
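As a small illustration with assumed names (not taken from ffmpeg.c): turning a libav* error code into a readable message before logging it.

#include <stdio.h>
#include "libavutil/error.h"
#include "libavutil/log.h"

static void report_error(const char *filename, int err)
{
    char errbuf[AV_ERROR_MAX_STRING_SIZE];

    if (av_strerror(err, errbuf, sizeof(errbuf)) < 0)
        snprintf(errbuf, sizeof(errbuf), "error %d", err);
    av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, errbuf);
}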
int resample_sample_fmt
Definition: ffmpeg.h:289
int forced_kf_count
Definition: ffmpeg.h:417
int64_t start
Definition: avformat.h:1251
OSTFinished finished
Definition: ffmpeg.h:441
char * forced_keyframes
Definition: ffmpeg.h:419
uint64_t data_size
Definition: ffmpeg.h:332
int resample_width
Definition: ffmpeg.h:286
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:270
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1042
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1444
struct FilterGraph * graph
Definition: ffmpeg.h:229
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
uint64_t limit_filesize
Definition: ffmpeg.h:478
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1412
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:82
AVIOContext * progress_avio
Definition: ffmpeg.c:136
AVThreadMessageQueue * in_thread_queue
Definition: ffmpeg.h:358
if(ret< 0)
Definition: vf_mcdeint.c:280
int main(int argc, char **argv)
Definition: ffmpeg.c:4120
int reinit_filters
Definition: ffmpeg.h:315
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:477
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:261
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:928
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:906
static double c[64]
AVStream * st
Definition: muxing.c:54
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:917
static AVCodecContext * dec_ctx
uint32_t start_display_time
Definition: avcodec.h:3811
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1020
uint64_t samples_encoded
Definition: ffmpeg.h:460
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1250
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:208
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:3072
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:915
char * key
Definition: dict.h:87
uint32_t BOOL
static FILE * vstats_file
Definition: ffmpeg.c:115
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:82
AVFrame * last_frame
Definition: ffmpeg.h:403
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int copy_ts
Definition: ffmpeg_opt.c:101
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1297
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3728
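A hedged open/probe/close sketch showing where this call fits; the function name and input filename are placeholders.

#include "libavformat/avformat.h"

static int probe_input(const char *filename)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, filename, NULL, NULL);
    if (ret < 0)
        return ret;
    ret = avformat_find_stream_info(ic, NULL);
    /* ... inspect ic->streams[] here ... */
    avformat_close_input(&ic);   /* frees the context and sets ic to NULL */
    return ret;
}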
AVFormatContext * ctx
Definition: ffmpeg.h:341
int pict_type
Definition: ffmpeg.h:466
static struct termios oldtty
Definition: ffmpeg.c:156
AVCodec * enc
Definition: ffmpeg.h:400
AVSubtitle subtitle
Definition: ffmpeg.h:298
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:636
int eof_reached
Definition: ffmpeg.h:342
int forced_kf_index
Definition: ffmpeg.h:418
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:820
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:487
char * avfilter
Definition: ffmpeg.h:431
uint8_t * name
Definition: ffmpeg.h:223
char * value
Definition: dict.h:88
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
int top_field_first
If the content is interlaced, whether the top field is displayed first.
Definition: frame.h:372
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:89
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:708
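To make the expression API concrete, here is a hedged parse/evaluate/free sketch with a single constant named "t"; the expression, value and helper name are arbitrary, but the same machinery backs forced_keyframes_pexpr.

#include <math.h>
#include "libavutil/eval.h"

static double eval_at_time(const char *expr_str, double t)
{
    static const char *const names[] = { "t", NULL };   /* hypothetical constant set */
    double vals[] = { t };
    AVExpr *e = NULL;
    double r = NAN;

    if (av_expr_parse(&e, expr_str, names,
                      NULL, NULL, NULL, NULL, 0, NULL) >= 0) {
        r = av_expr_eval(e, vals, NULL);
        av_expr_free(e);
    }
    return r;
}
/* eval_at_time("gte(t,3)", 3.5) evaluates to 1.0 */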
int channels
number of audio channels
Definition: avcodec.h:2273
int top_field_first
Definition: ffmpeg.h:410
OutputFilter ** outputs
Definition: ffmpeg.h:246
InputFile ** input_files
Definition: ffmpeg.c:142
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2523
void av_log_set_flags(int arg)
Definition: log.c:387
Immediately push the frame to the output.
Definition: buffersrc.h:55
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:237
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:219
AVDictionary * bsf_args
Definition: ffmpeg.h:439
AVFormatContext * ctx
Definition: ffmpeg.h:473
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:812
void show_usage(void)
Definition: ffmpeg_opt.c:2873
Sample-format conversion helpers (CONV_FUNC_GROUP(), ff_audio_convert_alloc(), ff_audio_convert()) from libavresample's audio_convert.c
An instance of a filter.
Definition: avfilter.h:633
#define LIBAVCODEC_IDENT
Definition: version.h:43
char * hwaccel_device
Definition: ffmpeg.h:319
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1432
AVDictionary * encoder_opts
Definition: ffmpeg.h:435
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:986
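A hedged finalization sketch for an output file, roughly the order in which ffmpeg.c tears things down: write the trailer, close the I/O context unless the muxer manages its own file, then free the format context. The wrapper function is an assumption.

#include "libavformat/avformat.h"

static int finish_output(AVFormatContext *oc)
{
    int ret = av_write_trailer(oc);                /* flushes and finalizes the muxer */

    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);                      /* we opened pb, so we close it */
    avformat_free_context(oc);
    return ret;
}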
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:113
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4567
int height
Definition: frame.h:220
InputFilter ** inputs
Definition: ffmpeg.h:244
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2269
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:328
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:640
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:329
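As an example of reading container-provided side data with this call; the helper name and the choice of a display-matrix payload are assumptions for illustration.

#include "libavcodec/avcodec.h"
#include "libavutil/display.h"

static double packet_rotation(AVPacket *pkt)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_DISPLAYMATRIX, &size);

    if (sd && size >= 9 * (int)sizeof(int32_t))
        return av_display_rotation_get((int32_t *)sd);   /* rotation in degrees */
    return 0.0;
}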
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2265
OutputFile ** output_files
Definition: ffmpeg.c:147
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
#define av_malloc_array(a, b)
static void flush_encoders(void)
Definition: ffmpeg.c:1705
int copy_tb
Definition: ffmpeg_opt.c:103
static volatile int received_sigterm
Definition: ffmpeg.c:317
#define FFSWAP(type, a, b)
Definition: common.h:95
int discard
Definition: ffmpeg.h:253
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3594
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2050
int thread_queue_size
Definition: ffmpeg.h:362
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:303
int stream_index
Definition: avcodec.h:1435
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:896
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:318
enum AVSubtitleType type
Definition: avcodec.h:3795
int64_t first_pts
Definition: ffmpeg.h:395
int nb_inputs
Definition: ffmpeg.h:245
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:919
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:227
#define DECODING_FOR_OST
Definition: ffmpeg.h:256
int index
Definition: ffmpeg.h:384
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1073
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
uint64_t resample_channel_layout
Definition: ffmpeg.h:292
OSTFinished
Definition: ffmpeg.h:377
This structure stores compressed data.
Definition: avcodec.h:1410
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:963
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:2256
int non_blocking
Definition: ffmpeg.h:360
int delay
Codec delay.
Definition: avcodec.h:1674
int debug_ts
Definition: ffmpeg_opt.c:104
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3371
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:225
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:252
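Finally, a tiny hedged sketch of the zero-allocate / free-and-NULL pairing that av_mallocz() and av_freep() provide; the struct and function names are hypothetical.

#include "libavutil/mem.h"

typedef struct Job { int id; char *name; } Job;   /* hypothetical payload */

static Job *job_alloc(int id)
{
    Job *j = av_mallocz(sizeof(*j));   /* all fields start out zeroed */
    if (j)
        j->id = id;
    return j;
}

static void job_free(Job **pj)
{
    if (*pj)
        av_freep(&(*pj)->name);        /* frees and NULLs the member */
    av_freep(pj);                      /* frees the struct and NULLs the caller's pointer */
}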
static void sigterm_handler(int sig)
Definition: ffmpeg.c:324
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1426
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:117
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1569
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:91
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:240
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1437
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:811
int joined
Definition: ffmpeg.h:361
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
InputStream ** input_streams
Definition: ffmpeg.c:140
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:69
Definition: ffmpeg.h:371
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:796
struct InputStream::@25 prev_sub
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3246