FFmpeg  4.0.2
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity; consumed by the shared cmdutils banner/version code. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Destination of -vstats logging; opened on demand, closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
113 
/* Variable names usable in -force_key_frames expressions; NULL-terminated. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
127 
/* File-scope state shared across the transcoding loop. */
128 static int run_as_daemon = 0;
/* frame duplication / drop statistics used by the -vsync machinery;
   dup_warning is the threshold at which the "More than %d frames
   duplicated" warning fires (it is multiplied by 10 each time) */
129 static int nb_frames_dup = 0;
130 static unsigned dup_warning = 1000;
131 static int nb_frames_drop = 0;
132 static int64_t decode_error_stat[2];
133 
134 static int want_sdp = 1;
135 
/* NOTE(review): compared against int64_t values from getutime() in
   update_benchmark(); upstream declares this as int64_t, so the plain
   "int" here looks like an extraction artifact -- confirm against the
   original file. */
136 static int current_time;
138 
140 
145 
150 
153 
154 #if HAVE_TERMIOS_H
155 
156 /* init terminal so that we can grab keys */
/* terminal attributes saved by term_init(); restored by
   term_exit_sigsafe() when restore_tty is set */
157 static struct termios oldtty;
158 static int restore_tty;
159 #endif
160 
161 #if HAVE_THREADS
/* defined later in this file; tears down per-input demuxer threads */
162 static void free_input_threads(void);
163 #endif
164 
165 /* sub2video hack:
166  Convert subtitles to video with alpha to insert them in filter graphs.
167  This is a temporary solution until libavfilter gets real subtitles support.
168  */
169 
/* Reset the sub2video canvas frame to a freshly allocated, fully zeroed
   (transparent) buffer sized from the decoder or the configured sub2video
   dimensions. Returns 0 on success or a negative AVERROR code.
   NOTE(review): the signature line (inner 170, upstream
   "static int sub2video_get_blank_frame(InputStream *ist)") and inner
   line 178 (upstream the frame->format assignment) were lost in
   extraction -- confirm against the original file. */
171 {
172  int ret;
173  AVFrame *frame = ist->sub2video.frame;
174 
175  av_frame_unref(frame);
176  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
177  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
179  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
180  return ret;
/* zero bytes == fully transparent pixels on the canvas */
181  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
182  return 0;
183 }
184 
185 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
186  AVSubtitleRect *r)
187 {
188  uint32_t *pal, *dst2;
189  uint8_t *src, *src2;
190  int x, y;
191 
192  if (r->type != SUBTITLE_BITMAP) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
194  return;
195  }
196  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
197  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
198  r->x, r->y, r->w, r->h, w, h
199  );
200  return;
201  }
202 
203  dst += r->y * dst_linesize + r->x * 4;
204  src = r->data[0];
205  pal = (uint32_t *)r->data[1];
206  for (y = 0; y < r->h; y++) {
207  dst2 = (uint32_t *)dst;
208  src2 = src;
209  for (x = 0; x < r->w; x++)
210  *(dst2++) = pal[*(src2++)];
211  dst += dst_linesize;
212  src += r->linesize[0];
213  }
214 }
215 
/* Push the current sub2video canvas frame, stamped with @pts, into every
   buffer source fed by this input stream, and remember the pts as
   last_pts.
   NOTE(review): inner lines 226-227 (the flags argument of
   av_buffersrc_add_frame_flags, upstream
   AV_BUFFERSRC_FLAG_KEEP_REF | AV_BUFFERSRC_FLAG_PUSH) were lost in
   extraction -- confirm against the original file. */
216 static void sub2video_push_ref(InputStream *ist, int64_t pts)
217 {
218  AVFrame *frame = ist->sub2video.frame;
219  int i;
220  int ret;
221 
222  av_assert1(frame->data[0]);
223  ist->sub2video.last_pts = frame->pts = pts;
224  for (i = 0; i < ist->nb_filters; i++) {
225  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228  if (ret != AVERROR_EOF && ret < 0)
229  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
230  av_err2str(ret));
231  }
232 }
233 
/* Render subtitle @sub (or clear the canvas when @sub is NULL) onto the
   sub2video frame and push it to the filters; updates
   ist->sub2video.end_pts with when the display should end.
   NOTE(review): the signature line (inner 234, upstream
   "static void sub2video_update(InputStream *ist, AVSubtitle *sub)") and
   inner line 256 (the first half of the av_log() call whose message
   continues at inner 257) were lost in extraction. The "int8_t *dst"
   below is upstream "uint8_t *dst" -- likely an extraction artifact,
   confirm against the original file. */
235 {
236  AVFrame *frame = ist->sub2video.frame;
237  int8_t *dst;
238  int dst_linesize;
239  int num_rects, i;
240  int64_t pts, end_pts;
241 
242  if (!frame)
243  return;
244  if (sub) {
/* convert display window from AV_TIME_BASE to the stream time base */
245  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
246  AV_TIME_BASE_Q, ist->st->time_base);
247  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
248  AV_TIME_BASE_Q, ist->st->time_base);
249  num_rects = sub->num_rects;
250  } else {
/* NULL sub == clear request: blank canvas from the previous end time on */
251  pts = ist->sub2video.end_pts;
252  end_pts = INT64_MAX;
253  num_rects = 0;
254  }
255  if (sub2video_get_blank_frame(ist) < 0) {
257  "Impossible to get a blank canvas.\n");
258  return;
259  }
260  dst = frame->data [0];
261  dst_linesize = frame->linesize[0];
262  for (i = 0; i < num_rects; i++)
263  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
264  sub2video_push_ref(ist, pts);
265  ist->sub2video.end_pts = end_pts;
266 }
267 
268 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
269 {
270  InputFile *infile = input_files[ist->file_index];
271  int i, j, nb_reqs;
272  int64_t pts2;
273 
274  /* When a frame is read from a file, examine all sub2video streams in
275  the same file and send the sub2video frame again. Otherwise, decoded
276  video frames could be accumulating in the filter graph while a filter
277  (possibly overlay) is desperately waiting for a subtitle frame. */
278  for (i = 0; i < infile->nb_streams; i++) {
279  InputStream *ist2 = input_streams[infile->ist_index + i];
280  if (!ist2->sub2video.frame)
281  continue;
282  /* subtitles seem to be usually muxed ahead of other streams;
283  if not, subtracting a larger time here is necessary */
284  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
285  /* do not send the heartbeat frame if the subtitle is already ahead */
286  if (pts2 <= ist2->sub2video.last_pts)
287  continue;
288  if (pts2 >= ist2->sub2video.end_pts ||
289  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
290  sub2video_update(ist2, NULL);
291  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
292  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
293  if (nb_reqs)
294  sub2video_push_ref(ist2, pts2);
295  }
296 }
297 
298 static void sub2video_flush(InputStream *ist)
299 {
300  int i;
301  int ret;
302 
303  if (ist->sub2video.end_pts < INT64_MAX)
304  sub2video_update(ist, NULL);
305  for (i = 0; i < ist->nb_filters; i++) {
306  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
307  if (ret != AVERROR_EOF && ret < 0)
308  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
309  }
310 }
311 
312 /* end of sub2video hack */
313 
/* Restore the terminal attributes saved by term_init(). Deliberately kept
   to async-signal-safe calls only (tcsetattr is async-signal-safe per
   POSIX) because it may run from a signal handler. */
314 static void term_exit_sigsafe(void)
315 {
316 #if HAVE_TERMIOS_H
317  if(restore_tty)
318  tcsetattr (0, TCSANOW, &oldtty);
319 #endif
320 }
321 
/* Public termination hook: silence logging and restore the terminal.
   NOTE(review): inner line 325 was lost in extraction; upstream it calls
   term_exit_sigsafe() here -- confirm against the original file. */
322 void term_exit(void)
323 {
324  av_log(NULL, AV_LOG_QUIET, "%s", "");
326 }
327 
/* Signal bookkeeping: which signal arrived last and overall process exit
   state.
   NOTE(review): one declaration (inner line 330, upstream the atomic
   transcode_init_done flag) was lost in extraction -- confirm against
   the original file. */
328 static volatile int received_sigterm = 0;
329 static volatile int received_nb_signals = 0;
331 static volatile int ffmpeg_exited = 0;
332 static int main_return_code = 0;
333 
/* Signal handler: record the received signal and hard-exit after more
   than three signals. Uses write(2) instead of stdio because stdio is
   not async-signal-safe.
   NOTE(review): the parameter line (inner 335, upstream
   "sigterm_handler(int sig)") and inner lines 339-340 (upstream:
   received_nb_signals++ and a term_exit_sigsafe() call) were lost in
   extraction -- confirm against the original file. */
334 static void
336 {
337  int ret;
338  received_sigterm = sig;
341  if(received_nb_signals > 3) {
342  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
343  strlen("Received > 3 system signals, hard exiting\n"));
344  if (ret < 0) { /* Do nothing */ };
345  exit(123);
346  }
347 }
348 
349 #if HAVE_SETCONSOLECTRLHANDLER
350 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
351 {
352  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
353 
354  switch (fdwCtrlType)
355  {
356  case CTRL_C_EVENT:
357  case CTRL_BREAK_EVENT:
358  sigterm_handler(SIGINT);
359  return TRUE;
360 
361  case CTRL_CLOSE_EVENT:
362  case CTRL_LOGOFF_EVENT:
363  case CTRL_SHUTDOWN_EVENT:
364  sigterm_handler(SIGTERM);
365  /* Basically, with these 3 events, when we return from this method the
366  process is hard terminated, so stall as long as we need to
367  to try and let the main thread(s) clean up and gracefully terminate
368  (we have at most 5 seconds, but should be done far before that). */
369  while (!ffmpeg_exited) {
370  Sleep(0);
371  }
372  return TRUE;
373 
374  default:
375  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
376  return FALSE;
377  }
378 }
379 #endif
380 
/* Put the controlling terminal into a raw-ish, non-echoing mode so that
   single keypresses can be read by read_key(), and install the
   termination signal handlers.
   NOTE(review): inner line 384 was lost in extraction; upstream it is
   "if (!run_as_daemon) {", whose closing brace is the one at inner
   line 402 -- confirm against the original file before editing. */
381 void term_init(void)
382 {
383 #if HAVE_TERMIOS_H
385  struct termios tty;
386  if (tcgetattr (0, &tty) == 0) {
/* save current state so term_exit_sigsafe() can restore it */
387  oldtty = tty;
388  restore_tty = 1;
389 
390  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
391  |INLCR|IGNCR|ICRNL|IXON);
392  tty.c_oflag |= OPOST;
393  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
394  tty.c_cflag &= ~(CSIZE|PARENB);
395  tty.c_cflag |= CS8;
/* blocking read returns after 1 byte with no inter-byte timeout */
396  tty.c_cc[VMIN] = 1;
397  tty.c_cc[VTIME] = 0;
398 
399  tcsetattr (0, TCSANOW, &tty);
400  }
401  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
402  }
403 #endif
404 
405  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
406  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
407 #ifdef SIGXCPU
408  signal(SIGXCPU, sigterm_handler);
409 #endif
410 #ifdef SIGPIPE
411  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
412 #endif
413 #if HAVE_SETCONSOLECTRLHANDLER
414  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
415 #endif
416 }
417 
/**
 * Poll for a single keypress without blocking.
 *
 * @return the key as an unsigned char value, or a negative value when no
 *         input is available (or on read error); always -1 on platforms
 *         with neither termios nor kbhit support
 */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int ready;
    struct timeval no_wait;
    fd_set readable;

    /* zero timeout: select() only reports readiness, never blocks */
    FD_ZERO(&readable);
    FD_SET(0, &readable);
    no_wait.tv_sec  = 0;
    no_wait.tv_usec = 0;
    ready = select(1, &readable, NULL, NULL, &no_wait);
    if (ready > 0) {
        ready = read(0, &ch, 1);
        return ready == 1 ? ch : ready;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (!nchars)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
469 
/* AVIOInterruptCallback: lets libavformat abort blocking I/O once
   termination has been requested.
   NOTE(review): the return statement (inner line 472) was lost in
   extraction; upstream it returns whether received_nb_signals exceeds
   the transcode-init counter -- confirm against the original file. */
470 static int decode_interrupt_cb(void *ctx)
471 {
473 }
474 
476 
/* Global teardown (registered as the exit handler): drains and frees all
   filtergraphs, closes/frees output files and streams, input files and
   streams, closes the vstats file, and logs the exit reason. @ret is the
   process exit status being propagated.
   NOTE(review): many inner lines were lost in extraction (e.g. 488, 500,
   504-506, 525, 530, 536, 550-566 partially, 589-598 partially, 605,
   609, 618); among them is the declaration of "s" used below at inner
   line 533 (upstream: AVFormatContext *s). Verify against the original
   file before editing this function. */
477 static void ffmpeg_cleanup(int ret)
478 {
479  int i, j;
480 
481  if (do_benchmark) {
482  int maxrss = getmaxrss() / 1024;
483  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
484  }
485 
/* drain and free every filtergraph, including queued frames/subtitles */
486  for (i = 0; i < nb_filtergraphs; i++) {
487  FilterGraph *fg = filtergraphs[i];
489  for (j = 0; j < fg->nb_inputs; j++) {
490  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
491  AVFrame *frame;
492  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
493  sizeof(frame), NULL);
494  av_frame_free(&frame);
495  }
496  av_fifo_freep(&fg->inputs[j]->frame_queue);
497  if (fg->inputs[j]->ist->sub2video.sub_queue) {
498  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
499  AVSubtitle sub;
501  &sub, sizeof(sub), NULL);
502  avsubtitle_free(&sub);
503  }
505  }
507  av_freep(&fg->inputs[j]->name);
508  av_freep(&fg->inputs[j]);
509  }
510  av_freep(&fg->inputs);
511  for (j = 0; j < fg->nb_outputs; j++) {
512  av_freep(&fg->outputs[j]->name);
513  av_freep(&fg->outputs[j]->formats);
514  av_freep(&fg->outputs[j]->channel_layouts);
515  av_freep(&fg->outputs[j]->sample_rates);
516  av_freep(&fg->outputs[j]);
517  }
518  av_freep(&fg->outputs);
519  av_freep(&fg->graph_desc);
520 
521  av_freep(&filtergraphs[i]);
522  }
523  av_freep(&filtergraphs);
524 
526 
527  /* close files */
528  for (i = 0; i < nb_output_files; i++) {
529  OutputFile *of = output_files[i];
531  if (!of)
532  continue;
533  s = of->ctx;
534  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
535  avio_closep(&s->pb);
537  av_dict_free(&of->opts);
538 
539  av_freep(&output_files[i]);
540  }
541  for (i = 0; i < nb_output_streams; i++) {
542  OutputStream *ost = output_streams[i];
543 
544  if (!ost)
545  continue;
546 
547  for (j = 0; j < ost->nb_bitstream_filters; j++)
548  av_bsf_free(&ost->bsf_ctx[j]);
549  av_freep(&ost->bsf_ctx);
550 
552  av_frame_free(&ost->last_frame);
553  av_dict_free(&ost->encoder_opts);
554 
555  av_freep(&ost->forced_keyframes);
557  av_freep(&ost->avfilter);
558  av_freep(&ost->logfile_prefix);
559 
561  ost->audio_channels_mapped = 0;
562 
563  av_dict_free(&ost->sws_dict);
564 
567 
/* discard packets still waiting for the muxer header */
568  if (ost->muxing_queue) {
569  while (av_fifo_size(ost->muxing_queue)) {
570  AVPacket pkt;
571  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
572  av_packet_unref(&pkt);
573  }
575  }
576 
577  av_freep(&output_streams[i]);
578  }
579 #if HAVE_THREADS
580  free_input_threads();
581 #endif
582  for (i = 0; i < nb_input_files; i++) {
583  avformat_close_input(&input_files[i]->ctx);
584  av_freep(&input_files[i]);
585  }
586  for (i = 0; i < nb_input_streams; i++) {
587  InputStream *ist = input_streams[i];
588 
591  av_dict_free(&ist->decoder_opts);
594  av_freep(&ist->filters);
595  av_freep(&ist->hwaccel_device);
596  av_freep(&ist->dts_buffer);
597 
599 
600  av_freep(&input_streams[i]);
601  }
602 
603  if (vstats_file) {
604  if (fclose(vstats_file))
606  "Error closing vstats file, loss of information possible: %s\n",
607  av_err2str(AVERROR(errno)));
608  }
610 
611  av_freep(&input_streams);
612  av_freep(&input_files);
613  av_freep(&output_streams);
614  av_freep(&output_files);
615 
616  uninit_opts();
617 
619 
620  if (received_sigterm) {
621  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
622  (int) received_sigterm);
623  } else if (ret && atomic_load(&transcode_init_done)) {
624  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
625  }
626  term_exit();
/* lets the Windows CtrlHandler stop stalling and return */
627  ffmpeg_exited = 1;
628 }
629 
/* Iterate over every entry of dictionary b.
   NOTE(review): the signature line (inner 630, upstream
   "static void remove_avoptions(AVDictionary **a, AVDictionary *b)") and
   the loop body (inner 635, upstream an av_dict_set(a, t->key, NULL,
   AV_DICT_MATCH_CASE) call that deletes matching keys from *a) were lost
   in extraction -- confirm against the original file. */
631 {
632  AVDictionaryEntry *t = NULL;
633 
634  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
636  }
637 }
638 
/* Abort the program if any user-supplied option in m was not consumed by
   the library (i.e. the dictionary is non-empty after configuration).
   NOTE(review): the signature line (inner 639, upstream
   "static void assert_avoptions(AVDictionary *m)") and the declaration
   of t (inner 641) were lost in extraction. */
640 {
642  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
643  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
644  exit_program(1);
645  }
646 }
647 
648 static void abort_codec_experimental(AVCodec *c, int encoder)
649 {
650  exit_program(1);
651 }
652 
653 static void update_benchmark(const char *fmt, ...)
654 {
655  if (do_benchmark_all) {
656  int64_t t = getutime();
657  va_list va;
658  char buf[1024];
659 
660  if (fmt) {
661  va_start(va, fmt);
662  vsnprintf(buf, sizeof(buf), fmt, va);
663  va_end(va);
664  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
665  }
666  current_time = t;
667  }
668 }
669 
670 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
671 {
672  int i;
673  for (i = 0; i < nb_output_streams; i++) {
674  OutputStream *ost2 = output_streams[i];
675  ost2->finished |= ost == ost2 ? this_stream : others;
676  }
677 }
678 
/* Hand one packet for stream ost to the muxer of file of: enforces
   -frames accounting, queues packets while the muxer header is not yet
   written, rescales timestamps to the stream time base and fixes up
   non-monotonic DTS. The packet is consumed (unreferenced) on return.
   When unqueue is nonzero the packet came back out of the muxing queue
   and must not be counted a second time.
   NOTE(review): several inner lines were lost in extraction (708,
   725-726, 731, 765, 798, 809), including the declaration of "sd" used
   at inner line 733 (upstream: a uint8_t* from
   av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, NULL)) and the
   condition at inner 765/766 guarding the monotonicity fixup. Verify
   against the original file before editing. */
679 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
680 {
681  AVFormatContext *s = of->ctx;
682  AVStream *st = ost->st;
683  int ret;
684 
685  /*
686  * Audio encoders may split the packets -- #frames in != #packets out.
687  * But there is no reordering, so we can limit the number of output packets
688  * by simply dropping them here.
689  * Counting encoded video frames needs to be done separately because of
690  * reordering, see do_video_out().
691  * Do not count the packet when unqueued because it has been counted when queued.
692  */
693  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
694  if (ost->frame_number >= ost->max_frames) {
695  av_packet_unref(pkt);
696  return;
697  }
698  ost->frame_number++;
699  }
700 
701  if (!of->header_written) {
702  AVPacket tmp_pkt = {0};
703  /* the muxer is not initialized yet, buffer the packet */
704  if (!av_fifo_space(ost->muxing_queue)) {
705  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
706  ost->max_muxing_queue_size);
707  if (new_size <= av_fifo_size(ost->muxing_queue)) {
709  "Too many packets buffered for output stream %d:%d.\n",
710  ost->file_index, ost->st->index);
711  exit_program(1);
712  }
713  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
714  if (ret < 0)
715  exit_program(1);
716  }
717  ret = av_packet_ref(&tmp_pkt, pkt);
718  if (ret < 0)
719  exit_program(1);
720  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
721  av_packet_unref(pkt);
722  return;
723  }
724 
727  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
728 
729  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
730  int i;
732  NULL);
/* quality/error stats come from the packet's QUALITY_STATS side data */
733  ost->quality = sd ? AV_RL32(sd) : -1;
734  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
735 
736  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
737  if (sd && i < sd[5])
738  ost->error[i] = AV_RL64(sd + 8 + 8*i);
739  else
740  ost->error[i] = -1;
741  }
742 
743  if (ost->frame_rate.num && ost->is_cfr) {
744  if (pkt->duration > 0)
745  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
746  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
747  ost->mux_timebase);
748  }
749  }
750 
751  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
752 
753  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
754  if (pkt->dts != AV_NOPTS_VALUE &&
755  pkt->pts != AV_NOPTS_VALUE &&
756  pkt->dts > pkt->pts) {
757  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
758  pkt->dts, pkt->pts,
759  ost->file_index, ost->st->index);
/* replace both by the median of {pts, dts, last_mux_dts + 1} */
760  pkt->pts =
761  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
762  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
763  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
764  }
766  pkt->dts != AV_NOPTS_VALUE &&
767  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
768  ost->last_mux_dts != AV_NOPTS_VALUE) {
769  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
770  if (pkt->dts < max) {
771  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
772  av_log(s, loglevel, "Non-monotonous DTS in output stream "
773  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
774  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
775  if (exit_on_error) {
776  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
777  exit_program(1);
778  }
779  av_log(s, loglevel, "changing to %"PRId64". This may result "
780  "in incorrect timestamps in the output file.\n",
781  max);
782  if (pkt->pts >= pkt->dts)
783  pkt->pts = FFMAX(pkt->pts, max);
784  pkt->dts = max;
785  }
786  }
787  }
788  ost->last_mux_dts = pkt->dts;
789 
790  ost->data_size += pkt->size;
791  ost->packets_written++;
792 
793  pkt->stream_index = ost->index;
794 
795  if (debug_ts) {
796  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
797  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
799  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
800  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
801  pkt->size
802  );
803  }
804 
805  ret = av_interleaved_write_frame(s, pkt);
806  if (ret < 0) {
807  print_error("av_interleaved_write_frame()", ret);
808  main_return_code = 1;
810  }
811  av_packet_unref(pkt);
812 }
813 
/* Mark the encoder of this stream finished; with -shortest, clamp the
   whole output file's recording time to this stream's end time.
   NOTE(review): the signature line (inner 814, upstream
   "static void close_output_stream(OutputStream *ost)") was lost in
   extraction. */
815 {
816  OutputFile *of = output_files[ost->file_index];
817 
818  ost->finished |= ENCODER_FINISHED;
819  if (of->shortest) {
820  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
821  of->recording_time = FFMIN(of->recording_time, end);
822  }
823 }
824 
825 /*
826  * Send a single packet to the output, applying any bitstream filters
827  * associated with the output stream. This may result in any number
828  * of packets actually being written, depending on what bitstream
829  * filters are applied. The supplied packet is consumed and will be
830  * blank (as if newly-allocated) when this function returns.
831  *
832  * If eof is set, instead indicate EOF to all bitstream filters and
833  * therefore flush any delayed packets to the output. A blank packet
834  * must be supplied in this case.
835  */
/* NOTE(review): the first line of the signature (inner 836, upstream
   "static void output_packet(OutputFile *of, AVPacket *pkt,") was lost
   in extraction. */
837  OutputStream *ost, int eof)
838 {
839  int ret = 0;
840 
841  /* apply the output bitstream filters, if any */
842  if (ost->nb_bitstream_filters) {
843  int idx;
844 
845  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
846  if (ret < 0)
847  goto finish;
848 
849  eof = 0;
850  idx = 1;
/* idx walks the filter chain; it drops back when a filter needs input */
851  while (idx) {
852  /* get a packet from the previous filter up the chain */
853  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
854  if (ret == AVERROR(EAGAIN)) {
855  ret = 0;
856  idx--;
857  continue;
858  } else if (ret == AVERROR_EOF) {
859  eof = 1;
860  } else if (ret < 0)
861  goto finish;
862 
863  /* send it to the next filter down the chain or to the muxer */
864  if (idx < ost->nb_bitstream_filters) {
865  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
866  if (ret < 0)
867  goto finish;
868  idx++;
869  eof = 0;
870  } else if (eof)
871  goto finish;
872  else
873  write_packet(of, pkt, ost, 0);
874  }
875  } else if (!eof)
876  write_packet(of, pkt, ost, 0);
877 
878 finish:
879  if (ret < 0 && ret != AVERROR_EOF) {
880  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
881  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
882  if(exit_on_error)
883  exit_program(1);
884  }
885 }
886 
/* Return 1 while ost is still inside its output file's -t recording
   window; otherwise close the stream and return 0.
   NOTE(review): the signature line (inner 887, upstream
   "static int check_recording_time(OutputStream *ost)") and the first
   arguments of the comparison (inner 892, upstream an av_compare_ts()
   call on ost->sync_opts - ost->first_pts vs of->recording_time) were
   lost in extraction -- confirm against the original file. */
888 {
889  OutputFile *of = output_files[ost->file_index];
890 
891  if (of->recording_time != INT64_MAX &&
893  AV_TIME_BASE_Q) >= 0) {
894  close_output_stream(ost);
895  return 0;
896  }
897  return 1;
898 }
899 
/* Encode one audio frame on ost's encoder and feed every packet it
   produces to the muxer via output_packet(). Exits the program on
   encoder errors.
   NOTE(review): inner line 921 was lost in extraction -- confirm against
   the original file. */
900 static void do_audio_out(OutputFile *of, OutputStream *ost,
901  AVFrame *frame)
902 {
903  AVCodecContext *enc = ost->enc_ctx;
904  AVPacket pkt;
905  int ret;
906 
907  av_init_packet(&pkt);
908  pkt.data = NULL;
909  pkt.size = 0;
910 
911  if (!check_recording_time(ost))
912  return;
913 
/* without a usable pts (or with -async < 0), continue from sync_opts */
914  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
915  frame->pts = ost->sync_opts;
916  ost->sync_opts = frame->pts + frame->nb_samples;
917  ost->samples_encoded += frame->nb_samples;
918  ost->frames_encoded++;
919 
920  av_assert0(pkt.size || !pkt.data);
922  if (debug_ts) {
923  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
924  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
925  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
926  enc->time_base.num, enc->time_base.den);
927  }
928 
929  ret = avcodec_send_frame(enc, frame);
930  if (ret < 0)
931  goto error;
932 
/* drain every packet the encoder has ready for this frame */
933  while (1) {
934  ret = avcodec_receive_packet(enc, &pkt);
935  if (ret == AVERROR(EAGAIN))
936  break;
937  if (ret < 0)
938  goto error;
939 
940  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
941 
942  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
943 
944  if (debug_ts) {
945  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
946  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
947  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
948  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
949  }
950 
951  output_packet(of, &pkt, ost, 0);
952  }
953 
954  return;
955 error:
956  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
957  exit_program(1);
958 }
959 
/* Encode one AVSubtitle on ost's subtitle encoder and mux the result.
   Timestamps are shifted so -ss / -t (check_recording_time) work; per
   the comment below, DVB subtitles are encoded twice (draw + clear).
   NOTE(review): the condition line (inner 990, upstream
   "if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)") guarding "nb = 2"
   was lost in extraction -- confirm against the original file. */
960 static void do_subtitle_out(OutputFile *of,
961  OutputStream *ost,
962  AVSubtitle *sub)
963 {
964  int subtitle_out_max_size = 1024 * 1024;
965  int subtitle_out_size, nb, i;
966  AVCodecContext *enc;
967  AVPacket pkt;
968  int64_t pts;
969 
970  if (sub->pts == AV_NOPTS_VALUE) {
971  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
972  if (exit_on_error)
973  exit_program(1);
974  return;
975  }
976 
977  enc = ost->enc_ctx;
978 
/* lazily allocate the shared 1 MiB encode buffer on first use */
979  if (!subtitle_out) {
980  subtitle_out = av_malloc(subtitle_out_max_size);
981  if (!subtitle_out) {
982  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
983  exit_program(1);
984  }
985  }
986 
987  /* Note: DVB subtitle need one packet to draw them and one other
988  packet to clear them */
989  /* XXX: signal it in the codec context ? */
991  nb = 2;
992  else
993  nb = 1;
994 
995  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
996  pts = sub->pts;
997  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
998  pts -= output_files[ost->file_index]->start_time;
999  for (i = 0; i < nb; i++) {
1000  unsigned save_num_rects = sub->num_rects;
1001 
1002  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1003  if (!check_recording_time(ost))
1004  return;
1005 
1006  sub->pts = pts;
1007  // start_display_time is required to be 0
1008  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1009  sub->end_display_time -= sub->start_display_time;
1010  sub->start_display_time = 0;
/* second (clear) pass encodes with zero rectangles */
1011  if (i == 1)
1012  sub->num_rects = 0;
1013 
1014  ost->frames_encoded++;
1015 
1016  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1017  subtitle_out_max_size, sub);
1018  if (i == 1)
1019  sub->num_rects = save_num_rects;
1020  if (subtitle_out_size < 0) {
1021  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1022  exit_program(1);
1023  }
1024 
1025  av_init_packet(&pkt);
1026  pkt.data = subtitle_out;
1027  pkt.size = subtitle_out_size;
1028  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1029  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1031  /* XXX: the pts correction is handled here. Maybe handling
1032  it in the codec would be better */
1033  if (i == 0)
1034  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1035  else
1036  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1037  }
1038  pkt.dts = pkt.pts;
1039  output_packet(of, &pkt, ost, 0);
1040  }
1041 }
1042 
1043 static void do_video_out(OutputFile *of,
1044  OutputStream *ost,
1045  AVFrame *next_picture,
1046  double sync_ipts)
1047 {
1048  int ret, format_video_sync;
1049  AVPacket pkt;
1050  AVCodecContext *enc = ost->enc_ctx;
1051  AVCodecParameters *mux_par = ost->st->codecpar;
1052  AVRational frame_rate;
1053  int nb_frames, nb0_frames, i;
1054  double delta, delta0;
1055  double duration = 0;
1056  int frame_size = 0;
1057  InputStream *ist = NULL;
1059 
1060  if (ost->source_index >= 0)
1061  ist = input_streams[ost->source_index];
1062 
1063  frame_rate = av_buffersink_get_frame_rate(filter);
1064  if (frame_rate.num > 0 && frame_rate.den > 0)
1065  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1066 
1067  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1068  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1069 
1070  if (!ost->filters_script &&
1071  !ost->filters &&
1072  next_picture &&
1073  ist &&
1074  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1075  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1076  }
1077 
1078  if (!next_picture) {
1079  //end, flushing
1080  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1081  ost->last_nb0_frames[1],
1082  ost->last_nb0_frames[2]);
1083  } else {
1084  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1085  delta = delta0 + duration;
1086 
1087  /* by default, we output a single frame */
1088  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1089  nb_frames = 1;
1090 
1091  format_video_sync = video_sync_method;
1092  if (format_video_sync == VSYNC_AUTO) {
1093  if(!strcmp(of->ctx->oformat->name, "avi")) {
1094  format_video_sync = VSYNC_VFR;
1095  } else
1096  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1097  if ( ist
1098  && format_video_sync == VSYNC_CFR
1099  && input_files[ist->file_index]->ctx->nb_streams == 1
1100  && input_files[ist->file_index]->input_ts_offset == 0) {
1101  format_video_sync = VSYNC_VSCFR;
1102  }
1103  if (format_video_sync == VSYNC_CFR && copy_ts) {
1104  format_video_sync = VSYNC_VSCFR;
1105  }
1106  }
1107  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1108 
1109  if (delta0 < 0 &&
1110  delta > 0 &&
1111  format_video_sync != VSYNC_PASSTHROUGH &&
1112  format_video_sync != VSYNC_DROP) {
1113  if (delta0 < -0.6) {
1114  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1115  } else
1116  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1117  sync_ipts = ost->sync_opts;
1118  duration += delta0;
1119  delta0 = 0;
1120  }
1121 
1122  switch (format_video_sync) {
1123  case VSYNC_VSCFR:
1124  if (ost->frame_number == 0 && delta0 >= 0.5) {
1125  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1126  delta = duration;
1127  delta0 = 0;
1128  ost->sync_opts = lrint(sync_ipts);
1129  }
1130  case VSYNC_CFR:
1131  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1132  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1133  nb_frames = 0;
1134  } else if (delta < -1.1)
1135  nb_frames = 0;
1136  else if (delta > 1.1) {
1137  nb_frames = lrintf(delta);
1138  if (delta0 > 1.1)
1139  nb0_frames = lrintf(delta0 - 0.6);
1140  }
1141  break;
1142  case VSYNC_VFR:
1143  if (delta <= -0.6)
1144  nb_frames = 0;
1145  else if (delta > 0.6)
1146  ost->sync_opts = lrint(sync_ipts);
1147  break;
1148  case VSYNC_DROP:
1149  case VSYNC_PASSTHROUGH:
1150  ost->sync_opts = lrint(sync_ipts);
1151  break;
1152  default:
1153  av_assert0(0);
1154  }
1155  }
1156 
1157  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1158  nb0_frames = FFMIN(nb0_frames, nb_frames);
1159 
1160  memmove(ost->last_nb0_frames + 1,
1161  ost->last_nb0_frames,
1162  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1163  ost->last_nb0_frames[0] = nb0_frames;
1164 
1165  if (nb0_frames == 0 && ost->last_dropped) {
1166  nb_frames_drop++;
1168  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1169  ost->frame_number, ost->st->index, ost->last_frame->pts);
1170  }
1171  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1172  if (nb_frames > dts_error_threshold * 30) {
1173  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1174  nb_frames_drop++;
1175  return;
1176  }
1177  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1178  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1179  if (nb_frames_dup > dup_warning) {
1180  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1181  dup_warning *= 10;
1182  }
1183  }
1184  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1185 
1186  /* duplicates frame if needed */
1187  for (i = 0; i < nb_frames; i++) {
1188  AVFrame *in_picture;
1189  av_init_packet(&pkt);
1190  pkt.data = NULL;
1191  pkt.size = 0;
1192 
1193  if (i < nb0_frames && ost->last_frame) {
1194  in_picture = ost->last_frame;
1195  } else
1196  in_picture = next_picture;
1197 
1198  if (!in_picture)
1199  return;
1200 
1201  in_picture->pts = ost->sync_opts;
1202 
1203 #if 1
1204  if (!check_recording_time(ost))
1205 #else
1206  if (ost->frame_number >= ost->max_frames)
1207 #endif
1208  return;
1209 
1210  {
1211  int forced_keyframe = 0;
1212  double pts_time;
1213 
1215  ost->top_field_first >= 0)
1216  in_picture->top_field_first = !!ost->top_field_first;
1217 
1218  if (in_picture->interlaced_frame) {
1219  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1220  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1221  else
1222  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1223  } else
1224  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1225 
1226  in_picture->quality = enc->global_quality;
1227  in_picture->pict_type = 0;
1228 
1229  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1230  in_picture->pts * av_q2d(enc->time_base) : NAN;
1231  if (ost->forced_kf_index < ost->forced_kf_count &&
1232  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1233  ost->forced_kf_index++;
1234  forced_keyframe = 1;
1235  } else if (ost->forced_keyframes_pexpr) {
1236  double res;
1237  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1240  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1246  res);
1247  if (res) {
1248  forced_keyframe = 1;
1254  }
1255 
1257  } else if ( ost->forced_keyframes
1258  && !strncmp(ost->forced_keyframes, "source", 6)
1259  && in_picture->key_frame==1) {
1260  forced_keyframe = 1;
1261  }
1262 
1263  if (forced_keyframe) {
1264  in_picture->pict_type = AV_PICTURE_TYPE_I;
1265  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1266  }
1267 
1269  if (debug_ts) {
1270  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1271  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1272  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1273  enc->time_base.num, enc->time_base.den);
1274  }
1275 
1276  ost->frames_encoded++;
1277 
1278  ret = avcodec_send_frame(enc, in_picture);
1279  if (ret < 0)
1280  goto error;
1281 
1282  while (1) {
1283  ret = avcodec_receive_packet(enc, &pkt);
1284  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1285  if (ret == AVERROR(EAGAIN))
1286  break;
1287  if (ret < 0)
1288  goto error;
1289 
1290  if (debug_ts) {
1291  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1292  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1293  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1294  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1295  }
1296 
1297  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1298  pkt.pts = ost->sync_opts;
1299 
1300  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1301 
1302  if (debug_ts) {
1303  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1304  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1305  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1306  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1307  }
1308 
1309  frame_size = pkt.size;
1310  output_packet(of, &pkt, ost, 0);
1311 
1312  /* if two pass, output log */
1313  if (ost->logfile && enc->stats_out) {
1314  fprintf(ost->logfile, "%s", enc->stats_out);
1315  }
1316  }
1317  }
1318  ost->sync_opts++;
1319  /*
1320  * For video, number of frames in == number of packets out.
1321  * But there may be reordering, so we can't throw away frames on encoder
1322  * flush, we need to limit them here, before they go into encoder.
1323  */
1324  ost->frame_number++;
1325 
1326  if (vstats_filename && frame_size)
1327  do_video_stats(ost, frame_size);
1328  }
1329 
1330  if (!ost->last_frame)
1331  ost->last_frame = av_frame_alloc();
1332  av_frame_unref(ost->last_frame);
1333  if (next_picture && ost->last_frame)
1334  av_frame_ref(ost->last_frame, next_picture);
1335  else
1336  av_frame_free(&ost->last_frame);
1337 
1338  return;
1339 error:
1340  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1341  exit_program(1);
1342 }
1343 
1344 static double psnr(double d)
1345 {
1346  return -10.0 * log10(d);
1347 }
1348 
/* Append one line of per-frame encoding statistics (frame number, quantizer,
 * optional PSNR, frame size, stream size, end timestamp and bitrates) to the
 * log file named by -vstats_filename. Only video streams are logged. */
1350 {
1351  AVCodecContext *enc;
1352  int frame_number;
1353  double ti1, bitrate, avg_bitrate;
1354 
1355  /* this is executed just the first time do_video_stats is called */
1356  if (!vstats_file) {
1357  vstats_file = fopen(vstats_filename, "w");
1358  if (!vstats_file) {
1359  perror("fopen");
1360  exit_program(1);
1361  }
1362  }
1363 
1364  enc = ost->enc_ctx;
1365  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1366  frame_number = ost->st->nb_frames;
 /* vstats_version 2 additionally records the output file and stream index */
1367  if (vstats_version <= 1) {
1368  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1369  ost->quality / (float)FF_QP2LAMBDA);
1370  } else {
1371  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1372  ost->quality / (float)FF_QP2LAMBDA);
1373  }
1374 
 /* PSNR is derived from the per-frame squared error normalized to 8-bit range */
1375  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1376  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1377 
1378  fprintf(vstats_file,"f_size= %6d ", frame_size);
1379  /* compute pts value */
1380  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
 /* clamp to avoid dividing by (near) zero in the average bitrate below */
1381  if (ti1 < 0.01)
1382  ti1 = 0.01;
1383 
1384  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1385  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1386  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1387  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1388  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1389  }
1390 }
1391 
1392 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1393 
/* Mark an output stream as finished. When the output file was created with
 * -shortest, finishing one stream finishes every stream of that file so the
 * muxer stops as soon as the shortest input ends. */
1395 {
1396  OutputFile *of = output_files[ost->file_index];
1397  int i;
1398 
1400 
1401  if (of->shortest) {
 /* propagate the finished state to all sibling streams of this output file */
1402  for (i = 0; i < of->ctx->nb_streams; i++)
1403  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1404  }
1405 }
1406 
1407 /**
1408  * Get and encode new output from any of the filtergraphs, without causing
1409  * activity.
1410  *
1411  * @return 0 for success, <0 for severe errors
1412  */
1413 static int reap_filters(int flush)
1414 {
1415  AVFrame *filtered_frame = NULL;
1416  int i;
1417 
1418  /* Reap all buffers present in the buffer sinks */
1419  for (i = 0; i < nb_output_streams; i++) {
1420  OutputStream *ost = output_streams[i];
1421  OutputFile *of = output_files[ost->file_index];
1423  AVCodecContext *enc = ost->enc_ctx;
1424  int ret = 0;
1425 
 /* skip streams whose filtergraph is not set up yet */
1426  if (!ost->filter || !ost->filter->graph->graph)
1427  continue;
1428  filter = ost->filter->filter;
1429 
 /* lazily initialize the output stream the first time a frame is reaped */
1430  if (!ost->initialized) {
1431  char error[1024] = "";
1432  ret = init_output_stream(ost, error, sizeof(error));
1433  if (ret < 0) {
1434  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1435  ost->file_index, ost->index, error);
1436  exit_program(1);
1437  }
1438  }
1439 
 /* reuse one frame per output stream to avoid reallocating every call */
1440  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1441  return AVERROR(ENOMEM);
1442  }
1443  filtered_frame = ost->filtered_frame;
1444 
 /* drain every frame currently available from the buffersink */
1445  while (1) {
1446  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1447  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1449  if (ret < 0) {
1450  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1452  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1453  } else if (flush && ret == AVERROR_EOF) {
 /* on flush, give the video path a chance to emit trailing frames */
1455  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1456  }
1457  break;
1458  }
 /* stream already finished: drop any late frames */
1459  if (ost->finished) {
1460  av_frame_unref(filtered_frame);
1461  continue;
1462  }
1463  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1464  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1465  AVRational filter_tb = av_buffersink_get_time_base(filter);
1466  AVRational tb = enc->time_base;
 /* widen the time base denominator so float_pts keeps sub-tick precision */
1467  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1468 
1469  tb.den <<= extra_bits;
1470  float_pts =
1471  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1472  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1473  float_pts /= 1 << extra_bits;
1474  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1475  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1476 
 /* integer pts in the encoder time base, offset by the output start time */
1477  filtered_frame->pts =
1478  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1479  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1480  }
1481  //if (ost->source_index >= 0)
1482  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1483 
 /* dispatch the frame to the media-type-specific encode path */
1484  switch (av_buffersink_get_type(filter)) {
1485  case AVMEDIA_TYPE_VIDEO:
1486  if (!ost->frame_aspect_ratio.num)
1487  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1488 
1489  if (debug_ts) {
1490  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1491  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1492  float_pts,
1493  enc->time_base.num, enc->time_base.den);
1494  }
1495 
1496  do_video_out(of, ost, filtered_frame, float_pts);
1497  break;
1498  case AVMEDIA_TYPE_AUDIO:
1499  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1500  enc->channels != filtered_frame->channels) {
1502  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1503  break;
1504  }
1505  do_audio_out(of, ost, filtered_frame);
1506  break;
1507  default:
1508  // TODO support subtitle filters
1509  av_assert0(0);
1510  }
1511 
1512  av_frame_unref(filtered_frame);
1513  }
1514  }
1515 
1516  return 0;
1517 }
1518 
/* Print the end-of-run summary: total output size per media type, muxing
 * overhead relative to the raw coded data, and (at verbose level) per-stream
 * demux/decode and encode/mux statistics for every input and output file. */
1519 static void print_final_stats(int64_t total_size)
1520 {
1521  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1522  uint64_t subtitle_size = 0;
1523  uint64_t data_size = 0;
1524  float percent = -1.0;
1525  int i, j;
1526  int pass1_used = 1;
1527 
 /* accumulate written bytes per media type across all output streams */
1528  for (i = 0; i < nb_output_streams; i++) {
1529  OutputStream *ost = output_streams[i];
1530  switch (ost->enc_ctx->codec_type) {
1531  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1532  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1533  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1534  default: other_size += ost->data_size; break;
1535  }
1536  extra_size += ost->enc_ctx->extradata_size;
1537  data_size += ost->data_size;
1540  pass1_used = 0;
1541  }
1542 
 /* container overhead: bytes beyond the raw coded payload, as a percentage */
1543  if (data_size && total_size>0 && total_size >= data_size)
1544  percent = 100.0 * (total_size - data_size) / data_size;
1545 
1546  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1547  video_size / 1024.0,
1548  audio_size / 1024.0,
1549  subtitle_size / 1024.0,
1550  other_size / 1024.0,
1551  extra_size / 1024.0);
1552  if (percent >= 0.0)
1553  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1554  else
1555  av_log(NULL, AV_LOG_INFO, "unknown");
1556  av_log(NULL, AV_LOG_INFO, "\n");
1557 
1558  /* print verbose per-stream stats */
1559  for (i = 0; i < nb_input_files; i++) {
1560  InputFile *f = input_files[i];
1561  uint64_t total_packets = 0, total_size = 0;
1562 
1563  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1564  i, f->ctx->url);
1565 
1566  for (j = 0; j < f->nb_streams; j++) {
1567  InputStream *ist = input_streams[f->ist_index + j];
1568  enum AVMediaType type = ist->dec_ctx->codec_type;
1569 
1570  total_size += ist->data_size;
1571  total_packets += ist->nb_packets;
1572 
1573  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1574  i, j, media_type_string(type));
1575  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1576  ist->nb_packets, ist->data_size);
1577 
1578  if (ist->decoding_needed) {
1579  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1580  ist->frames_decoded);
1581  if (type == AVMEDIA_TYPE_AUDIO)
1582  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1583  av_log(NULL, AV_LOG_VERBOSE, "; ");
1584  }
1585 
1586  av_log(NULL, AV_LOG_VERBOSE, "\n");
1587  }
1588 
1589  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1590  total_packets, total_size);
1591  }
1592 
1593  for (i = 0; i < nb_output_files; i++) {
1594  OutputFile *of = output_files[i];
1595  uint64_t total_packets = 0, total_size = 0;
1596 
1597  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1598  i, of->ctx->url);
1599 
1600  for (j = 0; j < of->ctx->nb_streams; j++) {
1601  OutputStream *ost = output_streams[of->ost_index + j];
1602  enum AVMediaType type = ost->enc_ctx->codec_type;
1603 
1604  total_size += ost->data_size;
1605  total_packets += ost->packets_written;
1606 
1607  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1608  i, j, media_type_string(type));
1609  if (ost->encoding_needed) {
1610  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1611  ost->frames_encoded);
1612  if (type == AVMEDIA_TYPE_AUDIO)
1613  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1614  av_log(NULL, AV_LOG_VERBOSE, "; ");
1615  }
1616 
1617  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1618  ost->packets_written, ost->data_size);
1619 
1620  av_log(NULL, AV_LOG_VERBOSE, "\n");
1621  }
1622 
1623  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1624  total_packets, total_size);
1625  }
 /* warn when nothing at all was written; the hint differs for two-pass runs */
1626  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1627  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1628  if (pass1_used) {
1629  av_log(NULL, AV_LOG_WARNING, "\n");
1630  } else {
1631  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1632  }
1633  }
1634 }
1635 
/* Emit the periodic (and final) status line -- frame count, fps, quantizer,
 * output size, elapsed stream time, bitrate, dup/drop counts and speed --
 * and, when -progress is active, write the machine-readable key=value report.
 * Interim reports are rate-limited; the last report also prints final stats. */
1636 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1637 {
1638  AVBPrint buf, buf_script;
1639  OutputStream *ost;
1640  AVFormatContext *oc;
1641  int64_t total_size;
1642  AVCodecContext *enc;
1643  int frame_number, vid, i;
1644  double bitrate;
1645  double speed;
1646  int64_t pts = INT64_MIN + 1;
1647  static int64_t last_time = -1;
1648  static int qp_histogram[52];
1649  int hours, mins, secs, us;
1650  const char *hours_sign;
1651  int ret;
1652  float t;
1653 
1654  if (!print_stats && !is_last_report && !progress_avio)
1655  return;
1656 
 /* rate-limit interim reports to one update per 500ms of wall time */
1657  if (!is_last_report) {
1658  if (last_time == -1) {
1659  last_time = cur_time;
1660  return;
1661  }
1662  if ((cur_time - last_time) < 500000)
1663  return;
1664  last_time = cur_time;
1665  }
1666 
 /* wall-clock seconds since transcoding started */
1667  t = (cur_time-timer_start) / 1000000.0;
1668 
1669 
1670  oc = output_files[0]->ctx;
1671 
1672  total_size = avio_size(oc->pb);
1673  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1674  total_size = avio_tell(oc->pb);
1675 
1676  vid = 0;
1678  av_bprint_init(&buf_script, 0, 1);
1679  for (i = 0; i < nb_output_streams; i++) {
1680  float q = -1;
1681  ost = output_streams[i];
1682  enc = ost->enc_ctx;
1683  if (!ost->stream_copy)
1684  q = ost->quality / (float) FF_QP2LAMBDA;
1685 
 /* additional video streams only contribute their quantizer */
1686  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1687  av_bprintf(&buf, "q=%2.1f ", q);
1688  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1689  ost->file_index, ost->index, q);
1690  }
 /* the first video stream drives the frame/fps part of the report */
1691  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1692  float fps;
1693 
1694  frame_number = ost->frame_number;
1695  fps = t > 1 ? frame_number / t : 0;
1696  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1697  frame_number, fps < 9.95, fps, q);
1698  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1699  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1700  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1701  ost->file_index, ost->index, q);
1702  if (is_last_report)
1703  av_bprintf(&buf, "L");
 /* -qphist: print a log2 histogram of quantizer values seen so far */
1704  if (qp_hist) {
1705  int j;
1706  int qp = lrintf(q);
1707  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1708  qp_histogram[qp]++;
1709  for (j = 0; j < 32; j++)
1710  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1711  }
1712 
 /* per-plane and combined PSNR; error totals are global on the last report */
1713  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1714  int j;
1715  double error, error_sum = 0;
1716  double scale, scale_sum = 0;
1717  double p;
1718  char type[3] = { 'Y','U','V' };
1719  av_bprintf(&buf, "PSNR=");
1720  for (j = 0; j < 3; j++) {
1721  if (is_last_report) {
1722  error = enc->error[j];
1723  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1724  } else {
1725  error = ost->error[j];
1726  scale = enc->width * enc->height * 255.0 * 255.0;
1727  }
 /* chroma planes are subsampled to a quarter of the luma area */
1728  if (j)
1729  scale /= 4;
1730  error_sum += error;
1731  scale_sum += scale;
1732  p = psnr(error / scale);
1733  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1734  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1735  ost->file_index, ost->index, type[j] | 32, p);
1736  }
1737  p = psnr(error_sum / scale_sum);
1738  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1739  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1740  ost->file_index, ost->index, p);
1741  }
1742  vid = 1;
1743  }
1744  /* track the most advanced output end timestamp across all streams */
1746  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1747  ost->st->time_base, AV_TIME_BASE_Q));
1748  if (is_last_report)
1749  nb_frames_drop += ost->last_dropped;
1750  }
1751 
 /* split pts into sign, hours, minutes, seconds and microseconds */
1752  secs = FFABS(pts) / AV_TIME_BASE;
1753  us = FFABS(pts) % AV_TIME_BASE;
1754  mins = secs / 60;
1755  secs %= 60;
1756  hours = mins / 60;
1757  mins %= 60;
1758  hours_sign = (pts < 0) ? "-" : "";
1759 
1760  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1761  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1762 
1763  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1764  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1765  if (pts == AV_NOPTS_VALUE) {
1766  av_bprintf(&buf, "N/A ");
1767  } else {
1768  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1769  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1770  }
1771 
1772  if (bitrate < 0) {
1773  av_bprintf(&buf, "bitrate=N/A");
1774  av_bprintf(&buf_script, "bitrate=N/A\n");
1775  }else{
1776  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1777  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1778  }
1779 
1780  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1781  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1782  if (pts == AV_NOPTS_VALUE) {
1783  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1784  av_bprintf(&buf_script, "out_time=N/A\n");
1785  } else {
1786  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1787  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1788  hours_sign, hours, mins, secs, us);
1789  }
1790 
1792  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1793  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1794  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1795 
1796  if (speed < 0) {
1797  av_bprintf(&buf, " speed=N/A");
1798  av_bprintf(&buf_script, "speed=N/A\n");
1799  } else {
1800  av_bprintf(&buf, " speed=%4.3gx", speed);
1801  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1802  }
1803 
 /* '\r' keeps interim reports on one console line; '\n' closes the last one */
1804  if (print_stats || is_last_report) {
1805  const char end = is_last_report ? '\n' : '\r';
1806  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1807  fprintf(stderr, "%s %c", buf.str, end);
1808  } else
1809  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1810 
1811  fflush(stderr);
1812  }
1813  av_bprint_finalize(&buf, NULL);
1814 
 /* -progress: write the key=value block, flushing so consumers see it live */
1815  if (progress_avio) {
1816  av_bprintf(&buf_script, "progress=%s\n",
1817  is_last_report ? "end" : "continue");
1818  avio_write(progress_avio, buf_script.str,
1819  FFMIN(buf_script.len, buf_script.size - 1));
1820  avio_flush(progress_avio);
1821  av_bprint_finalize(&buf_script, NULL);
1822  if (is_last_report) {
1823  if ((ret = avio_closep(&progress_avio)) < 0)
1825  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1826  }
1827  }
1828 
1829  if (is_last_report)
1830  print_final_stats(total_size);
1831 }
1832 
/* Seed an input filter's media parameters from stream codec parameters.
 * Used when no decoded frame ever arrived, so the filtergraph can still be
 * configured from what libavformat reported for the stream. */
1834 {
1835  // We never got any input. Set a fake format, which will
1836  // come from libavformat.
1837  ifilter->format = par->format;
 /* audio properties */
1838  ifilter->sample_rate = par->sample_rate;
1839  ifilter->channels = par->channels;
1840  ifilter->channel_layout = par->channel_layout;
 /* video properties */
1841  ifilter->width = par->width;
1842  ifilter->height = par->height;
1843  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1844 }
1845 
/* Drain every active encoder at end of input: streams that were never
 * initialized are set up (possibly configuring their filtergraph from codec
 * parameters), then each encoder is flushed with NULL frames and the
 * resulting packets are rescaled and written to the muxer. */
1846 static void flush_encoders(void)
1847 {
1848  int i, ret;
1849 
1850  for (i = 0; i < nb_output_streams; i++) {
1851  OutputStream *ost = output_streams[i];
1852  AVCodecContext *enc = ost->enc_ctx;
1853  OutputFile *of = output_files[ost->file_index];
1854 
1855  if (!ost->encoding_needed)
1856  continue;
1857 
1858  // Try to enable encoding with no input frames.
1859  // Maybe we should just let encoding fail instead.
1860  if (!ost->initialized) {
1861  FilterGraph *fg = ost->filter->graph;
1862  char error[1024] = "";
1863 
1865  "Finishing stream %d:%d without any data written to it.\n",
1866  ost->file_index, ost->st->index);
1867 
 /* filtergraph never configured: fall back to codecpar-derived formats */
1868  if (ost->filter && !fg->graph) {
1869  int x;
1870  for (x = 0; x < fg->nb_inputs; x++) {
1871  InputFilter *ifilter = fg->inputs[x];
1872  if (ifilter->format < 0)
1873  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1874  }
1875 
1877  continue;
1878 
1879  ret = configure_filtergraph(fg);
1880  if (ret < 0) {
1881  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1882  exit_program(1);
1883  }
1884 
1885  finish_output_stream(ost);
1886  }
1887 
1888  ret = init_output_stream(ost, error, sizeof(error));
1889  if (ret < 0) {
1890  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1891  ost->file_index, ost->index, error);
1892  exit_program(1);
1893  }
1894  }
1895 
 /* raw/PCM-style audio encoders (frame_size <= 1) have nothing buffered */
1896  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1897  continue;
1898 
1900  continue;
1901 
 /* receive packets until the encoder signals EOF */
1902  for (;;) {
1903  const char *desc = NULL;
1904  AVPacket pkt;
1905  int pkt_size;
1906 
1907  switch (enc->codec_type) {
1908  case AVMEDIA_TYPE_AUDIO:
1909  desc = "audio";
1910  break;
1911  case AVMEDIA_TYPE_VIDEO:
1912  desc = "video";
1913  break;
1914  default:
1915  av_assert0(0);
1916  }
1917 
1918  av_init_packet(&pkt);
1919  pkt.data = NULL;
1920  pkt.size = 0;
1921 
1923 
 /* keep sending the NULL (flush) frame while the encoder wants input */
1924  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1925  ret = avcodec_send_frame(enc, NULL);
1926  if (ret < 0) {
1927  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1928  desc,
1929  av_err2str(ret));
1930  exit_program(1);
1931  }
1932  }
1933 
1934  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1935  if (ret < 0 && ret != AVERROR_EOF) {
1936  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1937  desc,
1938  av_err2str(ret));
1939  exit_program(1);
1940  }
 /* two-pass: append the encoder's pass-1 stats to the log file */
1941  if (ost->logfile && enc->stats_out) {
1942  fprintf(ost->logfile, "%s", enc->stats_out);
1943  }
 /* encoder fully drained: send the EOF packet so the muxer finalizes */
1944  if (ret == AVERROR_EOF) {
1945  output_packet(of, &pkt, ost, 1);
1946  break;
1947  }
1948  if (ost->finished & MUXER_FINISHED) {
1949  av_packet_unref(&pkt);
1950  continue;
1951  }
1952  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1953  pkt_size = pkt.size;
1954  output_packet(of, &pkt, ost, 0);
1956  do_video_stats(ost, pkt_size);
1957  }
1958  }
1959  }
1960 }
1961 
1962 /*
1963  * Check whether a packet from ist should be written into ost at this time
1964  */
/* Returns 1 when the packet from ist maps to this ost and should be written
 * now; 0 when the stream is not the source, is finished, or the input
 * timestamp is still before the output file's -ss start time. */
1966 {
1967  OutputFile *of = output_files[ost->file_index];
1968  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1969 
 /* packet must come from the stream this output was mapped to */
1970  if (ost->source_index != ist_index)
1971  return 0;
1972 
1973  if (ost->finished)
1974  return 0;
1975 
 /* drop packets that precede the requested output start time */
1976  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1977  return 0;
1978 
1979  return 1;
1980 }
1981 
/* Copy one input packet to the output without re-encoding: enforce start-time
 * and recording-time limits, rescale timestamps into the muxer time base and
 * forward the payload and side data by reference. A NULL pkt flushes the
 * output bitstream filters at EOF. */
1982 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1983 {
1984  OutputFile *of = output_files[ost->file_index];
1985  InputFile *f = input_files [ist->file_index];
1986  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1987  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1988  AVPacket opkt = { 0 };
1989 
1990  av_init_packet(&opkt);
1991 
1992  // EOF: flush output bitstream filters.
1993  if (!pkt) {
1994  output_packet(of, &opkt, ost, 1);
1995  return;
1996  }
1997 
 /* don't start a copied stream on a non-keyframe */
1998  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2000  return;
2001 
 /* before the first output frame, drop packets preceding the start point */
2002  if (!ost->frame_number && !ost->copy_prior_start) {
2003  int64_t comp_start = start_time;
2004  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2005  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2006  if (pkt->pts == AV_NOPTS_VALUE ?
2007  ist->pts < comp_start :
2008  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2009  return;
2010  }
2011 
 /* -t on the output file reached: close this stream */
2012  if (of->recording_time != INT64_MAX &&
2013  ist->pts >= of->recording_time + start_time) {
2014  close_output_stream(ost);
2015  return;
2016  }
2017 
 /* -t on the input file reached: close this stream */
2018  if (f->recording_time != INT64_MAX) {
2019  start_time = f->ctx->start_time;
2020  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2021  start_time += f->start_time;
2022  if (ist->pts >= f->recording_time + start_time) {
2023  close_output_stream(ost);
2024  return;
2025  }
2026  }
2027 
2028  /* force the input stream PTS */
2029  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2030  ost->sync_opts++;
2031 
 /* rescale timestamps into the muxer time base, offset by the start time */
2032  if (pkt->pts != AV_NOPTS_VALUE)
2033  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2034  else
2035  opkt.pts = AV_NOPTS_VALUE;
2036 
2037  if (pkt->dts == AV_NOPTS_VALUE)
2038  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2039  else
2040  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2041  opkt.dts -= ost_tb_start_time;
2042 
 /* audio: rescale against the sample rate for sample-accurate timestamps */
2043  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2045  if(!duration)
2046  duration = ist->dec_ctx->frame_size;
2047  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2048  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2049  ost->mux_timebase) - ost_tb_start_time;
2050  }
2051 
2052  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2053 
2054  opkt.flags = pkt->flags;
2055 
 /* share the payload by reference when possible instead of copying it */
2056  if (pkt->buf) {
2057  opkt.buf = av_buffer_ref(pkt->buf);
2058  if (!opkt.buf)
2059  exit_program(1);
2060  }
2061  opkt.data = pkt->data;
2062  opkt.size = pkt->size;
2063 
2064  av_copy_packet_side_data(&opkt, pkt);
2065 
2066  output_packet(of, &opkt, ost, 0);
2067 }
2068 
/* Fill in a missing decoder channel layout by guessing from the channel
 * count (up to ist->guess_layout_max channels), logging the guess.
 * Returns 1 when a layout is set (or was already set), 0 when it cannot
 * be determined. */
2070 {
2071  AVCodecContext *dec = ist->dec_ctx;
2072 
2073  if (!dec->channel_layout) {
2074  char layout_name[256];
2075 
 /* refuse to guess beyond the user-configured channel limit */
2076  if (dec->channels > ist->guess_layout_max)
2077  return 0;
2079  if (!dec->channel_layout)
2080  return 0;
2081  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2082  dec->channels, dec->channel_layout);
2083  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2084  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2085  }
2086  return 1;
2087 }
2088 
/* Record decode success/failure statistics and enforce -xerror: abort the
 * program on a decode error, or on a corrupt decoded frame when requested. */
2089 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2090 {
 /* decode_error_stat[0] counts successes, [1] counts failures */
2091  if (*got_output || ret<0)
2092  decode_error_stat[ret<0] ++;
2093 
2094  if (ret < 0 && exit_on_error)
2095  exit_program(1);
2096 
2097  if (exit_on_error && *got_output && ist) {
2099  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2100  exit_program(1);
2101  }
2102  }
2103 }
2104 
2105 // Filters can be configured only if the formats of all inputs are known.
2107 {
2108  int i;
2109  for (i = 0; i < fg->nb_inputs; i++) {
 /* format < 0 means the input's format is still unknown; audio and video
  * inputs must be negotiated before the graph can be configured */
2110  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2111  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2112  return 0;
2113  }
2114  return 1;
2115 }
2116 
/* Feed one decoded frame into its input filter. If the frame's parameters
 * (format, dimensions, sample rate/channels/layout, hw frames context)
 * differ from what the filter was configured with, the filtergraph is
 * reconfigured; frames arriving before all inputs are known are buffered
 * in the input's FIFO. Returns 0 on success or a negative AVERROR. */
2118 {
2119  FilterGraph *fg = ifilter->graph;
2120  int need_reinit, ret, i;
2121 
2122  /* determine if the parameters for this input changed */
2123  need_reinit = ifilter->format != frame->format;
 /* a change in hardware frames context also forces reconfiguration */
2124  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2125  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2126  need_reinit = 1;
2127 
2128  switch (ifilter->ist->st->codecpar->codec_type) {
2129  case AVMEDIA_TYPE_AUDIO:
2130  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2131  ifilter->channels != frame->channels ||
2132  ifilter->channel_layout != frame->channel_layout;
2133  break;
2134  case AVMEDIA_TYPE_VIDEO:
2135  need_reinit |= ifilter->width != frame->width ||
2136  ifilter->height != frame->height;
2137  break;
2138  }
2139 
2140  if (need_reinit) {
2141  ret = ifilter_parameters_from_frame(ifilter, frame);
2142  if (ret < 0)
2143  return ret;
2144  }
2145 
2146  /* (re)init the graph if possible, otherwise buffer the frame and return */
2147  if (need_reinit || !fg->graph) {
2148  for (i = 0; i < fg->nb_inputs; i++) {
 /* not all inputs known yet: queue a clone of the frame for later */
2149  if (!ifilter_has_all_input_formats(fg)) {
2150  AVFrame *tmp = av_frame_clone(frame);
2151  if (!tmp)
2152  return AVERROR(ENOMEM);
2153  av_frame_unref(frame);
2154 
 /* grow the FIFO when it is full before writing the queued frame */
2155  if (!av_fifo_space(ifilter->frame_queue)) {
2156  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2157  if (ret < 0) {
2158  av_frame_free(&tmp);
2159  return ret;
2160  }
2161  }
2162  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2163  return 0;
2164  }
2165  }
2166 
 /* flush whatever the old graph still holds before tearing it down */
2167  ret = reap_filters(1);
2168  if (ret < 0 && ret != AVERROR_EOF) {
2169  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2170  return ret;
2171  }
2172 
2173  ret = configure_filtergraph(fg);
2174  if (ret < 0) {
2175  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2176  return ret;
2177  }
2178  }
2179 
2181  if (ret < 0) {
2182  if (ret != AVERROR_EOF)
2183  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2184  return ret;
2185  }
2186 
2187  return 0;
2188 }
2189 
2190 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2191 {
2192  int ret;
2193 
2194  ifilter->eof = 1;
2195 
2196  if (ifilter->filter) {
2197  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2198  if (ret < 0)
2199  return ret;
2200  } else {
2201  // the filtergraph was never configured
2202  if (ifilter->format < 0)
2203  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2204  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2205  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2206  return AVERROR_INVALIDDATA;
2207  }
2208  }
2209 
2210  return 0;
2211 }
2212 
2213 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2214 // There is the following difference: if you got a frame, you must call
2215 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2216 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2217 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2218 {
2219  int ret;
2220 
2221  *got_frame = 0;
2222 
2223  if (pkt) {
2224  ret = avcodec_send_packet(avctx, pkt);
2225  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2226  // decoded frames with avcodec_receive_frame() until done.
2227  if (ret < 0 && ret != AVERROR_EOF)
2228  return ret;
2229  }
2230 
2231  ret = avcodec_receive_frame(avctx, frame);
2232  if (ret < 0 && ret != AVERROR(EAGAIN))
2233  return ret;
2234  if (ret >= 0)
2235  *got_frame = 1;
2236 
2237  return 0;
2238 }
2239 
/* Feed one decoded frame to every filtergraph input fed by this stream.
 * The last filter receives the frame itself; earlier ones get a fresh
 * reference so each input owns its own copy. Returns the last status. */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
{
    int i, ret;
    AVFrame *f;

    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            ret = av_frame_ref(f, decoded_frame);
            if (ret < 0)
                break;
        } else
            f = decoded_frame;
        ret = ifilter_send_frame(ist->filters[i], f);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
        if (ret < 0) {
            /* NOTE(review): the listing skips one source line here — presumably
             * the "av_log(NULL, AV_LOG_ERROR," these arguments belong to. */
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            break;
        }
    }
    return ret;
}
2265 
/* Decode one audio packet (or drain with pkt == NULL) and feed any resulting
 * frame into the stream's filtergraph inputs.
 * *got_output reports whether a frame was produced; *decode_failed lets the
 * caller distinguish decoder errors from post-decode processing errors. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* lazily allocate the reusable decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    /* NOTE(review): the listing skips one source line here (presumably
     * "update_benchmark(NULL);"). */
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    /* a frame with an invalid sample rate cannot be timestamped or filtered */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    /* pick a timestamp for the frame: its own pts, the packet pts, or the
     * stream dts as a last resort — each with the matching time base */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    /* rescale to a 1/sample_rate base, compensating rounding drift */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    /* NOTE(review): the listing skips one source line here (presumably
     * "av_frame_unref(ist->filter_frame);"). */
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2329 
/* Decode one video packet (or drain with pkt == NULL) and feed any resulting
 * frame into the stream's filtergraph inputs.
 * *got_output reports whether a frame was produced, *duration_pts returns the
 * frame's duration (stream time base), and *decode_failed distinguishes
 * decoder errors from post-decode processing errors. */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    /* lazily allocate the reusable decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        /* work on a local copy so the caller's packet is untouched */
        avpkt = *pkt;
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* buffer the dts so frames drained at EOF can still be timestamped */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    /* NOTE(review): the listing skips one source line here (presumably
     * "update_benchmark(NULL);"). */
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            /* NOTE(review): the listing skips one source line here — presumably
             * the "av_log(NULL, AV_LOG_WARNING," these arguments belong to. */
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    /* debug-log any mismatch between decoder context and frame parameters */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame from hardware when a hwaccel retrieve callback is set */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    /* with a forced input frame rate, timestamps are simply sequential */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* frames drained at EOF take the buffered packet dts values, in order */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    /* NOTE(review): the listing skips one source line here (presumably
     * "av_frame_unref(ist->filter_frame);"). */
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2462 
/* Decode one subtitle packet and forward the result: to the sub2video frame
 * or queue when subtitles feed a filtergraph, and to every matching subtitle
 * encoder. Optionally fixes the previous subtitle's display duration. */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* an empty packet means EOF: flush buffered sub2video state */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        /* clamp the previous subtitle's display time to this subtitle's pts */
        int end = 1;
        if (ist->prev_sub.got_output) {
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       /* NOTE(review): the listing skips one source line here
                        * (presumably "ist->prev_sub.subtitle.end_display_time, end,"). */
                       end <= 0 ? ", dropping it" : "");
                /* NOTE(review): the listing skips one source line here (presumably
                 * "ist->prev_sub.subtitle.end_display_time = end;"). */
            }
        }
        /* operate on the previous subtitle; stash the current one for next call */
        FFSWAP(int, *got_output, ist->prev_sub.got_output);
        FFSWAP(int, ret, ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, &subtitle);
    } else if (ist->nb_filters) {
        /* filtergraph not configured yet: queue the subtitle for later */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            /* NOTE(review): the listing skips one source line here (presumably
             * the av_fifo_realloc2() call that grows the queue and sets `ret`). */
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0; /* ownership moved into the queue */
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    /* send the subtitle to every subtitle encoder fed by this stream */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2539 
/* Send EOF (carrying the stream's last pts) to every filtergraph input fed
 * by this input stream.
 * NOTE(review): the signature line is missing from this listing; presumably
 * "static int send_filter_eof(InputStream *ist)" — verify upstream. */
{
    int i, ret;
    /* TODO keep pts also in stream time base to avoid converting back */
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
    /* NOTE(review): the listing skips the rounding-flags argument line here
     * (presumably "AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);"). */

    for (i = 0; i < ist->nb_filters; i++) {
        ret = ifilter_send_eof(ist->filters[i], pts);
        if (ret < 0)
            return ret;
    }
    return 0;
}
2554 
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet dispatcher: maintains the stream's dts/pts state, runs the
 * decode loop for streams that need decoding (audio/video/subtitle), sends
 * filter EOF after draining, predicts timestamps for stream-copy, and hands
 * the packet to every stream-copy output. Returns 0 once EOF was reached. */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        /* first packet for this stream: derive an initial dts/pts estimate */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
    } else {
        avpkt = *pkt;
    }

    /* adopt the packet dts (rescaled to AV_TIME_BASE) as current stream dts */
    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            /* on repeat iterations pass NULL so the decoder is only drained */
            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
                                   &decode_failed);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    /* NOTE(review): the listing skips one source line here — presumably
                     * the "int ticks = ..." declaration used below. */
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                    /* NOTE(review): the listing skips the divisor line of this
                     * expression here. */
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                /* prefer the decoded frame's own duration; fall back to the
                 * packet/framerate-derived estimate */
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                /* NOTE(review): the listing skips the divisor line of this
                 * expression here. */
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    /* pass the packet on to every stream-copy output fed by this stream */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2736 
/* Emit an SDP description covering all RTP output files, either to stdout or
 * to the file given with -sdp_file. Deferred until every output file has
 * written its header. */
static void print_sdp(void)
{
    char sdp[16384];
    int i;
    int j;
    AVIOContext *sdp_pb;
    AVFormatContext **avc;

    /* wait until all output headers are written */
    for (i = 0; i < nb_output_files; i++) {
        if (!output_files[i]->header_written)
            return;
    }

    avc = av_malloc_array(nb_output_files, sizeof(*avc));
    if (!avc)
        exit_program(1);
    /* collect only the RTP muxer contexts */
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;
            j++;
        }
    }

    if (!j)
        goto fail;

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    } else {
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
        } else {
            avio_printf(sdp_pb, "SDP:\n%s", sdp);
            avio_closep(&sdp_pb);
            /* NOTE(review): the listing skips one source line here (presumably
             * "av_freep(&sdp_filename);" so the file is only written once). */
        }
    }

fail:
    av_freep(&avc);
}
2781 
/* AVCodecContext.get_format callback: scan the decoder's offered pixel-format
 * list and select the first hwaccel format matching the user's request that
 * can actually be initialized; a software format terminates the search.
 * NOTE(review): the signature line is missing from this listing; presumably
 * "static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)". */
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        /* NOTE(review): the listing skips one source line here (presumably
         * "const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);"). */
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* a software (non-hwaccel) format ends the search */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* look up a decoder hardware config matching this pixel format */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                /* NOTE(review): the listing skips the method-mask line of this
                 * condition here. */
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                /* an explicitly requested hwaccel that fails is fatal */
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    /* NOTE(review): the listing skips the "av_log(NULL, AV_LOG_FATAL,"
                     * line and one argument line of this message here. */
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* legacy hwaccel table: look up the entry by pixel format */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                /* NOTE(review): the listing skips the "av_log(NULL, AV_LOG_FATAL,"
                 * line this message belongs to. */
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        if (ist->hw_frames_ctx) {
            /* NOTE(review): the listing skips one source line here (presumably
             * "s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);"). */
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
2867 
/* AVCodecContext.get_buffer2 callback: route frame allocation to the active
 * hwaccel when the frame uses the hwaccel pixel format, otherwise fall back
 * to the default allocator.
 * NOTE(review): the signature line is missing from this listing; presumably
 * "static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)". */
{
    InputStream *ist = s->opaque;

    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
        return ist->hwaccel_get_buffer(s, frame, flags);

    return avcodec_default_get_buffer2(s, frame, flags);
}
2877 
/* Open the decoder for one input stream (when decoding is needed) and reset
 * its timestamp prediction state. On failure a human-readable message is
 * written into `error` and a negative AVERROR code is returned. */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* install our callbacks: hwaccel format negotiation + buffer allocation */
        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            /* NOTE(review): the listing skips one source line here (presumably
             * an "if (ist->decoding_needed & DECODING_FOR_FILTER)" guarding the warning). */
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        /* NOTE(review): the listing skips one source line here (presumably the
         * attached-picture disposition check guarding the next line). */
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* NOTE(review): the listing skips one source line here (presumably
         * "assert_avoptions(ist->decoder_opts);"). */
    }

    /* reset timestamp prediction for the stream */
    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2942 
/* Return the input stream feeding this output stream, or NULL if the output
 * has no direct source (e.g. it is fed by a complex filtergraph).
 * NOTE(review): the signature line is missing from this listing; presumably
 * "static InputStream *get_input_stream(OutputStream *ost)". */
{
    if (ost->source_index >= 0)
        return input_streams[ost->source_index];
    return NULL;
}
2949 
/* qsort()-style comparator for int64_t values: returns a negative, zero or
 * positive result for a < b, a == b, a > b respectively (here exactly
 * -1 / 0 / 1, matching FFDIFFSIGN's ((a>b)-(a<b)) expansion). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2954 
/* open the muxer when all the streams are initialized */
/* Writes the output header once every stream of the file is initialized,
 * prints the SDP if requested, and flushes packets buffered in the per-stream
 * muxing queues. Returns 0 (also when still waiting) or a negative error. */
static int check_init_output_file(OutputFile *of, int file_index)
{
    int ret, i;

    /* defer until every stream of this file is initialized */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        /* NOTE(review): the listing skips one source line here — presumably
         * the "av_log(NULL, AV_LOG_ERROR," these arguments belong to. */
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
        }
    }

    return 0;
}
3001 
/* Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through the chain, then copy the
 * last filter's output parameters back onto the stream.
 * NOTE(review): the signature line is missing from this listing; presumably
 * "static int init_output_bsfs(OutputStream *ost)". */
{
    AVBSFContext *ctx;
    int i, ret;

    if (!ost->nb_bitstream_filters)
        return 0;

    for (i = 0; i < ost->nb_bitstream_filters; i++) {
        ctx = ost->bsf_ctx[i];

        /* input of filter i is the output of filter i-1 (or the stream itself) */
        ret = avcodec_parameters_copy(ctx->par_in,
                                      i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
        if (ret < 0)
            return ret;

        ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;

        ret = av_bsf_init(ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
                   ost->bsf_ctx[i]->filter->name);
            return ret;
        }
    }

    /* the last filter in the chain defines the stream's final parameters */
    ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;

    ost->st->time_base = ctx->time_base_out;

    return 0;
}
3037 
/* Set up an output stream in stream-copy mode: transfer codec parameters,
 * codec tag, timing, disposition, side data and aspect ratio from the source
 * input stream without decoding.
 * NOTE(review): the signature line is missing from this listing; presumably
 * "static int init_output_stream_streamcopy(OutputStream *ost)". */
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    /* stream copy requires a direct input stream and no filtergraph */
    av_assert0(ist && !ost->filter);

    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        /* NOTE(review): the listing skips the "av_log(NULL, AV_LOG_FATAL,"
         * line this message belongs to. */
               "Error setting up codec context options.\n");
        return ret;
    }
    /* NOTE(review): the listing skips one source line here (presumably
     * "avcodec_parameters_from_context(par_src, ost->enc_ctx);"). */

    /* choose a codec tag valid for the output container */
    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;

    /* NOTE(review): the listing skips one source line here (presumably the
     * avformat_transfer_internal_stream_timing_info() call that sets `ret`). */
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
    /* NOTE(review): the listing skips the time_base assignment line guarded
     * by this condition. */

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    if (ost->rotate_overridden) {
        /* NOTE(review): the listing skips the av_stream_new_side_data()
         * (display-matrix) call these lines belong to. */
                                                          sizeof(int32_t) * 9);
        if (sd)
        /* NOTE(review): the listing skips the av_display_rotation_set() line
         * guarded by this condition. */
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        /* clear bogus block_align values some containers store for MP3/AC3 */
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
            /* NOTE(review): the listing skips the "av_mul_q(ost->frame_aspect_ratio,"
             * line of this expression. */
                      (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3145 
/* Set the "encoder" metadata tag on an output stream (e.g. "Lavc... libx264"),
 * unless the user already provided one; in bitexact mode a version-independent
 * "Lavc" prefix is used instead of the full library ident.
 * NOTE(review): the signature line is missing from this listing; presumably
 * "static void set_encoder_id(OutputFile *of, OutputStream *ost)". */
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* respect an explicitly set encoder tag */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* evaluate user-supplied format and codec flags (for bitexact detection) */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
    /* NOTE(review): the listing skips the final flags-argument line of this
     * call (presumably "AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);"). */
}
3186 
3187 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3188  AVCodecContext *avctx)
3189 {
3190  char *p;
3191  int n = 1, i, size, index = 0;
3192  int64_t t, *pts;
3193 
3194  for (p = kf; *p; p++)
3195  if (*p == ',')
3196  n++;
3197  size = n;
3198  pts = av_malloc_array(size, sizeof(*pts));
3199  if (!pts) {
3200  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3201  exit_program(1);
3202  }
3203 
3204  p = kf;
3205  for (i = 0; i < n; i++) {
3206  char *next = strchr(p, ',');
3207 
3208  if (next)
3209  *next++ = 0;
3210 
3211  if (!memcmp(p, "chapters", 8)) {
3212 
3213  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3214  int j;
3215 
3216  if (avf->nb_chapters > INT_MAX - size ||
3217  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3218  sizeof(*pts)))) {
3220  "Could not allocate forced key frames array.\n");
3221  exit_program(1);
3222  }
3223  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3224  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3225 
3226  for (j = 0; j < avf->nb_chapters; j++) {
3227  AVChapter *c = avf->chapters[j];
3228  av_assert1(index < size);
3229  pts[index++] = av_rescale_q(c->start, c->time_base,
3230  avctx->time_base) + t;
3231  }
3232 
3233  } else {
3234 
3235  t = parse_time_or_die("force_key_frames", p, 1);
3236  av_assert1(index < size);
3237  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3238 
3239  }
3240 
3241  p = next;
3242  }
3243 
3244  av_assert0(index == size);
3245  qsort(pts, size, sizeof(*pts), compare_int64);
3246  ost->forced_kf_count = size;
3247  ost->forced_kf_pts = pts;
3248 }
3249 
3250 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3251 {
3252  InputStream *ist = get_input_stream(ost);
3253  AVCodecContext *enc_ctx = ost->enc_ctx;
3254  AVFormatContext *oc;
3255 
3256  if (ost->enc_timebase.num > 0) {
3257  enc_ctx->time_base = ost->enc_timebase;
3258  return;
3259  }
3260 
3261  if (ost->enc_timebase.num < 0) {
3262  if (ist) {
3263  enc_ctx->time_base = ist->st->time_base;
3264  return;
3265  }
3266 
3267  oc = output_files[ost->file_index]->ctx;
3268  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3269  }
3270 
3271  enc_ctx->time_base = default_time_base;
3272 }
3273 
3275 {
3276  InputStream *ist = get_input_stream(ost);
3277  AVCodecContext *enc_ctx = ost->enc_ctx;
3279  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3280  int j, ret;
3281 
3282  set_encoder_id(output_files[ost->file_index], ost);
3283 
3284  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3285  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3286  // which have to be filtered out to prevent leaking them to output files.
3287  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3288 
3289  if (ist) {
3290  ost->st->disposition = ist->st->disposition;
3291 
3292  dec_ctx = ist->dec_ctx;
3293 
3294  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3295  } else {
3296  for (j = 0; j < oc->nb_streams; j++) {
3297  AVStream *st = oc->streams[j];
3298  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3299  break;
3300  }
3301  if (j == oc->nb_streams)
3302  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3305  }
3306 
3307  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3308  if (!ost->frame_rate.num)
3310  if (ist && !ost->frame_rate.num)
3311  ost->frame_rate = ist->framerate;
3312  if (ist && !ost->frame_rate.num)
3313  ost->frame_rate = ist->st->r_frame_rate;
3314  if (ist && !ost->frame_rate.num) {
3315  ost->frame_rate = (AVRational){25, 1};
3317  "No information "
3318  "about the input framerate is available. Falling "
3319  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3320  "if you want a different framerate.\n",
3321  ost->file_index, ost->index);
3322  }
3323 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3324  if (ost->enc->supported_framerates && !ost->force_fps) {
3325  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3326  ost->frame_rate = ost->enc->supported_framerates[idx];
3327  }
3328  // reduce frame rate for mpeg4 to be within the spec limits
3329  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3330  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3331  ost->frame_rate.num, ost->frame_rate.den, 65535);
3332  }
3333  }
3334 
3335  switch (enc_ctx->codec_type) {
3336  case AVMEDIA_TYPE_AUDIO:
3338  if (dec_ctx)
3339  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3340  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3344 
3345  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3346  break;
3347 
3348  case AVMEDIA_TYPE_VIDEO:
3350 
3351  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3353  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3355  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3356  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3357  }
3358  for (j = 0; j < ost->forced_kf_count; j++)
3359  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3361  enc_ctx->time_base);
3362 
3363  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3364  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3365  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3366  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3367  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3369 
3370  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3371  if (dec_ctx)
3372  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3373  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3374 
3375  enc_ctx->framerate = ost->frame_rate;
3376 
3377  ost->st->avg_frame_rate = ost->frame_rate;
3378 
3379  if (!dec_ctx ||
3380  enc_ctx->width != dec_ctx->width ||
3381  enc_ctx->height != dec_ctx->height ||
3382  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3384  }
3385 
3386  if (ost->forced_keyframes) {
3387  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3390  if (ret < 0) {
3392  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3393  return ret;
3394  }
3399 
3400  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3401  // parse it only for static kf timings
3402  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3404  }
3405  }
3406  break;
3407  case AVMEDIA_TYPE_SUBTITLE:
3408  enc_ctx->time_base = AV_TIME_BASE_Q;
3409  if (!enc_ctx->width) {
3410  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3411  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3412  }
3413  break;
3414  case AVMEDIA_TYPE_DATA:
3415  break;
3416  default:
3417  abort();
3418  break;
3419  }
3420 
3421  ost->mux_timebase = enc_ctx->time_base;
3422 
3423  return 0;
3424 }
3425 
3426 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3427 {
3428  int ret = 0;
3429 
3430  if (ost->encoding_needed) {
3431  AVCodec *codec = ost->enc;
3432  AVCodecContext *dec = NULL;
3433  InputStream *ist;
3434 
3435  ret = init_output_stream_encode(ost);
3436  if (ret < 0)
3437  return ret;
3438 
3439  if ((ist = get_input_stream(ost)))
3440  dec = ist->dec_ctx;
3441  if (dec && dec->subtitle_header) {
3442  /* ASS code assumes this buffer is null terminated so add extra byte. */
3444  if (!ost->enc_ctx->subtitle_header)
3445  return AVERROR(ENOMEM);
3446  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3448  }
3449  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3450  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3451  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3452  !codec->defaults &&
3453  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3454  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3455  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3456 
3457  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3461  if (!ost->enc_ctx->hw_frames_ctx)
3462  return AVERROR(ENOMEM);
3463  } else {
3464  ret = hw_device_setup_for_encode(ost);
3465  if (ret < 0) {
3466  snprintf(error, error_len, "Device setup failed for "
3467  "encoder on output stream #%d:%d : %s",
3468  ost->file_index, ost->index, av_err2str(ret));
3469  return ret;
3470  }
3471  }
3472 
3473  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3474  if (ret == AVERROR_EXPERIMENTAL)
3475  abort_codec_experimental(codec, 1);
3476  snprintf(error, error_len,
3477  "Error while opening encoder for output stream #%d:%d - "
3478  "maybe incorrect parameters such as bit_rate, rate, width or height",
3479  ost->file_index, ost->index);
3480  return ret;
3481  }
3482  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3483  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3485  ost->enc_ctx->frame_size);
3487  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3488  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3489  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3490  " It takes bits/s as argument, not kbits/s\n");
3491 
3493  if (ret < 0) {
3495  "Error initializing the output stream codec context.\n");
3496  exit_program(1);
3497  }
3498  /*
3499  * FIXME: ost->st->codec should't be needed here anymore.
3500  */
3501  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3502  if (ret < 0)
3503  return ret;
3504 
3505  if (ost->enc_ctx->nb_coded_side_data) {
3506  int i;
3507 
3508  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3509  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3510  uint8_t *dst_data;
3511 
3512  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3513  if (!dst_data)
3514  return AVERROR(ENOMEM);
3515  memcpy(dst_data, sd_src->data, sd_src->size);
3516  }
3517  }
3518 
3519  /*
3520  * Add global input side data. For now this is naive, and copies it
3521  * from the input stream's global side data. All side data should
3522  * really be funneled over AVFrame and libavfilter, then added back to
3523  * packet side data, and then potentially using the first packet for
3524  * global side data.
3525  */
3526  if (ist) {
3527  int i;
3528  for (i = 0; i < ist->st->nb_side_data; i++) {
3529  AVPacketSideData *sd = &ist->st->side_data[i];
3530  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3531  if (!dst)
3532  return AVERROR(ENOMEM);
3533  memcpy(dst, sd->data, sd->size);
3534  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3535  av_display_rotation_set((uint32_t *)dst, 0);
3536  }
3537  }
3538 
3539  // copy timebase while removing common factors
3540  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3541  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3542 
3543  // copy estimated duration as a hint to the muxer
3544  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3545  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3546 
3547  ost->st->codec->codec= ost->enc_ctx->codec;
3548  } else if (ost->stream_copy) {
3549  ret = init_output_stream_streamcopy(ost);
3550  if (ret < 0)
3551  return ret;
3552  }
3553 
3554  // parse user provided disposition, and update stream values
3555  if (ost->disposition) {
3556  static const AVOption opts[] = {
3557  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3558  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3559  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3560  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3561  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3562  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3563  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3564  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3565  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3566  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3567  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3568  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3569  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3570  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3571  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3572  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3573  { NULL },
3574  };
3575  static const AVClass class = {
3576  .class_name = "",
3577  .item_name = av_default_item_name,
3578  .option = opts,
3579  .version = LIBAVUTIL_VERSION_INT,
3580  };
3581  const AVClass *pclass = &class;
3582 
3583  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3584  if (ret < 0)
3585  return ret;
3586  }
3587 
3588  /* initialize bitstream filters for the output stream
3589  * needs to be done here, because the codec id for streamcopy is not
3590  * known until now */
3591  ret = init_output_bsfs(ost);
3592  if (ret < 0)
3593  return ret;
3594 
3595  ost->initialized = 1;
3596 
3597  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3598  if (ret < 0)
3599  return ret;
3600 
3601  return ret;
3602 }
3603 
3604 static void report_new_stream(int input_index, AVPacket *pkt)
3605 {
3606  InputFile *file = input_files[input_index];
3607  AVStream *st = file->ctx->streams[pkt->stream_index];
3608 
3609  if (pkt->stream_index < file->nb_streams_warn)
3610  return;
3611  av_log(file->ctx, AV_LOG_WARNING,
3612  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3614  input_index, pkt->stream_index,
3615  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3616  file->nb_streams_warn = pkt->stream_index + 1;
3617 }
3618 
3619 static int transcode_init(void)
3620 {
3621  int ret = 0, i, j, k;
3622  AVFormatContext *oc;
3623  OutputStream *ost;
3624  InputStream *ist;
3625  char error[1024] = {0};
3626 
3627  for (i = 0; i < nb_filtergraphs; i++) {
3628  FilterGraph *fg = filtergraphs[i];
3629  for (j = 0; j < fg->nb_outputs; j++) {
3630  OutputFilter *ofilter = fg->outputs[j];
3631  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3632  continue;
3633  if (fg->nb_inputs != 1)
3634  continue;
3635  for (k = nb_input_streams-1; k >= 0 ; k--)
3636  if (fg->inputs[0]->ist == input_streams[k])
3637  break;
3638  ofilter->ost->source_index = k;
3639  }
3640  }
3641 
3642  /* init framerate emulation */
3643  for (i = 0; i < nb_input_files; i++) {
3644  InputFile *ifile = input_files[i];
3645  if (ifile->rate_emu)
3646  for (j = 0; j < ifile->nb_streams; j++)
3647  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3648  }
3649 
3650  /* init input streams */
3651  for (i = 0; i < nb_input_streams; i++)
3652  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3653  for (i = 0; i < nb_output_streams; i++) {
3654  ost = output_streams[i];
3655  avcodec_close(ost->enc_ctx);
3656  }
3657  goto dump_format;
3658  }
3659 
3660  /* open each encoder */
3661  for (i = 0; i < nb_output_streams; i++) {
3662  // skip streams fed from filtergraphs until we have a frame for them
3663  if (output_streams[i]->filter)
3664  continue;
3665 
3666  ret = init_output_stream(output_streams[i], error, sizeof(error));
3667  if (ret < 0)
3668  goto dump_format;
3669  }
3670 
3671  /* discard unused programs */
3672  for (i = 0; i < nb_input_files; i++) {
3673  InputFile *ifile = input_files[i];
3674  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3675  AVProgram *p = ifile->ctx->programs[j];
3676  int discard = AVDISCARD_ALL;
3677 
3678  for (k = 0; k < p->nb_stream_indexes; k++)
3679  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3680  discard = AVDISCARD_DEFAULT;
3681  break;
3682  }
3683  p->discard = discard;
3684  }
3685  }
3686 
3687  /* write headers for files with no streams */
3688  for (i = 0; i < nb_output_files; i++) {
3689  oc = output_files[i]->ctx;
3690  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3691  ret = check_init_output_file(output_files[i], i);
3692  if (ret < 0)
3693  goto dump_format;
3694  }
3695  }
3696 
3697  dump_format:
3698  /* dump the stream mapping */
3699  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3700  for (i = 0; i < nb_input_streams; i++) {
3701  ist = input_streams[i];
3702 
3703  for (j = 0; j < ist->nb_filters; j++) {
3704  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3705  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3706  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3707  ist->filters[j]->name);
3708  if (nb_filtergraphs > 1)
3709  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3710  av_log(NULL, AV_LOG_INFO, "\n");
3711  }
3712  }
3713  }
3714 
3715  for (i = 0; i < nb_output_streams; i++) {
3716  ost = output_streams[i];
3717 
3718  if (ost->attachment_filename) {
3719  /* an attached file */
3720  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3721  ost->attachment_filename, ost->file_index, ost->index);
3722  continue;
3723  }
3724 
3725  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3726  /* output from a complex graph */
3727  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3728  if (nb_filtergraphs > 1)
3729  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3730 
3731  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3732  ost->index, ost->enc ? ost->enc->name : "?");
3733  continue;
3734  }
3735 
3736  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3737  input_streams[ost->source_index]->file_index,
3738  input_streams[ost->source_index]->st->index,
3739  ost->file_index,
3740  ost->index);
3741  if (ost->sync_ist != input_streams[ost->source_index])
3742  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3743  ost->sync_ist->file_index,
3744  ost->sync_ist->st->index);
3745  if (ost->stream_copy)
3746  av_log(NULL, AV_LOG_INFO, " (copy)");
3747  else {
3748  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3749  const AVCodec *out_codec = ost->enc;
3750  const char *decoder_name = "?";
3751  const char *in_codec_name = "?";
3752  const char *encoder_name = "?";
3753  const char *out_codec_name = "?";
3754  const AVCodecDescriptor *desc;
3755 
3756  if (in_codec) {
3757  decoder_name = in_codec->name;
3758  desc = avcodec_descriptor_get(in_codec->id);
3759  if (desc)
3760  in_codec_name = desc->name;
3761  if (!strcmp(decoder_name, in_codec_name))
3762  decoder_name = "native";
3763  }
3764 
3765  if (out_codec) {
3766  encoder_name = out_codec->name;
3767  desc = avcodec_descriptor_get(out_codec->id);
3768  if (desc)
3769  out_codec_name = desc->name;
3770  if (!strcmp(encoder_name, out_codec_name))
3771  encoder_name = "native";
3772  }
3773 
3774  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3775  in_codec_name, decoder_name,
3776  out_codec_name, encoder_name);
3777  }
3778  av_log(NULL, AV_LOG_INFO, "\n");
3779  }
3780 
3781  if (ret) {
3782  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3783  return ret;
3784  }
3785 
3787 
3788  return 0;
3789 }
3790 
3791 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3792 static int need_output(void)
3793 {
3794  int i;
3795 
3796  for (i = 0; i < nb_output_streams; i++) {
3797  OutputStream *ost = output_streams[i];
3798  OutputFile *of = output_files[ost->file_index];
3799  AVFormatContext *os = output_files[ost->file_index]->ctx;
3800 
3801  if (ost->finished ||
3802  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3803  continue;
3804  if (ost->frame_number >= ost->max_frames) {
3805  int j;
3806  for (j = 0; j < of->ctx->nb_streams; j++)
3807  close_output_stream(output_streams[of->ost_index + j]);
3808  continue;
3809  }
3810 
3811  return 1;
3812  }
3813 
3814  return 0;
3815 }
3816 
3817 /**
3818  * Select the output stream to process.
3819  *
3820  * @return selected output stream, or NULL if none available
3821  */
3823 {
3824  int i;
3825  int64_t opts_min = INT64_MAX;
3826  OutputStream *ost_min = NULL;
3827 
3828  for (i = 0; i < nb_output_streams; i++) {
3829  OutputStream *ost = output_streams[i];
3830  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3831  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3832  AV_TIME_BASE_Q);
3833  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3834  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3835 
3836  if (!ost->initialized && !ost->inputs_done)
3837  return ost;
3838 
3839  if (!ost->finished && opts < opts_min) {
3840  opts_min = opts;
3841  ost_min = ost->unavailable ? NULL : ost;
3842  }
3843  }
3844  return ost_min;
3845 }
3846 
/* Enable or disable terminal echo on stdin (no-op where termios is absent;
 * failures are ignored since stdin may not be a terminal). */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term;

    if (tcgetattr(0, &term) != 0)
        return;

    if (on)
        term.c_lflag |= ECHO;
    else
        term.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term);
#endif
}
3858 
3859 static int check_keyboard_interaction(int64_t cur_time)
3860 {
3861  int i, ret, key;
3862  static int64_t last_time;
3863  if (received_nb_signals)
3864  return AVERROR_EXIT;
3865  /* read_key() returns 0 on EOF */
3866  if(cur_time - last_time >= 100000 && !run_as_daemon){
3867  key = read_key();
3868  last_time = cur_time;
3869  }else
3870  key = -1;
3871  if (key == 'q')
3872  return AVERROR_EXIT;
3873  if (key == '+') av_log_set_level(av_log_get_level()+10);
3874  if (key == '-') av_log_set_level(av_log_get_level()-10);
3875  if (key == 's') qp_hist ^= 1;
3876  if (key == 'h'){
3877  if (do_hex_dump){
3878  do_hex_dump = do_pkt_dump = 0;
3879  } else if(do_pkt_dump){
3880  do_hex_dump = 1;
3881  } else
3882  do_pkt_dump = 1;
3884  }
3885  if (key == 'c' || key == 'C'){
3886  char buf[4096], target[64], command[256], arg[256] = {0};
3887  double time;
3888  int k, n = 0;
3889  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3890  i = 0;
3891  set_tty_echo(1);
3892  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3893  if (k > 0)
3894  buf[i++] = k;
3895  buf[i] = 0;
3896  set_tty_echo(0);
3897  fprintf(stderr, "\n");
3898  if (k > 0 &&
3899  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3900  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3901  target, time, command, arg);
3902  for (i = 0; i < nb_filtergraphs; i++) {
3903  FilterGraph *fg = filtergraphs[i];
3904  if (fg->graph) {
3905  if (time < 0) {
3906  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3907  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3908  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3909  } else if (key == 'c') {
3910  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3911  ret = AVERROR_PATCHWELCOME;
3912  } else {
3913  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3914  if (ret < 0)
3915  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3916  }
3917  }
3918  }
3919  } else {
3921  "Parse error, at least 3 arguments were expected, "
3922  "only %d given in string '%s'\n", n, buf);
3923  }
3924  }
3925  if (key == 'd' || key == 'D'){
3926  int debug=0;
3927  if(key == 'D') {
3928  debug = input_streams[0]->st->codec->debug<<1;
3929  if(!debug) debug = 1;
3930  while(debug & (FF_DEBUG_DCT_COEFF
3931 #if FF_API_DEBUG_MV
3932  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3933 #endif
3934  )) //unsupported, would just crash
3935  debug += debug;
3936  }else{
3937  char buf[32];
3938  int k = 0;
3939  i = 0;
3940  set_tty_echo(1);
3941  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3942  if (k > 0)
3943  buf[i++] = k;
3944  buf[i] = 0;
3945  set_tty_echo(0);
3946  fprintf(stderr, "\n");
3947  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3948  fprintf(stderr,"error parsing debug value\n");
3949  }
3950  for(i=0;i<nb_input_streams;i++) {
3951  input_streams[i]->st->codec->debug = debug;
3952  }
3953  for(i=0;i<nb_output_streams;i++) {
3954  OutputStream *ost = output_streams[i];
3955  ost->enc_ctx->debug = debug;
3956  }
3957  if(debug) av_log_set_level(AV_LOG_DEBUG);
3958  fprintf(stderr,"debug=%d\n", debug);
3959  }
3960  if (key == '?'){
3961  fprintf(stderr, "key function\n"
3962  "? show this help\n"
3963  "+ increase verbosity\n"
3964  "- decrease verbosity\n"
3965  "c Send command to first matching filter supporting it\n"
3966  "C Send/Queue command to all matching filters\n"
3967  "D cycle through available debug modes\n"
3968  "h dump packets/hex press to cycle through the 3 states\n"
3969  "q quit\n"
3970  "s Show QP histogram\n"
3971  );
3972  }
3973  return 0;
3974 }
3975 
3976 #if HAVE_THREADS
3977 static void *input_thread(void *arg)
3978 {
3979  InputFile *f = arg;
3980  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3981  int ret = 0;
3982 
3983  while (1) {
3984  AVPacket pkt;
3985  ret = av_read_frame(f->ctx, &pkt);
3986 
3987  if (ret == AVERROR(EAGAIN)) {
3988  av_usleep(10000);
3989  continue;
3990  }
3991  if (ret < 0) {
3992  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3993  break;
3994  }
3995  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3996  if (flags && ret == AVERROR(EAGAIN)) {
3997  flags = 0;
3998  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4000  "Thread message queue blocking; consider raising the "
4001  "thread_queue_size option (current value: %d)\n",
4002  f->thread_queue_size);
4003  }
4004  if (ret < 0) {
4005  if (ret != AVERROR_EOF)
4006  av_log(f->ctx, AV_LOG_ERROR,
4007  "Unable to send packet to main thread: %s\n",
4008  av_err2str(ret));
4009  av_packet_unref(&pkt);
4010  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4011  break;
4012  }
4013  }
4014 
4015  return NULL;
4016 }
4017 
4018 static void free_input_thread(int i)
4019 {
4020  InputFile *f = input_files[i];
4021  AVPacket pkt;
4022 
4023  if (!f || !f->in_thread_queue)
4024  return;
4026  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4027  av_packet_unref(&pkt);
4028 
4029  pthread_join(f->thread, NULL);
4030  f->joined = 1;
4031  av_thread_message_queue_free(&f->in_thread_queue);
4032 }
4033 
4034 static void free_input_threads(void)
4035 {
4036  int i;
4037 
4038  for (i = 0; i < nb_input_files; i++)
4039  free_input_thread(i);
4040 }
4041 
4042 static int init_input_thread(int i)
4043 {
4044  int ret;
4045  InputFile *f = input_files[i];
4046 
4047  if (nb_input_files == 1)
4048  return 0;
4049 
4050  if (f->ctx->pb ? !f->ctx->pb->seekable :
4051  strcmp(f->ctx->iformat->name, "lavfi"))
4052  f->non_blocking = 1;
4053  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4054  f->thread_queue_size, sizeof(AVPacket));
4055  if (ret < 0)
4056  return ret;
4057 
4058  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4059  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4060  av_thread_message_queue_free(&f->in_thread_queue);
4061  return AVERROR(ret);
4062  }
4063 
4064  return 0;
4065 }
4066 
4067 static int init_input_threads(void)
4068 {
4069  int i, ret;
4070 
4071  for (i = 0; i < nb_input_files; i++) {
4072  ret = init_input_thread(i);
4073  if (ret < 0)
4074  return ret;
4075  }
4076  return 0;
4077 }
4078 
4079 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4080 {
4081  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4082  f->non_blocking ?
4084 }
4085 #endif
4086 
4088 {
4089  if (f->rate_emu) {
4090  int i;
4091  for (i = 0; i < f->nb_streams; i++) {
4092  InputStream *ist = input_streams[f->ist_index + i];
4093  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4094  int64_t now = av_gettime_relative() - ist->start;
4095  if (pts > now)
4096  return AVERROR(EAGAIN);
4097  }
4098  }
4099 
4100 #if HAVE_THREADS
4101  if (nb_input_files > 1)
4102  return get_input_packet_mt(f, pkt);
4103 #endif
4104  return av_read_frame(f->ctx, pkt);
4105 }
4106 
4107 static int got_eagain(void)
4108 {
4109  int i;
4110  for (i = 0; i < nb_output_streams; i++)
4111  if (output_streams[i]->unavailable)
4112  return 1;
4113  return 0;
4114 }
4115 
4116 static void reset_eagain(void)
4117 {
4118  int i;
4119  for (i = 0; i < nb_input_files; i++)
4120  input_files[i]->eagain = 0;
4121  for (i = 0; i < nb_output_streams; i++)
4122  output_streams[i]->unavailable = 0;
4123 }
4124 
4125 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4126 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4127  AVRational time_base)
4128 {
4129  int ret;
4130 
4131  if (!*duration) {
4132  *duration = tmp;
4133  return tmp_time_base;
4134  }
4135 
4136  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4137  if (ret < 0) {
4138  *duration = tmp;
4139  return tmp_time_base;
4140  }
4141 
4142  return time_base;
4143 }
4144 
4146 {
4147  InputStream *ist;
4148  AVCodecContext *avctx;
4149  int i, ret, has_audio = 0;
4150  int64_t duration = 0;
4151 
4152  ret = av_seek_frame(is, -1, is->start_time, 0);
4153  if (ret < 0)
4154  return ret;
4155 
4156  for (i = 0; i < ifile->nb_streams; i++) {
4157  ist = input_streams[ifile->ist_index + i];
4158  avctx = ist->dec_ctx;
4159 
4160  /* duration is the length of the last frame in a stream
4161  * when audio stream is present we don't care about
4162  * last video frame length because it's not defined exactly */
4163  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4164  has_audio = 1;
4165  }
4166 
4167  for (i = 0; i < ifile->nb_streams; i++) {
4168  ist = input_streams[ifile->ist_index + i];
4169  avctx = ist->dec_ctx;
4170 
4171  if (has_audio) {
4172  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4173  AVRational sample_rate = {1, avctx->sample_rate};
4174 
4175  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4176  } else {
4177  continue;
4178  }
4179  } else {
4180  if (ist->framerate.num) {
4181  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4182  } else if (ist->st->avg_frame_rate.num) {
4183  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4184  } else {
4185  duration = 1;
4186  }
4187  }
4188  if (!ifile->duration)
4189  ifile->time_base = ist->st->time_base;
4190  /* the total duration of the stream, max_pts - min_pts is
4191  * the duration of the stream without the last frame */
4192  duration += ist->max_pts - ist->min_pts;
4193  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4194  ifile->time_base);
4195  }
4196 
4197  if (ifile->loop > 0)
4198  ifile->loop--;
4199 
4200  return ret;
4201 }
4202 
4203 /*
4204  * Return
4205  * - 0 -- one packet was read and processed
4206  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4207  * this function should be called again
4208  * - AVERROR_EOF -- this function should not be called again
4209  */
4210 static int process_input(int file_index)
4211 {
4212  InputFile *ifile = input_files[file_index];
4213  AVFormatContext *is;
4214  InputStream *ist;
4215  AVPacket pkt;
4216  int ret, thread_ret, i, j;
4217  int64_t duration;
4218  int64_t pkt_dts;
4219 
4220  is = ifile->ctx;
4221  ret = get_input_packet(ifile, &pkt);
4222 
4223  if (ret == AVERROR(EAGAIN)) {
4224  ifile->eagain = 1;
4225  return ret;
4226  }
4227  if (ret < 0 && ifile->loop) {
4228  AVCodecContext *avctx;
4229  for (i = 0; i < ifile->nb_streams; i++) {
4230  ist = input_streams[ifile->ist_index + i];
4231  avctx = ist->dec_ctx;
4232  if (ist->decoding_needed) {
4233  ret = process_input_packet(ist, NULL, 1);
4234  if (ret>0)
4235  return 0;
4236  avcodec_flush_buffers(avctx);
4237  }
4238  }
4239 #if HAVE_THREADS
4240  free_input_thread(file_index);
4241 #endif
4242  ret = seek_to_start(ifile, is);
4243 #if HAVE_THREADS
4244  thread_ret = init_input_thread(file_index);
4245  if (thread_ret < 0)
4246  return thread_ret;
4247 #endif
4248  if (ret < 0)
4249  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4250  else
4251  ret = get_input_packet(ifile, &pkt);
4252  if (ret == AVERROR(EAGAIN)) {
4253  ifile->eagain = 1;
4254  return ret;
4255  }
4256  }
4257  if (ret < 0) {
4258  if (ret != AVERROR_EOF) {
4259  print_error(is->url, ret);
4260  if (exit_on_error)
4261  exit_program(1);
4262  }
4263 
4264  for (i = 0; i < ifile->nb_streams; i++) {
4265  ist = input_streams[ifile->ist_index + i];
4266  if (ist->decoding_needed) {
4267  ret = process_input_packet(ist, NULL, 0);
4268  if (ret>0)
4269  return 0;
4270  }
4271 
4272  /* mark all outputs that don't go through lavfi as finished */
4273  for (j = 0; j < nb_output_streams; j++) {
4274  OutputStream *ost = output_streams[j];
4275 
4276  if (ost->source_index == ifile->ist_index + i &&
4277  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4278  finish_output_stream(ost);
4279  }
4280  }
4281 
4282  ifile->eof_reached = 1;
4283  return AVERROR(EAGAIN);
4284  }
4285 
4286  reset_eagain();
4287 
4288  if (do_pkt_dump) {
4290  is->streams[pkt.stream_index]);
4291  }
4292  /* the following test is needed in case new streams appear
4293  dynamically in stream : we ignore them */
4294  if (pkt.stream_index >= ifile->nb_streams) {
4295  report_new_stream(file_index, &pkt);
4296  goto discard_packet;
4297  }
4298 
4299  ist = input_streams[ifile->ist_index + pkt.stream_index];
4300 
4301  ist->data_size += pkt.size;
4302  ist->nb_packets++;
4303 
4304  if (ist->discard)
4305  goto discard_packet;
4306 
4307  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4308  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4309  exit_program(1);
4310  }
4311 
4312  if (debug_ts) {
4313  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4314  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4318  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4319  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4320  av_ts2str(input_files[ist->file_index]->ts_offset),
4321  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4322  }
4323 
4324  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4325  int64_t stime, stime2;
4326  // Correcting starttime based on the enabled streams
4327  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4328  // so we instead do it here as part of discontinuity handling
4329  if ( ist->next_dts == AV_NOPTS_VALUE
4330  && ifile->ts_offset == -is->start_time
4331  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4332  int64_t new_start_time = INT64_MAX;
4333  for (i=0; i<is->nb_streams; i++) {
4334  AVStream *st = is->streams[i];
4335  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4336  continue;
4337  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4338  }
4339  if (new_start_time > is->start_time) {
4340  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4341  ifile->ts_offset = -new_start_time;
4342  }
4343  }
4344 
4345  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4346  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4347  ist->wrap_correction_done = 1;
4348 
4349  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4350  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4351  ist->wrap_correction_done = 0;
4352  }
4353  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4354  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4355  ist->wrap_correction_done = 0;
4356  }
4357  }
4358 
4359  /* add the stream-global side data to the first packet */
4360  if (ist->nb_packets == 1) {
4361  for (i = 0; i < ist->st->nb_side_data; i++) {
4362  AVPacketSideData *src_sd = &ist->st->side_data[i];
4363  uint8_t *dst_data;
4364 
4365  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4366  continue;
4367 
4368  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4369  continue;
4370 
4371  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4372  if (!dst_data)
4373  exit_program(1);
4374 
4375  memcpy(dst_data, src_sd->data, src_sd->size);
4376  }
4377  }
4378 
4379  if (pkt.dts != AV_NOPTS_VALUE)
4380  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4381  if (pkt.pts != AV_NOPTS_VALUE)
4382  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4383 
4384  if (pkt.pts != AV_NOPTS_VALUE)
4385  pkt.pts *= ist->ts_scale;
4386  if (pkt.dts != AV_NOPTS_VALUE)
4387  pkt.dts *= ist->ts_scale;
4388 
4390  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4392  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4393  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4394  int64_t delta = pkt_dts - ifile->last_ts;
4395  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4396  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4397  ifile->ts_offset -= delta;
4399  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4400  delta, ifile->ts_offset);
4401  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4402  if (pkt.pts != AV_NOPTS_VALUE)
4403  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4404  }
4405  }
4406 
4407  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4408  if (pkt.pts != AV_NOPTS_VALUE) {
4409  pkt.pts += duration;
4410  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4411  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4412  }
4413 
4414  if (pkt.dts != AV_NOPTS_VALUE)
4415  pkt.dts += duration;
4416 
4418  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4420  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4421  !copy_ts) {
4422  int64_t delta = pkt_dts - ist->next_dts;
4423  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4424  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4425  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4426  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4427  ifile->ts_offset -= delta;
4429  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4430  delta, ifile->ts_offset);
4431  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4432  if (pkt.pts != AV_NOPTS_VALUE)
4433  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4434  }
4435  } else {
4436  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4437  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4438  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4439  pkt.dts = AV_NOPTS_VALUE;
4440  }
4441  if (pkt.pts != AV_NOPTS_VALUE){
4442  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4443  delta = pkt_pts - ist->next_dts;
4444  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4445  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4446  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4447  pkt.pts = AV_NOPTS_VALUE;
4448  }
4449  }
4450  }
4451  }
4452 
4453  if (pkt.dts != AV_NOPTS_VALUE)
4454  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4455 
4456  if (debug_ts) {
4457  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4459  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4460  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4461  av_ts2str(input_files[ist->file_index]->ts_offset),
4462  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4463  }
4464 
4465  sub2video_heartbeat(ist, pkt.pts);
4466 
4467  process_input_packet(ist, &pkt, 0);
4468 
4469 discard_packet:
4470  av_packet_unref(&pkt);
4471 
4472  return 0;
4473 }
4474 
4475 /**
4476  * Perform a step of transcoding for the specified filter graph.
4477  *
4478  * @param[in] graph filter graph to consider
4479  * @param[out] best_ist input stream where a frame would allow to continue
4480  * @return 0 for success, <0 for error
4481  */
4482 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4483 {
4484  int i, ret;
4485  int nb_requests, nb_requests_max = 0;
4486  InputFilter *ifilter;
4487  InputStream *ist;
4488 
4489  *best_ist = NULL;
4490  ret = avfilter_graph_request_oldest(graph->graph);
4491  if (ret >= 0)
4492  return reap_filters(0);
4493 
4494  if (ret == AVERROR_EOF) {
4495  ret = reap_filters(1);
4496  for (i = 0; i < graph->nb_outputs; i++)
4497  close_output_stream(graph->outputs[i]->ost);
4498  return ret;
4499  }
4500  if (ret != AVERROR(EAGAIN))
4501  return ret;
4502 
4503  for (i = 0; i < graph->nb_inputs; i++) {
4504  ifilter = graph->inputs[i];
4505  ist = ifilter->ist;
4506  if (input_files[ist->file_index]->eagain ||
4507  input_files[ist->file_index]->eof_reached)
4508  continue;
4509  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4510  if (nb_requests > nb_requests_max) {
4511  nb_requests_max = nb_requests;
4512  *best_ist = ist;
4513  }
4514  }
4515 
4516  if (!*best_ist)
4517  for (i = 0; i < graph->nb_outputs; i++)
4518  graph->outputs[i]->ost->unavailable = 1;
4519 
4520  return 0;
4521 }
4522 
4523 /**
4524  * Run a single step of transcoding.
4525  *
4526  * @return 0 for success, <0 for error
4527  */
4528 static int transcode_step(void)
4529 {
4530  OutputStream *ost;
4531  InputStream *ist = NULL;
4532  int ret;
4533 
4534  ost = choose_output();
4535  if (!ost) {
4536  if (got_eagain()) {
4537  reset_eagain();
4538  av_usleep(10000);
4539  return 0;
4540  }
4541  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4542  return AVERROR_EOF;
4543  }
4544 
4545  if (ost->filter && !ost->filter->graph->graph) {
4547  ret = configure_filtergraph(ost->filter->graph);
4548  if (ret < 0) {
4549  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4550  return ret;
4551  }
4552  }
4553  }
4554 
4555  if (ost->filter && ost->filter->graph->graph) {
4556  if (!ost->initialized) {
4557  char error[1024] = {0};
4558  ret = init_output_stream(ost, error, sizeof(error));
4559  if (ret < 0) {
4560  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4561  ost->file_index, ost->index, error);
4562  exit_program(1);
4563  }
4564  }
4565  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4566  return ret;
4567  if (!ist)
4568  return 0;
4569  } else if (ost->filter) {
4570  int i;
4571  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4572  InputFilter *ifilter = ost->filter->graph->inputs[i];
4573  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4574  ist = ifilter->ist;
4575  break;
4576  }
4577  }
4578  if (!ist) {
4579  ost->inputs_done = 1;
4580  return 0;
4581  }
4582  } else {
4583  av_assert0(ost->source_index >= 0);
4584  ist = input_streams[ost->source_index];
4585  }
4586 
4587  ret = process_input(ist->file_index);
4588  if (ret == AVERROR(EAGAIN)) {
4589  if (input_files[ist->file_index]->eagain)
4590  ost->unavailable = 1;
4591  return 0;
4592  }
4593 
4594  if (ret < 0)
4595  return ret == AVERROR_EOF ? 0 : ret;
4596 
4597  return reap_filters(0);
4598 }
4599 
4600 /*
4601  * The following code is the main loop of the file converter
4602  */
4603 static int transcode(void)
4604 {
4605  int ret, i;
4606  AVFormatContext *os;
4607  OutputStream *ost;
4608  InputStream *ist;
4609  int64_t timer_start;
4610  int64_t total_packets_written = 0;
4611 
4612  ret = transcode_init();
4613  if (ret < 0)
4614  goto fail;
4615 
4616  if (stdin_interaction) {
4617  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4618  }
4619 
4620  timer_start = av_gettime_relative();
4621 
4622 #if HAVE_THREADS
4623  if ((ret = init_input_threads()) < 0)
4624  goto fail;
4625 #endif
4626 
4627  while (!received_sigterm) {
4628  int64_t cur_time= av_gettime_relative();
4629 
4630  /* if 'q' pressed, exits */
4631  if (stdin_interaction)
4632  if (check_keyboard_interaction(cur_time) < 0)
4633  break;
4634 
4635  /* check if there's any stream where output is still needed */
4636  if (!need_output()) {
4637  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4638  break;
4639  }
4640 
4641  ret = transcode_step();
4642  if (ret < 0 && ret != AVERROR_EOF) {
4643  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4644  break;
4645  }
4646 
4647  /* dump report by using the output first video and audio streams */
4648  print_report(0, timer_start, cur_time);
4649  }
4650 #if HAVE_THREADS
4651  free_input_threads();
4652 #endif
4653 
4654  /* at the end of stream, we must flush the decoder buffers */
4655  for (i = 0; i < nb_input_streams; i++) {
4656  ist = input_streams[i];
4657  if (!input_files[ist->file_index]->eof_reached) {
4658  process_input_packet(ist, NULL, 0);
4659  }
4660  }
4661  flush_encoders();
4662 
4663  term_exit();
4664 
4665  /* write the trailer if needed and close file */
4666  for (i = 0; i < nb_output_files; i++) {
4667  os = output_files[i]->ctx;
4668  if (!output_files[i]->header_written) {
4670  "Nothing was written into output file %d (%s), because "
4671  "at least one of its streams received no packets.\n",
4672  i, os->url);
4673  continue;
4674  }
4675  if ((ret = av_write_trailer(os)) < 0) {
4676  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4677  if (exit_on_error)
4678  exit_program(1);
4679  }
4680  }
4681 
4682  /* dump report by using the first video and audio streams */
4683  print_report(1, timer_start, av_gettime_relative());
4684 
4685  /* close each encoder */
4686  for (i = 0; i < nb_output_streams; i++) {
4687  ost = output_streams[i];
4688  if (ost->encoding_needed) {
4689  av_freep(&ost->enc_ctx->stats_in);
4690  }
4691  total_packets_written += ost->packets_written;
4692  }
4693 
4694  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4695  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4696  exit_program(1);
4697  }
4698 
4699  /* close each decoder */
4700  for (i = 0; i < nb_input_streams; i++) {
4701  ist = input_streams[i];
4702  if (ist->decoding_needed) {
4703  avcodec_close(ist->dec_ctx);
4704  if (ist->hwaccel_uninit)
4705  ist->hwaccel_uninit(ist->dec_ctx);
4706  }
4707  }
4708 
4711 
4712  /* finished ! */
4713  ret = 0;
4714 
4715  fail:
4716 #if HAVE_THREADS
4717  free_input_threads();
4718 #endif
4719 
4720  if (output_streams) {
4721  for (i = 0; i < nb_output_streams; i++) {
4722  ost = output_streams[i];
4723  if (ost) {
4724  if (ost->logfile) {
4725  if (fclose(ost->logfile))
4727  "Error closing logfile, loss of information possible: %s\n",
4728  av_err2str(AVERROR(errno)));
4729  ost->logfile = NULL;
4730  }
4731  av_freep(&ost->forced_kf_pts);
4732  av_freep(&ost->apad);
4733  av_freep(&ost->disposition);
4734  av_dict_free(&ost->encoder_opts);
4735  av_dict_free(&ost->sws_dict);
4736  av_dict_free(&ost->swr_opts);
4737  av_dict_free(&ost->resample_opts);
4738  }
4739  }
4740  }
4741  return ret;
4742 }
4743 
4744 
4745 static int64_t getutime(void)
4746 {
4747 #if HAVE_GETRUSAGE
4748  struct rusage rusage;
4749 
4750  getrusage(RUSAGE_SELF, &rusage);
4751  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4752 #elif HAVE_GETPROCESSTIMES
4753  HANDLE proc;
4754  FILETIME c, e, k, u;
4755  proc = GetCurrentProcess();
4756  GetProcessTimes(proc, &c, &e, &k, &u);
4757  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4758 #else
4759  return av_gettime_relative();
4760 #endif
4761 }
4762 
4763 static int64_t getmaxrss(void)
4764 {
4765 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4766  struct rusage rusage;
4767  getrusage(RUSAGE_SELF, &rusage);
4768  return (int64_t)rusage.ru_maxrss * 1024;
4769 #elif HAVE_GETPROCESSMEMORYINFO
4770  HANDLE proc;
4771  PROCESS_MEMORY_COUNTERS memcounters;
4772  proc = GetCurrentProcess();
4773  memcounters.cb = sizeof(memcounters);
4774  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4775  return memcounters.PeakPagefileUsage;
4776 #else
4777  return 0;
4778 #endif
4779 }
4780 
4781 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4782 {
4783 }
4784 
4785 int main(int argc, char **argv)
4786 {
4787  int i, ret;
4788  int64_t ti;
4789 
4790  init_dynload();
4791 
4793 
4794  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4795 
4797  parse_loglevel(argc, argv, options);
4798 
4799  if(argc>1 && !strcmp(argv[1], "-d")){
4800  run_as_daemon=1;
4802  argc--;
4803  argv++;
4804  }
4805 
4806 #if CONFIG_AVDEVICE
4808 #endif
4810 
4811  show_banner(argc, argv, options);
4812 
4813  /* parse options and open all input/output files */
4814  ret = ffmpeg_parse_options(argc, argv);
4815  if (ret < 0)
4816  exit_program(1);
4817 
4818  if (nb_output_files <= 0 && nb_input_files == 0) {
4819  show_usage();
4820  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4821  exit_program(1);
4822  }
4823 
4824  /* file converter / grab */
4825  if (nb_output_files <= 0) {
4826  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4827  exit_program(1);
4828  }
4829 
4830 // if (nb_input_files == 0) {
4831 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4832 // exit_program(1);
4833 // }
4834 
4835  for (i = 0; i < nb_output_files; i++) {
4836  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4837  want_sdp = 0;
4838  }
4839 
4840  current_time = ti = getutime();
4841  if (transcode() < 0)
4842  exit_program(1);
4843  ti = getutime() - ti;
4844  if (do_benchmark) {
4845  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4846  }
4847  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4850  exit_program(69);
4851 
4853  return main_return_code;
4854 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1569
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:316
int nb_bitstream_filters
Definition: ffmpeg.h:462
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:895
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:120
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1769
AVRational enc_timebase
Definition: ffmpeg.h:460
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:679
int got_output
Definition: ffmpeg.h:341
#define AV_DISPOSITION_METADATA
Definition: avformat.h:856
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1982
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1075
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2069
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:448
const struct AVCodec * codec
Definition: avcodec.h:1527
Definition: ffmpeg.h:425
AVRational framerate
Definition: avcodec.h:3040
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:3965
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:836
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:960
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:161
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:557
void term_init(void)
Definition: ffmpeg.c:381
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:336
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5721
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:263
int nb_outputs
Definition: ffmpeg.h:292
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
int size
AVDictionary * swr_opts
Definition: ffmpeg.h:508
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:302
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2363
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:218
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3038
void term_exit(void)
Definition: ffmpeg.c:322
int stream_copy
Definition: ffmpeg.h:513
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1185
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3826
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1618
#define atomic_store(object, desired)
Definition: stdatomic.h:85
AVOption.
Definition: opt.h:246
AVRational frame_rate
Definition: ffmpeg.h:477
int64_t * forced_kf_pts
Definition: ffmpeg.h:487
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:298
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2693
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:376
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:503
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:874
static int process_input(int file_index)
Definition: ffmpeg.c:4210
int exit_on_error
Definition: ffmpeg_opt.c:103
int64_t cfr_next_pts
Definition: ffmpeg.h:326
const char * fmt
Definition: avisynth_c.h:769
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3426
static atomic_int transcode_init_done
Definition: ffmpeg.c:330
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1568
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1450
#define AV_DICT_DONT_OVERWRITE
Don&#39;t overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:128
Memory buffer source API.
const char * desc
Definition: nvenc.c:65
void av_log_set_level(int level)
Set the log level.
Definition: log.c:385
AVRational framerate
Definition: ffmpeg.h:333
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:3960
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:420
int height
Definition: ffmpeg.h:247
int64_t max_pts
Definition: ffmpeg.h:322
int decoding_needed
Definition: ffmpeg.h:300
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:3884
void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:234
const struct AVBitStreamFilter * filter
The bitstream filter this context is an instance of.
Definition: avcodec.h:5696
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:935
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5687
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1636
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:481
int index
stream index in AVFormatContext
Definition: avformat.h:874
int size
Definition: avcodec.h:1431
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4763
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
int max_muxing_queue_size
Definition: ffmpeg.h:541
const char * b
Definition: vf_curves.c:113
static int nb_frames_dup
Definition: ffmpeg.c:129
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2943
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:269
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:655
#define AV_DISPOSITION_DUB
Definition: avformat.h:820
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1896
int eagain
Definition: ffmpeg.h:396
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1180
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1727
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3274
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:648
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:832
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:371
int quality
Definition: ffmpeg.h:539
unsigned num_rects
Definition: avcodec.h:3864
AVFrame * filter_frame
Definition: ffmpeg.h:307
static int transcode_init(void)
Definition: ffmpeg.c:3619
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2950
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2556
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2540
int do_benchmark_all
Definition: ffmpeg_opt.c:96
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:834
const char * key
int last_dropped
Definition: ffmpeg.h:471
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:679
discard all
Definition: avcodec.h:794
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:975
int64_t input_ts_offset
Definition: ffmpeg.h:402
int do_hex_dump
Definition: ffmpeg_opt.c:97
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2741
int nb_input_streams
Definition: ffmpeg.c:142
const char * name
Definition: ffmpeg.h:68
intptr_t atomic_int
Definition: stdatomic.h:55
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:998
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3604
#define src
Definition: vp8dsp.c:254
uint64_t packets_written
Definition: ffmpeg.h:533
AVCodec.
Definition: avcodec.h:3408
#define VSYNC_VFR
Definition: ffmpeg.h:52
int nb_dts_buffer
Definition: ffmpeg.h:388
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:479
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3876
int print_stats
Definition: ffmpeg_opt.c:105
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:134
float dts_error_threshold
Definition: ffmpeg_opt.c:88
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:283
uint64_t data_size
Definition: ffmpeg.h:531
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:470
AVBSFContext ** bsf_ctx
Definition: ffmpeg.h:463
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:824
struct FilterGraph * graph
Definition: ffmpeg.h:238
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1640
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2240
Undefined.
Definition: avutil.h:273
AVSubtitleRect ** rects
Definition: avcodec.h:3865
int encoding_needed
Definition: ffmpeg.h:447
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:653
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4781
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:556
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3859
Format I/O context.
Definition: avformat.h:1342
uint64_t samples_decoded
Definition: ffmpeg.h:385
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:237
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2330
#define AV_RL64
Definition: intreadwrite.h:173
unsigned int nb_stream_indexes
Definition: avformat.h:1265
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:70
int64_t cur_dts
Definition: avformat.h:1076
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3828
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:984
uint64_t frames_decoded
Definition: ffmpeg.h:384
int header_written
Definition: ffmpeg.h:563
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:286
Immediately push the frame to the output.
Definition: buffersrc.h:46
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
char * logfile_prefix
Definition: ffmpeg.h:498
static uint8_t * subtitle_out
Definition: ffmpeg.c:139
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:201
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:114
static int main_return_code
Definition: ffmpeg.c:332
static int64_t start_time
Definition: ffplay.c:327
int copy_initial_nonkeyframes
Definition: ffmpeg.h:523
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:131
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:3002
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2181
int64_t * dts_buffer
Definition: ffmpeg.h:387
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
Definition: avformat.h:526
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
Opaque data information usually continuous.
Definition: avutil.h:203
AVDictionary * sws_dict
Definition: ffmpeg.h:507
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int width
Video only.
Definition: avcodec.h:3950
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:216
AVOptions.
int subtitle_header_size
Definition: avcodec.h:2986
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:661
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5694
int stdin_interaction
Definition: ffmpeg_opt.c:107
FILE * logfile
Definition: ffmpeg.h:499
AVDictionary * opts
Definition: ffmpeg.h:555
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:620
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1448
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1211
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
#define ECHO(name, type, min, max)
Definition: af_aecho.c:186
#define FF_API_DEBUG_MV
Definition: version.h:58
static int need_output(void)
Definition: ffmpeg.c:3792
Keep a reference to the frame.
Definition: buffersrc.h:53
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:441
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:311
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3386
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:979
static double psnr(double d)
Definition: ffmpeg.c:1344
int do_benchmark
Definition: ffmpeg_opt.c:95
int audio_sync_method
Definition: ffmpeg_opt.c:91
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:344
int shortest
Definition: ffmpeg.h:561
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1410
int64_t duration
Definition: movenc.c:63
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2078
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
static int64_t getutime(void)
Definition: ffmpeg.c:4745
static AVFrame * frame
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1833
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:113
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
const char * name
Definition: avcodec.h:5737
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static void finish(void)
Definition: movenc.c:345
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:857
int nb_streams
Definition: ffmpeg.h:409
uint8_t * data
Definition: avcodec.h:1430
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
enum AVMediaType type
Definition: ffmpeg.h:240
static void set_tty_echo(int on)
Definition: ffmpeg.c:3847
AVDictionary * resample_opts
Definition: ffmpeg.h:509
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3187
static int flags
Definition: log.c:55
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:260
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4145
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:4931
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
int * formats
Definition: ffmpeg.h:277
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:144
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:419
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1413
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:331
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:841
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1349
uint8_t * data
Definition: avcodec.h:1374
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:365
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the &#39;-loglevel&#39; option in the command line args and apply it.
Definition: cmdutils.c:506
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3829
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVCodec * dec
Definition: ffmpeg.h:305
AVBufferRef * av_buffersink_get_hw_frames_ctx(const AVFilterContext *ctx)
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1263
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2532
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:218
int top_field_first
Definition: ffmpeg.h:334
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1490
int nb_output_streams
Definition: ffmpeg.c:147
int file_index
Definition: ffmpeg.h:296
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:198
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2155
unsigned int * stream_index
Definition: avformat.h:1264
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
int wrap_correction_done
Definition: ffmpeg.h:317
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:319
uint64_t channel_layout
Audio only.
Definition: avcodec.h:3986
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:268
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:854
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1361
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:601
int64_t next_dts
Definition: ffmpeg.h:312
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1462
attribute_deprecated int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:226
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:561
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:349
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1061
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3054
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2463
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5756
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3422
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:373
AVRational sample_aspect_ratio
Definition: ffmpeg.h:248
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3481
int rate_emu
Definition: ffmpeg.h:412
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2003
int width
Definition: frame.h:276
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1807
int sample_rate
Definition: ffmpeg.h:250
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1394
static void reset_eagain(void)
Definition: ffmpeg.c:4116
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:43
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:374
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:694
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3216
FilterGraph ** filtergraphs
Definition: ffmpeg.c:151
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:475
AVFilterContext * filter
Definition: ffmpeg.h:236
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:471
#define atomic_load(object)
Definition: stdatomic.h:93
int64_t start
Definition: ffmpeg.h:309
int loop
Definition: ffmpeg.h:398
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3827
uint64_t nb_packets
Definition: ffmpeg.h:382
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:457
int video_sync_method
Definition: ffmpeg_opt.c:92
int format
Definition: ffmpeg.h:245
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:132
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
char * sdp_filename
Definition: ffmpeg_opt.c:84
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:472
Display matrix.
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: avcodec.h:3391
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
char * url
input or output URL.
Definition: avformat.h:1438
int video_delay
Video only.
Definition: avcodec.h:3979
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:709
const char * r
Definition: vf_curves.c:111
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
int capabilities
Codec capabilities.
Definition: avcodec.h:3427
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:149
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:263
unsigned int nb_programs
Definition: avformat.h:1519
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:552
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3880
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1413
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1598
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:558
AVChapter ** chapters
Definition: avformat.h:1570
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:345
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5727
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1376
int av_log_get_level(void)
Get the current log level.
Definition: log.c:380
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
const char * name
Name of the codec implementation.
Definition: avcodec.h:3415
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:887
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:630
int eof
Definition: ffmpeg.h:256
int force_fps
Definition: ffmpeg.h:479
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:414
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:946
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1260
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:1992
int qp_hist
Definition: ffmpeg_opt.c:106
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:116
float frame_drop_threshold
Definition: ffmpeg_opt.c:93
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1031
int64_t error[4]
Definition: ffmpeg.h:550
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1436
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3135
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2224
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
uint32_t end_display_time
Definition: avcodec.h:3863
static int want_sdp
Definition: ffmpeg.c:134
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3866
OutputFilter * filter
Definition: ffmpeg.h:501
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2089
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:396
AVRational frame_aspect_ratio
Definition: ffmpeg.h:484
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:823
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2190
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1595
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:831
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
AVRational mux_timebase
Definition: ffmpeg.h:459
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1398
AVDictionary * opts
Definition: movenc.c:50
int block_align
Audio only.
Definition: avcodec.h:4001
static int nb_frames_drop
Definition: ffmpeg.c:131
A bitmap, pict will be set.
Definition: avcodec.h:3808
int linesize[4]
Definition: avcodec.h:3844
int nb_output_files
Definition: ffmpeg.c:149
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:260
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:238
int channels
number of audio channels, only used for audio.
Definition: frame.h:523
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:478
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:301
static int transcode(void)
Definition: ffmpeg.c:4603
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:886
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:456
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define NAN
Definition: mathematics.h:64
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:508
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:3172
uint64_t * channel_layouts
Definition: ffmpeg.h:278
#define VSYNC_AUTO
Definition: ffmpeg.h:49
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:400
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:215
int saw_first_ts
Definition: ffmpeg.h:331
int abort_on_flags
Definition: ffmpeg_opt.c:104
This side data contains quality related information from the encoder.
Definition: avcodec.h:1235
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that&#39;s been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:261
int width
picture width / height.
Definition: avcodec.h:1690
PVOID HANDLE
uint8_t w
Definition: llviddspenc.c:38
char * apad
Definition: ffmpeg.h:510
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3197
int64_t nb_samples
Definition: ffmpeg.h:328
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5733
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:298
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:492
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:175
int64_t duration
Definition: ffmpeg.h:399
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:444
const char * name
Definition: avformat.h:507
int width
Definition: ffmpeg.h:247
int32_t
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:865
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:849
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2266
int nb_filtergraphs
Definition: ffmpeg.c:152
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:90
int64_t last_ts
Definition: ffmpeg.h:405
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:341
int do_pkt_dump
Definition: ffmpeg_opt.c:98
int64_t max_frames
Definition: ffmpeg.h:468
#define AV_RL32
Definition: intreadwrite.h:146
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:314
int audio_channels_mapped
Definition: ffmpeg.h:496
int n
Definition: avisynth_c.h:684
AVDictionary * metadata
Definition: avformat.h:937
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5465
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1649
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:691
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3843
static int got_eagain(void)
Definition: ffmpeg.c:4107
int inputs_done
Definition: ffmpeg.h:520
static void error(const char *err)
int vstats_version
Definition: ffmpeg_opt.c:112
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:538
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:842
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1127
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:136
AVCodecContext * enc
Definition: muxing.c:55
#define av_log2
Definition: intmath.h:83
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:833
int ret
Definition: ffmpeg.h:342
int audio_volume
Definition: ffmpeg_opt.c:90
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:873
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1946
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:469
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:291
InputFilter ** filters
Definition: ffmpeg.h:358
int fix_sub_duration
Definition: ffmpeg.h:339
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:642
#define VSYNC_DROP
Definition: ffmpeg.h:54
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2106
int64_t recording_time
Definition: ffmpeg.h:408
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4943
Definition: ffmpeg.h:67
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2193
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:76
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:69
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2956
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:819
AVStream * st
Definition: ffmpeg.h:297
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:170
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3146
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
int frame_size
Definition: mxfenc.c:1947
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:365
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:855
int ost_index
Definition: ffmpeg.h:556
struct InputStream * sync_ist
Definition: ffmpeg.h:451
#define AV_BPRINT_SIZE_AUTOMATIC
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1629
enum AVMediaType codec_type
Definition: avcodec.h:1526
double ts_scale
Definition: ffmpeg.h:330
int unavailable
Definition: ffmpeg.h:512
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:498
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2878
enum AVCodecID codec_id
Definition: avcodec.h:1528
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:334
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1590
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:185
float max_error_rate
Definition: ffmpeg_opt.c:109
int sample_rate
samples per second
Definition: avcodec.h:2173
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:249
uint64_t frames_encoded
Definition: ffmpeg.h:535
AVIOContext * pb
I/O context.
Definition: avformat.h:1384
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AVFifoBuffer * muxing_queue
Definition: ffmpeg.h:544
int ist_index
Definition: ffmpeg.h:397
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:532
static int loop
Definition: ffplay.c:336
int debug
debug
Definition: avcodec.h:2598
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
static void print_sdp(void)
Definition: ffmpeg.c:2737
const char * graph_desc
Definition: ffmpeg.h:284
int guess_layout_max
Definition: ffmpeg.h:335
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:406
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1747
main external API structure.
Definition: avcodec.h:1518
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:592
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:342
uint8_t * data
The data buffer.
Definition: buffer.h:89
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:477
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:863
int * sample_rates
Definition: ffmpeg.h:279
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1042
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:306
const char * attachment_filename
Definition: ffmpeg.h:522
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1965
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:390
AVRational time_base
Definition: ffmpeg.h:401
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:639
AVCodecContext * enc_ctx
Definition: ffmpeg.h:465
void * buf
Definition: avisynth_c.h:690
AVFrame * decoded_frame
Definition: ffmpeg.h:306
int extradata_size
Definition: avcodec.h:1619
Perform non-blocking operation.
Definition: threadmessage.h:31
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:254
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
Replacements for frequently missing libm functions.
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4482
int nb_coded_side_data
Definition: avcodec.h:3173
int channels
Definition: ffmpeg.h:251
int * audio_channels_map
Definition: ffmpeg.h:495
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:391
int configure_filtergraph(FilterGraph *fg)
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2328
OutputStream ** output_streams
Definition: ffmpeg.c:146
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:88
int index
Definition: gxfenc.c:89
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2610
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:443
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:433
double rotate_override_value
Definition: ffmpeg.h:482
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2021
static int current_time
Definition: ffmpeg.c:136
int64_t sync_opts
Definition: ffmpeg.h:452
char * vstats_filename
Definition: ffmpeg_opt.c:83
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:165
AVCodecContext * dec_ctx
Definition: ffmpeg.h:304
char * disposition
Definition: ffmpeg.h:525
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
cl_device_type type
int filtergraph_is_simple(FilterGraph *fg)
struct InputStream::@24 prev_sub
#define mid_pred
Definition: mathops.h:97
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:789
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1178
int av_buffersink_get_w(const AVFilterContext *ctx)
int nb_streams_warn
Definition: ffmpeg.h:411
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3250
AVDictionary * decoder_opts
Definition: ffmpeg.h:332
int autorotate
Definition: ffmpeg.h:337
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:707
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:538
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:483
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:108
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4384
int64_t ts_offset
Definition: ffmpeg.h:404
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:293
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4528
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:504
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:514
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1700
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1767
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4126
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:699
AVFrame * filtered_frame
Definition: ffmpeg.h:469
int source_index
Definition: ffmpeg.h:445
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:322
static volatile int received_nb_signals
Definition: ffmpeg.c:329
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:372
int copy_prior_start
Definition: ffmpeg.h:524
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:551
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1584
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:670
static int64_t pts
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:359
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2782
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1447
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:232
uint8_t level
Definition: svq3.c:207
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:491
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:313
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2505
int forced_kf_count
Definition: ffmpeg.h:488
int64_t start
Definition: avformat.h:1302
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:922
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
OSTFinished finished
Definition: ffmpeg.h:511
char * forced_keyframes
Definition: ffmpeg.h:490
int sample_rate
Audio only.
Definition: avcodec.h:3994
uint64_t data_size
Definition: ffmpeg.h:380
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:67
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:315
static AVStream * ost
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1081
struct FilterGraph * graph
Definition: ffmpeg.h:262
uint64_t limit_filesize
Definition: ffmpeg.h:559
const OptionDef options[]
Definition: ffmpeg_opt.c:3292
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1519
AVIOContext * progress_avio
Definition: ffmpeg.c:137
int main(int argc, char **argv)
Definition: ffmpeg.c:4785
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:465
AVCodecParameters * ref_par
Definition: ffmpeg.h:466
#define VSYNC_CFR
Definition: ffmpeg.h:51
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1023
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:912
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:474
AVStream * st
Definition: muxing.c:54
static AVCodecContext * dec_ctx
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:926
uint32_t start_display_time
Definition: avcodec.h:3862
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1065
uint64_t samples_encoded
Definition: ffmpeg.h:536
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1301
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:2798
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:924
char * key
Definition: dict.h:86
uint32_t BOOL
static FILE * vstats_file
Definition: ffmpeg.c:112
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:90
AVFrame * last_frame
Definition: ffmpeg.h:470
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:152
uint64_t channel_layout
Definition: ffmpeg.h:252
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1463
int copy_ts
Definition: ffmpeg_opt.c:99
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1043
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1354
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4422
AVFormatContext * ctx
Definition: ffmpeg.h:394
int pict_type
Definition: ffmpeg.h:547
AVSubtitle subtitle
Definition: ffmpeg.h:343
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:853
int eof_reached
Definition: ffmpeg.h:395
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
int forced_kf_index
Definition: ffmpeg.h:489
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:472
char * avfilter
Definition: ffmpeg.h:502
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:472
uint8_t * name
Definition: ffmpeg.h:239
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:370
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
float dts_delta_threshold
Definition: ffmpeg_opt.c:87
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:734
int channels
number of audio channels
Definition: avcodec.h:2174
int top_field_first
Definition: ffmpeg.h:480
int av_buffersink_get_channels(const AVFilterContext *ctx)
OutputFilter ** outputs
Definition: ffmpeg.h:291
InputFile ** input_files
Definition: ffmpeg.c:143
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2868
void av_log_set_flags(int arg)
Definition: log.c:390
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:296
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2117
AVFormatContext * ctx
Definition: ffmpeg.h:554
#define lrint
Definition: tablegen.h:53
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:822
void show_usage(void)
Definition: ffmpeg_opt.c:3166
int channels
Audio only.
Definition: avcodec.h:3990
An instance of a filter.
Definition: avfilter.h:338
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:900
#define LIBAVCODEC_IDENT
Definition: version.h:42
char * hwaccel_device
Definition: ffmpeg.h:366
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1429
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * encoder_opts
Definition: ffmpeg.h:506
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1247
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:5050
int height
Definition: frame.h:276
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:289
#define av_freep(p)
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:375
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:647
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2217
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2540
OutputFile ** output_files
Definition: ffmpeg.c:148
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1020
#define av_malloc_array(a, b)
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: avcodec.h:3398
static void flush_encoders(void)
Definition: ffmpeg.c:1846
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: avcodec.h:3888
int copy_tb
Definition: ffmpeg_opt.c:101
int64_t min_pts
Definition: ffmpeg.h:321
int initialized
Definition: ffmpeg.h:518
static volatile int received_sigterm
Definition: ffmpeg.c:328
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:298
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4087
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
int stream_index
Definition: avcodec.h:1432
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:902
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:364
int depth
Number of bits in the component.
Definition: pixdesc.h:58
enum AVSubtitleType type
Definition: avcodec.h:3846
int64_t first_pts
Definition: ffmpeg.h:455
int nb_inputs
Definition: ffmpeg.h:290
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:928
#define DECODING_FOR_OST
Definition: ffmpeg.h:301
int index
Definition: ffmpeg.h:444
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:997
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
The codec supports this format via the hw_device_ctx interface.
Definition: avcodec.h:3354
OSTFinished
Definition: ffmpeg.h:437
This structure stores compressed data.
Definition: avcodec.h:1407
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1215
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5715
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:345
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:242
int debug_ts
Definition: ffmpeg_opt.c:102
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3822
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:284
static void sigterm_handler(int sig)
Definition: ffmpeg.c:335
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1423
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:141
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1560
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1520
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:821
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
InputStream ** input_streams
Definition: ffmpeg.c:141
static unsigned dup_warning
Definition: ffmpeg.c:130
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:68
Definition: ffmpeg.h:429
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:814
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:2985
static uint8_t tmp[11]
Definition: aes_ctr.c:26