FFmpeg  4.3
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity reported by the cmdutils banner/help machinery. */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Per-frame video statistics log; presumably opened when the -vstats
   option is given (TODO confirm against the option handler), and closed
   in ffmpeg_cleanup(). */
static FILE *vstats_file;

/* Variable names available inside -force_key_frames expressions, in the
   order the expression evaluator expects; NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Global transcode bookkeeping.  nb_frames_dup / nb_frames_drop count
   frames duplicated or dropped by the video sync code in do_video_out();
   dup_warning is the (growing) threshold at which a "More than %d frames
   duplicated" warning is emitted. */
static int run_as_daemon = 0;
static int nb_frames_dup = 0;
static unsigned dup_warning = 1000;
static int nb_frames_drop = 0;
/* decode_error_stat[0] = successfully decoded packets,
   [1] = decode errors — presumably; TODO confirm against process_input(). */
static int64_t decode_error_stat[2];

static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* Prepare ist->sub2video.frame as a blank (all-zero) canvas sized to the
   decoder dimensions, falling back to the configured sub2video size.
   Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
   NOTE(review): the extraction lost the signature line — upstream is
   static int sub2video_get_blank_frame(InputStream *ist) — plus the
   av_frame_unref()/format-setup lines before the width/height assignments;
   restore from upstream FFmpeg 4.3 before compiling. */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* Zero only plane 0: the canvas is a packed 32-bit pixel format, so one
       plane covers the whole image. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Stamp the cached sub2video frame with pts and feed it to every filter
   this input stream is connected to.
   NOTE(review): the extraction lost the lines inside the loop that set
   ret — upstream calls av_buffersrc_add_frame_flags(ist->filters[i]->filter,
   frame, AV_BUFFERSRC_FLAG_KEEP_REF) here; restore from upstream before
   compiling (as written, ret is read uninitialized). */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    /* A blank frame must already have been allocated by
       sub2video_get_blank_frame(). */
    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        /* EOF from the buffer source is expected at stream end; only real
           errors are worth a warning. */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239 
/* Render the given subtitle (or a blank frame when sub == NULL) onto the
   sub2video canvas and push it into the filtergraph.  With sub == NULL this
   either initializes the canvas at heartbeat_pts or clears the previously
   displayed subpicture at its recorded end time.
   NOTE(review): an av_log(NULL, AV_LOG_ERROR, ...) call line was lost by the
   extraction just before the "Impossible to get a blank canvas" string;
   also `int8_t *dst` is almost certainly a mangled `uint8_t *dst` (it is
   passed to sub2video_copy_rect(uint8_t *, ...)) — restore from upstream. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;
    if (sub) {
        /* Subtitle display times are in ms relative to sub->pts (AV_TIME_BASE
           units); convert both ends to the input stream time base. */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts = ist->sub2video.initialize ?
              heartbeat_pts : ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
    ist->sub2video.initialize = 0;
}
279 
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281 {
282  InputFile *infile = input_files[ist->file_index];
283  int i, j, nb_reqs;
284  int64_t pts2;
285 
286  /* When a frame is read from a file, examine all sub2video streams in
287  the same file and send the sub2video frame again. Otherwise, decoded
288  video frames could be accumulating in the filter graph while a filter
289  (possibly overlay) is desperately waiting for a subtitle frame. */
290  for (i = 0; i < infile->nb_streams; i++) {
291  InputStream *ist2 = input_streams[infile->ist_index + i];
292  if (!ist2->sub2video.frame)
293  continue;
294  /* subtitles seem to be usually muxed ahead of other streams;
295  if not, subtracting a larger time here is necessary */
296  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297  /* do not send the heartbeat frame if the subtitle is already ahead */
298  if (pts2 <= ist2->sub2video.last_pts)
299  continue;
300  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301  /* if we have hit the end of the current displayed subpicture,
302  or if we need to initialize the system, update the
303  overlayed subpicture and its start/end times */
304  sub2video_update(ist2, pts2 + 1, NULL);
305  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307  if (nb_reqs)
308  sub2video_push_ref(ist2, pts2);
309  }
310 }
311 
/* Flush a sub2video stream at EOF: clear any still-displayed subpicture,
   then signal EOF to each connected buffer source.
   NOTE(review): the extraction lost the line that sets ret inside the
   loop — upstream calls av_buffersrc_add_frame(ist->filters[i]->filter,
   NULL) there; restore from upstream before compiling (as written, ret is
   read uninitialized). */
static void sub2video_flush(InputStream *ist)
{
    int i;
    int ret;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);
    for (i = 0; i < ist->nb_filters; i++) {
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
325 
326 /* end of sub2video hack */
327 
/* Restore the terminal attributes saved by term_init().  Kept
   async-signal-safe (a single tcsetattr call) so it can run from the
   signal handler path. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
335 
/* Public terminal teardown: silence further logging and restore the tty.
   NOTE(review): a line was lost here by the extraction — upstream calls
   term_exit_sigsafe() before returning; restore from upstream. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
}
341 
/* Signal/exit state shared with the handlers below.  received_sigterm and
   received_nb_signals are written from signal context; volatile int is
   what upstream uses here (NOTE(review): volatile sig_atomic_t would be
   the strictly portable type — confirm before changing).
   NOTE(review): the extraction lost the neighbouring declaration of the
   atomic transcode_init_done flag referenced in ffmpeg_cleanup(). */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
static volatile int ffmpeg_exited = 0;
static int main_return_code = 0;
347 
/* Termination-signal handler: remember which signal arrived; after more
   than 3 signals, write a message directly to fd 2 (write() is
   async-signal-safe, unlike fprintf) and hard-exit with status 123.
   NOTE(review): the extraction lost the rest of the signature — upstream
   is sigterm_handler(int sig) — and the lines that increment
   received_nb_signals and call term_exit_sigsafe(); restore from
   upstream before compiling. */
static void
{
    int ret;
    received_sigterm = sig;
    if(received_nb_signals > 3) {
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
        exit(123);
    }
}
362 
363 #if HAVE_SETCONSOLECTRLHANDLER
364 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
365 {
366  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
367 
368  switch (fdwCtrlType)
369  {
370  case CTRL_C_EVENT:
371  case CTRL_BREAK_EVENT:
372  sigterm_handler(SIGINT);
373  return TRUE;
374 
375  case CTRL_CLOSE_EVENT:
376  case CTRL_LOGOFF_EVENT:
377  case CTRL_SHUTDOWN_EVENT:
378  sigterm_handler(SIGTERM);
379  /* Basically, with these 3 events, when we return from this method the
380  process is hard terminated, so stall as long as we need to
381  to try and let the main thread(s) clean up and gracefully terminate
382  (we have at most 5 seconds, but should be done far before that). */
383  while (!ffmpeg_exited) {
384  Sleep(0);
385  }
386  return TRUE;
387 
388  default:
389  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
390  return FALSE;
391  }
392 }
393 #endif
394 
/* Install signal handlers and, on ttys, switch the terminal to raw-ish
   mode so single keypresses can be read by read_key().
   NOTE(review): the extraction lost a line after the #if — upstream opens
   an `if (!run_as_daemon && stdin_interaction) {` block there, which is
   what the otherwise-unmatched `}` before the #endif closes; restore from
   upstream before compiling. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        /* Save current settings so term_exit_sigsafe() can restore them. */
        oldtty = tty;
        restore_tty = 1;

        /* Disable input translation/flow control, echo and canonical
           (line-buffered) mode; keep output post-processing; force 8-bit
           characters; return from read() after 1 byte with no timeout. */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
431 
432 /* read a key without blocking */
/* Poll stdin for a single keypress without blocking.
 * Returns the character read, -1 when no input is available (or the pipe
 * is closed), or the short read() result (0 or negative) on EOF/error. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout;
    fd_set read_set;
    int avail;

    FD_ZERO(&read_set);
    FD_SET(0, &read_set);
    timeout.tv_sec = 0;
    timeout.tv_usec = 0; /* zero timeout: pure poll, never blocks */
    avail = select(1, &read_set, NULL, NULL, &timeout);
    if (avail > 0) {
        avail = read(0, &ch, 1);
        /* Propagate short-read/EOF results unchanged. */
        return avail == 1 ? ch : avail;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode fails when stdin is redirected, i.e. a pipe. */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            /* input pipe may have been closed by the program that ran ffmpeg */
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
483 
/* AVIOInterruptCallback: tells lavf to abort blocking I/O once a
   termination signal has been received.
   NOTE(review): the body line was lost by the extraction — upstream
   returns `received_nb_signals > atomic_load(&transcode_init_done);` —
   restore from upstream before compiling. */
static int decode_interrupt_cb(void *ctx)
{
}
488 
490 
/* Global teardown, registered via register_exit(): drains and frees
   filtergraphs, output files/streams and input files/streams, closes the
   vstats file, and reports whether the run ended by signal or by error.
   NOTE(review): MANY lines of this function were lost by the extraction
   (visible as dangling argument lines and reads of undeclared variables
   like `s` and `ost`): the lost lines include the av_fifo_generic_read/
   av_frame_free pair in the frame-queue drain, fifo reads in the subtitle
   queue drain, avfilter_graph_free, avformat_free_context, the
   AVFormatContext/OutputStream local declarations, av_packet_unref in the
   muxing-queue drain, avformat_close_input, the per-input-stream frame
   frees, and the final array frees.  Restore from upstream FFmpeg 4.3
   before compiling; do not hand-reconstruct. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* Free all filtergraphs, draining queued frames/subtitles first. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                /* NOTE(review): fifo read + av_frame_free lines lost here. */
                sizeof(frame), NULL);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): fifo read line lost here. */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

    }

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (!of)
            continue;
        s = of->ctx;
        /* Only close the AVIO context for formats that actually own a file. */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        av_dict_free(&of->opts);

    }
    for (i = 0; i < nb_output_streams; i++) {

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            }
            av_fifo_freep(&ost->muxing_queue);
        }

    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): av_log(NULL, AV_LOG_ERROR, ...) line lost here. */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    uninit_opts();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
647 
/* NOTE(review): the signature line was lost by the extraction — upstream
   is static void remove_avoptions(AVDictionary **a, AVDictionary *b).
   For every key present in b, remove the matching entry from *a (upstream
   calls av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE) inside the loop;
   that line was also lost — restore from upstream before compiling). */
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
    }
}
656 
/* NOTE(review): signature and local declaration lines lost by the
   extraction — upstream is static void assert_avoptions(AVDictionary *m)
   with AVDictionaryEntry *t; declared first.  Aborts the program if any
   user-supplied AVOption was left unconsumed in dictionary m. */
{
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
665 
/* Terminate when an experimental codec was requested without the required
   strictness level.  Parameters are unused in this body (presumably used
   by the caller's error message — TODO confirm against upstream). */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
670 
/* When -benchmark_all is active, log user/sys/real time deltas since the
   previous call, tagged with the printf-style label in fmt; with fmt ==
   NULL just resets the reference timestamp.
   NOTE(review): the extraction lost the lines declaring
   `BenchmarkTimeStamps t = get_benchmark_time_stamps();`, the av_log()
   call opener, and the user/sys delta arguments — restore from upstream
   before compiling. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.real_usec - current_time.real_usec, buf);
        }
        current_time = t;
    }
}
691 
/* NOTE(review): the signature line was lost by the extraction — upstream
   is static void close_all_output_streams(OutputStream *ost,
   OSTFinished this_stream, OSTFinished others).  Marks every output
   stream finished: `ost` itself with this_stream, all others with
   `others`. */
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
    }
}
700 
/* Hand one packet to the muxer for stream ost of file of, enforcing
   -frames limits, buffering packets while the muxer header is not yet
   written, extracting quality side data, sanitizing timestamps, and
   finally interleaved-writing.
   NOTE(review): several lines were lost by the extraction (visible as
   dangling argument lines and reads of undeclared `sd`): av_packet_unref
   on the over-limit path, the av_log opener in the muxing-queue overflow
   branch, av_packet_make_refcounted before queueing, the VSYNC_DROP /
   audio_sync_method timestamp-strip condition, the video-only block
   opener and the av_packet_get_side_data call, ts2str arguments in the
   debug log, the av_interleaved_write_frame call, the
   close_all_output_streams call on write error, and the trailing
   av_packet_unref.  Restore from upstream FFmpeg 4.3 before compiling. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* Grow the queue geometrically, capped at -max_muxing_queue_size. */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    int i;
    NULL);
    /* Quality side data layout: 32-bit quality, picture type at byte 4,
       error-count at byte 5, 64-bit error values from byte 8 on. */
    ost->quality = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1;
    }

    if (ost->frame_rate.num && ost->is_cfr) {
        if (pkt->duration > 0)
            av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
        pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                     ost->mux_timebase);
    }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* Replace both stamps with the median of {pts, dts, last+1}. */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        pkt->dts != AV_NOPTS_VALUE &&
        !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
        ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* Strict formats require strictly increasing DTS; non-strict
               ones only non-decreasing. */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               pkt->size
              );
    }

    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
    }
}
837 
/* NOTE(review): the signature line was lost by the extraction — upstream
   is static void close_output_stream(OutputStream *ost).  Marks the
   encoder finished; with -shortest, clamps the file's recording time to
   this stream's end (the line `of->recording_time = FFMIN(
   of->recording_time, end);` was also lost — restore from upstream). */
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        /* Stream end position converted to AV_TIME_BASE units. */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
    }
}
848 
849 /*
850  * Send a single packet to the output, applying any bitstream filters
851  * associated with the output stream. This may result in any number
852  * of packets actually being written, depending on what bitstream
853  * filters are applied. The supplied packet is consumed and will be
854  * blank (as if newly-allocated) when this function returns.
855  *
856  * If eof is set, instead indicate EOF to all bitstream filters and
857  * therefore flush any delayed packets to the output. A blank packet
858  * must be supplied in this case.
859  */
/* NOTE(review): the first half of the signature was lost by the
   extraction — upstream is static void output_packet(OutputFile *of,
   AVPacket *pkt, OutputStream *ost, int eof).  See the comment block
   above for the contract: routes pkt through any bitstream filters and
   writes every resulting packet; with eof set, flushes the filters. */
                          OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters */
    if (ost->bsf_ctx) {
        /* NULL input signals EOF/flush to the bsf. */
        ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
        if (ret < 0)
            goto finish;
        while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
            write_packet(of, pkt, ost, 0);
        if (ret == AVERROR(EAGAIN))
            ret = 0;
    } else if (!eof)
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
885 
/* NOTE(review): the signature line was lost by the extraction — upstream
   is static int check_recording_time(OutputStream *ost).  Returns 0 when
   the -t recording limit has been reached (upstream also calls
   close_output_stream(ost) on that path; that line was lost — restore
   from upstream), 1 when encoding may continue. */
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        return 0;
    }
    return 1;
}
898 
/* NOTE(review): the first line of the signature was lost by the
   extraction — upstream is static void do_audio_out(OutputFile *of,
   OutputStream *ost, AVFrame *frame).  Encodes one audio frame and
   writes all resulting packets.  Also lost: av_init_packet(&pkt), the
   `if (!check_recording_time(ost))` guard before the bare `return;`, and
   the av_ts2str argument lines in the second debug_ts log — restore from
   upstream before compiling. */
                        AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    pkt.data = NULL;
    pkt.size = 0;

        return;

    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    /* Advance the sync position by the samples in this frame. */
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* Drain all packets the encoder can produce for this frame. */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
        }

        output_packet(of, &pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
958 
/* Encode one AVSubtitle and mux the result.  DVB subtitles are encoded
   twice: once to draw the subpicture and once (with num_rects forced to
   0) to clear it.
   NOTE(review): the extraction lost the condition line before `nb = 2;`
   — upstream is `if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)` —
   restore from upstream before compiling. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Lazily allocate the shared 1 MiB encode buffer. */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* Second DVB pass: encode an empty subpicture to clear the screen. */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1041 
1042 static void do_video_out(OutputFile *of,
1043  OutputStream *ost,
1044  AVFrame *next_picture,
1045  double sync_ipts)
1046 {
1047  int ret, format_video_sync;
1048  AVPacket pkt;
1049  AVCodecContext *enc = ost->enc_ctx;
1050  AVCodecParameters *mux_par = ost->st->codecpar;
1051  AVRational frame_rate;
1052  int nb_frames, nb0_frames, i;
1053  double delta, delta0;
1054  double duration = 0;
1055  int frame_size = 0;
1056  InputStream *ist = NULL;
1057  AVFilterContext *filter = ost->filter->filter;
1058 
1059  if (ost->source_index >= 0)
1060  ist = input_streams[ost->source_index];
1061 
1062  frame_rate = av_buffersink_get_frame_rate(filter);
1063  if (frame_rate.num > 0 && frame_rate.den > 0)
1064  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1065 
1066  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1067  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1068 
1069  if (!ost->filters_script &&
1070  !ost->filters &&
1071  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1072  next_picture &&
1073  ist &&
1074  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1075  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1076  }
1077 
1078  if (!next_picture) {
1079  //end, flushing
1080  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1081  ost->last_nb0_frames[1],
1082  ost->last_nb0_frames[2]);
1083  } else {
1084  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1085  delta = delta0 + duration;
1086 
1087  /* by default, we output a single frame */
1088  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1089  nb_frames = 1;
1090 
1091  format_video_sync = video_sync_method;
1092  if (format_video_sync == VSYNC_AUTO) {
1093  if(!strcmp(of->ctx->oformat->name, "avi")) {
1094  format_video_sync = VSYNC_VFR;
1095  } else
1096  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1097  if ( ist
1098  && format_video_sync == VSYNC_CFR
1099  && input_files[ist->file_index]->ctx->nb_streams == 1
1100  && input_files[ist->file_index]->input_ts_offset == 0) {
1101  format_video_sync = VSYNC_VSCFR;
1102  }
1103  if (format_video_sync == VSYNC_CFR && copy_ts) {
1104  format_video_sync = VSYNC_VSCFR;
1105  }
1106  }
1107  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1108 
1109  if (delta0 < 0 &&
1110  delta > 0 &&
1111  format_video_sync != VSYNC_PASSTHROUGH &&
1112  format_video_sync != VSYNC_DROP) {
1113  if (delta0 < -0.6) {
1114  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1115  } else
1116  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1117  sync_ipts = ost->sync_opts;
1118  duration += delta0;
1119  delta0 = 0;
1120  }
1121 
1122  switch (format_video_sync) {
1123  case VSYNC_VSCFR:
1124  if (ost->frame_number == 0 && delta0 >= 0.5) {
1125  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1126  delta = duration;
1127  delta0 = 0;
1128  ost->sync_opts = llrint(sync_ipts);
1129  }
1130  case VSYNC_CFR:
1131  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1132  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1133  nb_frames = 0;
1134  } else if (delta < -1.1)
1135  nb_frames = 0;
1136  else if (delta > 1.1) {
1137  nb_frames = lrintf(delta);
1138  if (delta0 > 1.1)
1139  nb0_frames = llrintf(delta0 - 0.6);
1140  }
1141  break;
1142  case VSYNC_VFR:
1143  if (delta <= -0.6)
1144  nb_frames = 0;
1145  else if (delta > 0.6)
1146  ost->sync_opts = llrint(sync_ipts);
1147  break;
1148  case VSYNC_DROP:
1149  case VSYNC_PASSTHROUGH:
1150  ost->sync_opts = llrint(sync_ipts);
1151  break;
1152  default:
1153  av_assert0(0);
1154  }
1155  }
1156 
1157  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1158  nb0_frames = FFMIN(nb0_frames, nb_frames);
1159 
1160  memmove(ost->last_nb0_frames + 1,
1161  ost->last_nb0_frames,
1162  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1163  ost->last_nb0_frames[0] = nb0_frames;
1164 
1165  if (nb0_frames == 0 && ost->last_dropped) {
1166  nb_frames_drop++;
1168  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1169  ost->frame_number, ost->st->index, ost->last_frame->pts);
1170  }
1171  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1172  if (nb_frames > dts_error_threshold * 30) {
1173  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1174  nb_frames_drop++;
1175  return;
1176  }
1177  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1178  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1179  if (nb_frames_dup > dup_warning) {
1180  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1181  dup_warning *= 10;
1182  }
1183  }
1184  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1185 
1186  /* duplicates frame if needed */
1187  for (i = 0; i < nb_frames; i++) {
1188  AVFrame *in_picture;
1189  int forced_keyframe = 0;
1190  double pts_time;
1191  av_init_packet(&pkt);
1192  pkt.data = NULL;
1193  pkt.size = 0;
1194 
1195  if (i < nb0_frames && ost->last_frame) {
1196  in_picture = ost->last_frame;
1197  } else
1198  in_picture = next_picture;
1199 
1200  if (!in_picture)
1201  return;
1202 
1203  in_picture->pts = ost->sync_opts;
1204 
1205  if (!check_recording_time(ost))
1206  return;
1207 
1209  ost->top_field_first >= 0)
1210  in_picture->top_field_first = !!ost->top_field_first;
1211 
1212  if (in_picture->interlaced_frame) {
1213  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1214  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1215  else
1216  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1217  } else
1218  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1219 
1220  in_picture->quality = enc->global_quality;
1221  in_picture->pict_type = 0;
1222 
1223  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1224  in_picture->pts != AV_NOPTS_VALUE)
1225  ost->forced_kf_ref_pts = in_picture->pts;
1226 
1227  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1228  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1229  if (ost->forced_kf_index < ost->forced_kf_count &&
1230  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1231  ost->forced_kf_index++;
1232  forced_keyframe = 1;
1233  } else if (ost->forced_keyframes_pexpr) {
1234  double res;
1235  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1236  res = av_expr_eval(ost->forced_keyframes_pexpr,
1237  ost->forced_keyframes_expr_const_values, NULL);
1238  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1239  ost->forced_keyframes_expr_const_values[FKF_N],
1240  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1241  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1242  ost->forced_keyframes_expr_const_values[FKF_T],
1243  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1244  res);
1245  if (res) {
1246  forced_keyframe = 1;
1247  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1248  ost->forced_keyframes_expr_const_values[FKF_N];
1249  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1250  ost->forced_keyframes_expr_const_values[FKF_T];
1251  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1252  }
1253 
1254  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1255  } else if ( ost->forced_keyframes
1256  && !strncmp(ost->forced_keyframes, "source", 6)
1257  && in_picture->key_frame==1
1258  && !i) {
1259  forced_keyframe = 1;
1260  }
1261 
1262  if (forced_keyframe) {
1263  in_picture->pict_type = AV_PICTURE_TYPE_I;
1264  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1265  }
1266 
1268  if (debug_ts) {
1269  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1270  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1271  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1272  enc->time_base.num, enc->time_base.den);
1273  }
1274 
1275  ost->frames_encoded++;
1276 
1277  ret = avcodec_send_frame(enc, in_picture);
1278  if (ret < 0)
1279  goto error;
1280  // Make sure Closed Captions will not be duplicated
1282 
1283  while (1) {
1284  ret = avcodec_receive_packet(enc, &pkt);
1285  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1286  if (ret == AVERROR(EAGAIN))
1287  break;
1288  if (ret < 0)
1289  goto error;
1290 
1291  if (debug_ts) {
1292  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1293  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1296  }
1297 
1299  pkt.pts = ost->sync_opts;
1300 
1301  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1302 
1303  if (debug_ts) {
1304  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1305  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1306  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1307  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1308  }
1309 
1310  frame_size = pkt.size;
1311  output_packet(of, &pkt, ost, 0);
1312 
1313  /* if two pass, output log */
1314  if (ost->logfile && enc->stats_out) {
1315  fprintf(ost->logfile, "%s", enc->stats_out);
1316  }
1317  }
1318  ost->sync_opts++;
1319  /*
1320  * For video, number of frames in == number of packets out.
1321  * But there may be reordering, so we can't throw away frames on encoder
1322  * flush, we need to limit them here, before they go into encoder.
1323  */
1324  ost->frame_number++;
1325 
1326  if (vstats_filename && frame_size)
1328  }
1329 
1330  if (!ost->last_frame)
1331  ost->last_frame = av_frame_alloc();
1332  av_frame_unref(ost->last_frame);
1333  if (next_picture && ost->last_frame)
1334  av_frame_ref(ost->last_frame, next_picture);
1335  else
1336  av_frame_free(&ost->last_frame);
1337 
1338  return;
1339 error:
1340  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1341  exit_program(1);
1342 }
1343 
1344 static double psnr(double d)
1345 {
1346  return -10.0 * log10(d);
1347 }
1348 
1350 {
1351  AVCodecContext *enc;
1352  int frame_number;
1353  double ti1, bitrate, avg_bitrate;
1354 
1355  /* this is executed just the first time do_video_stats is called */
1356  if (!vstats_file) {
1357  vstats_file = fopen(vstats_filename, "w");
1358  if (!vstats_file) {
1359  perror("fopen");
1360  exit_program(1);
1361  }
1362  }
1363 
1364  enc = ost->enc_ctx;
1365  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1366  frame_number = ost->st->nb_frames;
1367  if (vstats_version <= 1) {
1368  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1369  ost->quality / (float)FF_QP2LAMBDA);
1370  } else {
1371  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1372  ost->quality / (float)FF_QP2LAMBDA);
1373  }
1374 
1375  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1376  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1377 
1378  fprintf(vstats_file,"f_size= %6d ", frame_size);
1379  /* compute pts value */
1380  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1381  if (ti1 < 0.01)
1382  ti1 = 0.01;
1383 
1384  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1385  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1386  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1387  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1388  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1389  }
1390 }
1391 
1392 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1393 
1395 {
1396  OutputFile *of = output_files[ost->file_index];
1397  int i;
1398 
1399  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1400 
1401  if (of->shortest) {
1402  for (i = 0; i < of->ctx->nb_streams; i++)
1404  }
1405 }
1406 
1407 /**
1408  * Get and encode new output from any of the filtergraphs, without causing
1409  * activity.
1410  *
1411  * @return 0 for success, <0 for severe errors
1412  */
1413 static int reap_filters(int flush)
1414 {
/* Drain every frame currently buffered in the filtergraph buffer sinks and
 * hand it to the matching audio/video encoder. Returns 0 on success, <0 on
 * severe errors (ENOMEM).
 * NOTE(review): this listing was extracted from a rendered source view and
 * several hyperlinked lines were dropped (original lines 1420, 1422, 1448,
 * 1451, 1454-1455, 1465, 1472, 1479, 1499) — compare with upstream
 * ffmpeg.c 4.3 before editing. */
1415  AVFrame *filtered_frame = NULL;
1416  int i;
1417 
1418  /* Reap all buffers present in the buffer sinks */
1419  for (i = 0; i < nb_output_streams; i++) {
1421  OutputFile *of = output_files[ost->file_index];
1423  AVCodecContext *enc = ost->enc_ctx;
1424  int ret = 0;
1425 
     /* Skip streams whose filtergraph is not configured yet. */
1426  if (!ost->filter || !ost->filter->graph->graph)
1427  continue;
1428  filter = ost->filter->filter;
1429 
     /* Lazily initialize the output stream the first time a frame arrives. */
1430  if (!ost->initialized) {
1431  char error[1024] = "";
1432  ret = init_output_stream(ost, error, sizeof(error));
1433  if (ret < 0) {
1434  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1435  ost->file_index, ost->index, error);
1436  exit_program(1);
1437  }
1438  }
1439 
     /* Allocate the per-stream scratch frame once and reuse it. */
1440  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1441  return AVERROR(ENOMEM);
1442  }
1443  filtered_frame = ost->filtered_frame;
1444 
1445  while (1) {
1446  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1447  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1449  if (ret < 0) {
1450  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1452  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1453  } else if (flush && ret == AVERROR_EOF) {
1456  }
1457  break;
1458  }
     /* Stream already finished: discard any late frames. */
1459  if (ost->finished) {
1460  av_frame_unref(filtered_frame);
1461  continue;
1462  }
1463  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1464  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1466  AVRational tb = enc->time_base;
1467  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1468 
     /* Widen the timebase denominator to compute a higher-precision
      * floating-point pts for the fps conversion code. */
1469  tb.den <<= extra_bits;
1470  float_pts =
1471  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1473  float_pts /= 1 << extra_bits;
1474  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1475  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1476 
1477  filtered_frame->pts =
1478  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1480  }
1481 
1482  switch (av_buffersink_get_type(filter)) {
1483  case AVMEDIA_TYPE_VIDEO:
1484  if (!ost->frame_aspect_ratio.num)
1485  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1486 
1487  if (debug_ts) {
1488  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1489  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1490  float_pts,
1491  enc->time_base.num, enc->time_base.den);
1492  }
1493 
1494  do_video_out(of, ost, filtered_frame, float_pts);
1495  break;
1496  case AVMEDIA_TYPE_AUDIO:
1497  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1498  enc->channels != filtered_frame->channels) {
1500  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1501  break;
1502  }
1503  do_audio_out(of, ost, filtered_frame);
1504  break;
1505  default:
1506  // TODO support subtitle filters
1507  av_assert0(0);
1508  }
1509 
1510  av_frame_unref(filtered_frame);
1511  }
1512  }
1513 
1514  return 0;
1515 }
1516 
/* Print the end-of-run summary: per-type byte totals, muxing overhead, and
 * verbose per-input/per-output stream statistics.
 * NOTE(review): extracted from a rendered source view; hyperlinked lines
 * were dropped (original lines 1527, 1537, 1599 — the OutputStream
 * declarations and part of the two-pass flag test). Compare with upstream
 * ffmpeg.c 4.3 before editing. */
1517 static void print_final_stats(int64_t total_size)
1518 {
1519  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1520  uint64_t subtitle_size = 0;
1521  uint64_t data_size = 0;
1522  float percent = -1.0;
1523  int i, j;
1524  int pass1_used = 1;
1525 
     /* Accumulate written bytes per media type across all output streams. */
1526  for (i = 0; i < nb_output_streams; i++) {
1528  switch (ost->enc_ctx->codec_type) {
1529  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1530  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1531  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1532  default: other_size += ost->data_size; break;
1533  }
1534  extra_size += ost->enc_ctx->extradata_size;
1535  data_size += ost->data_size;
1536  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1538  pass1_used = 0;
1539  }
1540 
     /* Muxing overhead as a percentage of the payload size. */
1541  if (data_size && total_size>0 && total_size >= data_size)
1542  percent = 100.0 * (total_size - data_size) / data_size;
1543 
1544  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1545  video_size / 1024.0,
1546  audio_size / 1024.0,
1547  subtitle_size / 1024.0,
1548  other_size / 1024.0,
1549  extra_size / 1024.0);
1550  if (percent >= 0.0)
1551  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1552  else
1553  av_log(NULL, AV_LOG_INFO, "unknown");
1554  av_log(NULL, AV_LOG_INFO, "\n");
1555 
1556  /* print verbose per-stream stats */
1557  for (i = 0; i < nb_input_files; i++) {
1558  InputFile *f = input_files[i];
1559  uint64_t total_packets = 0, total_size = 0;
1560 
1561  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1562  i, f->ctx->url);
1563 
1564  for (j = 0; j < f->nb_streams; j++) {
1565  InputStream *ist = input_streams[f->ist_index + j];
1566  enum AVMediaType type = ist->dec_ctx->codec_type;
1567 
1568  total_size += ist->data_size;
1569  total_packets += ist->nb_packets;
1570 
1571  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1572  i, j, media_type_string(type));
1573  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1574  ist->nb_packets, ist->data_size);
1575 
1576  if (ist->decoding_needed) {
1577  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1578  ist->frames_decoded);
1579  if (type == AVMEDIA_TYPE_AUDIO)
1580  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1581  av_log(NULL, AV_LOG_VERBOSE, "; ");
1582  }
1583 
1584  av_log(NULL, AV_LOG_VERBOSE, "\n");
1585  }
1586 
1587  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1588  total_packets, total_size);
1589  }
1590 
1591  for (i = 0; i < nb_output_files; i++) {
1592  OutputFile *of = output_files[i];
1593  uint64_t total_packets = 0, total_size = 0;
1594 
1595  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1596  i, of->ctx->url);
1597 
1598  for (j = 0; j < of->ctx->nb_streams; j++) {
1600  enum AVMediaType type = ost->enc_ctx->codec_type;
1601 
1602  total_size += ost->data_size;
1603  total_packets += ost->packets_written;
1604 
1605  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1606  i, j, media_type_string(type));
1607  if (ost->encoding_needed) {
1608  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1609  ost->frames_encoded);
1610  if (type == AVMEDIA_TYPE_AUDIO)
1611  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1612  av_log(NULL, AV_LOG_VERBOSE, "; ");
1613  }
1614 
1615  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1616  ost->packets_written, ost->data_size);
1617 
1618  av_log(NULL, AV_LOG_VERBOSE, "\n");
1619  }
1620 
1621  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1622  total_packets, total_size);
1623  }
     /* Empty-output warning; hint about -ss/-t/-frames only when it cannot
      * be explained by a pass-1 run. */
1624  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1625  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1626  if (pass1_used) {
1627  av_log(NULL, AV_LOG_WARNING, "\n");
1628  } else {
1629  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1630  }
1631  }
1632 }
1633 
/* Emit the periodic status line (frame/fps/q/size/time/bitrate/speed) and,
 * when -progress is active, the machine-readable key=value progress block.
 * Rate-limited to one report per 500ms unless is_last_report is set.
 * NOTE(review): extracted from a rendered source view; hyperlinked lines
 * were dropped (original lines 1675, 1743-1744, 1791, 1820, 1824 — the
 * av_bprint_init of buf, the pts update via av_stream_get_end_pts, and
 * parts of the progress/avio error logging). Compare with upstream
 * ffmpeg.c 4.3 before editing. */
1634 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1635 {
1636  AVBPrint buf, buf_script;
1637  OutputStream *ost;
1638  AVFormatContext *oc;
1639  int64_t total_size;
1640  AVCodecContext *enc;
1641  int frame_number, vid, i;
1642  double bitrate;
1643  double speed;
1644  int64_t pts = INT64_MIN + 1;
1645  static int64_t last_time = -1;
1646  static int qp_histogram[52];
1647  int hours, mins, secs, us;
1648  const char *hours_sign;
1649  int ret;
1650  float t;
1651 
1652  if (!print_stats && !is_last_report && !progress_avio)
1653  return;
1654 
     /* Throttle intermediate reports to at most one every 500ms. */
1655  if (!is_last_report) {
1656  if (last_time == -1) {
1657  last_time = cur_time;
1658  return;
1659  }
1660  if ((cur_time - last_time) < 500000)
1661  return;
1662  last_time = cur_time;
1663  }
1664 
1665  t = (cur_time-timer_start) / 1000000.0;
1666 
1667 
1668  oc = output_files[0]->ctx;
1669 
1670  total_size = avio_size(oc->pb);
1671  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1672  total_size = avio_tell(oc->pb);
1673 
1674  vid = 0;
1676  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1677  for (i = 0; i < nb_output_streams; i++) {
1678  float q = -1;
1679  ost = output_streams[i];
1680  enc = ost->enc_ctx;
1681  if (!ost->stream_copy)
1682  q = ost->quality / (float) FF_QP2LAMBDA;
1683 
1684  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1685  av_bprintf(&buf, "q=%2.1f ", q);
1686  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1687  ost->file_index, ost->index, q);
1688  }
     /* First video stream drives the frame/fps part of the report. */
1689  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1690  float fps;
1691 
1692  frame_number = ost->frame_number;
1693  fps = t > 1 ? frame_number / t : 0;
1694  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1695  frame_number, fps < 9.95, fps, q);
1696  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1697  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1698  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699  ost->file_index, ost->index, q);
1700  if (is_last_report)
1701  av_bprintf(&buf, "L");
1702  if (qp_hist) {
1703  int j;
1704  int qp = lrintf(q);
1705  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1706  qp_histogram[qp]++;
1707  for (j = 0; j < 32; j++)
1708  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1709  }
1710 
1711  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1712  int j;
1713  double error, error_sum = 0;
1714  double scale, scale_sum = 0;
1715  double p;
1716  char type[3] = { 'Y','U','V' };
1717  av_bprintf(&buf, "PSNR=");
1718  for (j = 0; j < 3; j++) {
1719  if (is_last_report) {
1720  error = enc->error[j];
1721  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1722  } else {
1723  error = ost->error[j];
1724  scale = enc->width * enc->height * 255.0 * 255.0;
1725  }
     /* Chroma planes are subsampled, hence a quarter of the luma area. */
1726  if (j)
1727  scale /= 4;
1728  error_sum += error;
1729  scale_sum += scale;
1730  p = psnr(error / scale);
1731  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1732  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1733  ost->file_index, ost->index, type[j] | 32, p);
1734  }
1735  p = psnr(error_sum / scale_sum);
1736  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1737  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1738  ost->file_index, ost->index, p);
1739  }
1740  vid = 1;
1741  }
1742  /* compute min output value */
1745  ost->st->time_base, AV_TIME_BASE_Q));
1746  if (is_last_report)
1747  nb_frames_drop += ost->last_dropped;
1748  }
1749 
     /* Split the output timestamp into sign/h/m/s/us for display. */
1750  secs = FFABS(pts) / AV_TIME_BASE;
1751  us = FFABS(pts) % AV_TIME_BASE;
1752  mins = secs / 60;
1753  secs %= 60;
1754  hours = mins / 60;
1755  mins %= 60;
1756  hours_sign = (pts < 0) ? "-" : "";
1757 
1758  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1759  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1760 
1761  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1762  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1763  if (pts == AV_NOPTS_VALUE) {
1764  av_bprintf(&buf, "N/A ");
1765  } else {
1766  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1767  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1768  }
1769 
1770  if (bitrate < 0) {
1771  av_bprintf(&buf, "bitrate=N/A");
1772  av_bprintf(&buf_script, "bitrate=N/A\n");
1773  }else{
1774  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1775  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1776  }
1777 
1778  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1779  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1780  if (pts == AV_NOPTS_VALUE) {
1781  av_bprintf(&buf_script, "out_time_us=N/A\n");
1782  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1783  av_bprintf(&buf_script, "out_time=N/A\n");
1784  } else {
1785  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1786  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1787  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1788  hours_sign, hours, mins, secs, us);
1789  }
1790 
1792  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1793  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1794  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1795 
1796  if (speed < 0) {
1797  av_bprintf(&buf, " speed=N/A");
1798  av_bprintf(&buf_script, "speed=N/A\n");
1799  } else {
1800  av_bprintf(&buf, " speed=%4.3gx", speed);
1801  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1802  }
1803 
     /* Human-readable status line: '\r' overwrites in place, '\n' on last. */
1804  if (print_stats || is_last_report) {
1805  const char end = is_last_report ? '\n' : '\r';
1806  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1807  fprintf(stderr, "%s %c", buf.str, end);
1808  } else
1809  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1810 
1811  fflush(stderr);
1812  }
1813  av_bprint_finalize(&buf, NULL);
1814 
1815  if (progress_avio) {
1816  av_bprintf(&buf_script, "progress=%s\n",
1817  is_last_report ? "end" : "continue");
1818  avio_write(progress_avio, buf_script.str,
1819  FFMIN(buf_script.len, buf_script.size - 1));
1821  av_bprint_finalize(&buf_script, NULL);
1822  if (is_last_report) {
1823  if ((ret = avio_closep(&progress_avio)) < 0)
1825  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1826  }
1827  }
1828 
1829  if (is_last_report)
1830  print_final_stats(total_size);
1831 }
1832 
1834 {
1835  // We never got any input. Set a fake format, which will
1836  // come from libavformat.
1837  ifilter->format = par->format;
1838  ifilter->sample_rate = par->sample_rate;
1839  ifilter->channels = par->channels;
1840  ifilter->channel_layout = par->channel_layout;
1841  ifilter->width = par->width;
1842  ifilter->height = par->height;
1843  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1844 }
1845 
/* Drain every encoder at end of input: send a NULL (flush) frame and mux all
 * remaining packets until AVERROR_EOF. Streams never initialized (no data)
 * are finished with a best-effort filtergraph/encoder setup first.
 * NOTE(review): extracted from a rendered source view; hyperlinked lines
 * were dropped (original lines 1851, 1864, 1876, 1885, 1896, 1919 — the
 * OutputStream declaration, an av_log call, two filtergraph checks, the
 * !stopped_encoding guard, and the update_benchmark(NULL) call). Compare
 * with upstream ffmpeg.c 4.3 before editing. */
1846 static void flush_encoders(void)
1847 {
1848  int i, ret;
1849 
1850  for (i = 0; i < nb_output_streams; i++) {
1852  AVCodecContext *enc = ost->enc_ctx;
1853  OutputFile *of = output_files[ost->file_index];
1854 
1855  if (!ost->encoding_needed)
1856  continue;
1857 
1858  // Try to enable encoding with no input frames.
1859  // Maybe we should just let encoding fail instead.
1860  if (!ost->initialized) {
1861  FilterGraph *fg = ost->filter->graph;
1862  char error[1024] = "";
1863 
1865  "Finishing stream %d:%d without any data written to it.\n",
1866  ost->file_index, ost->st->index);
1867 
     /* Graph never configured: fall back to codecpar-derived parameters
      * so the graph can still be built. */
1868  if (ost->filter && !fg->graph) {
1869  int x;
1870  for (x = 0; x < fg->nb_inputs; x++) {
1871  InputFilter *ifilter = fg->inputs[x];
1872  if (ifilter->format < 0)
1873  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1874  }
1875 
1877  continue;
1878 
1879  ret = configure_filtergraph(fg);
1880  if (ret < 0) {
1881  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1882  exit_program(1);
1883  }
1884 
1886  }
1887 
1888  ret = init_output_stream(ost, error, sizeof(error));
1889  if (ret < 0) {
1890  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1891  ost->file_index, ost->index, error);
1892  exit_program(1);
1893  }
1894  }
1895 
1897  continue;
1898 
     /* Drain loop: receive packets until EOF, sending the flush frame
      * whenever the encoder asks for more input (EAGAIN). */
1899  for (;;) {
1900  const char *desc = NULL;
1901  AVPacket pkt;
1902  int pkt_size;
1903 
1904  switch (enc->codec_type) {
1905  case AVMEDIA_TYPE_AUDIO:
1906  desc = "audio";
1907  break;
1908  case AVMEDIA_TYPE_VIDEO:
1909  desc = "video";
1910  break;
1911  default:
1912  av_assert0(0);
1913  }
1914 
1915  av_init_packet(&pkt);
1916  pkt.data = NULL;
1917  pkt.size = 0;
1918 
1920 
1921  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1922  ret = avcodec_send_frame(enc, NULL);
1923  if (ret < 0) {
1924  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1925  desc,
1926  av_err2str(ret));
1927  exit_program(1);
1928  }
1929  }
1930 
1931  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1932  if (ret < 0 && ret != AVERROR_EOF) {
1933  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1934  desc,
1935  av_err2str(ret));
1936  exit_program(1);
1937  }
1938  if (ost->logfile && enc->stats_out) {
1939  fprintf(ost->logfile, "%s", enc->stats_out);
1940  }
     /* EOF: emit the terminal (EOF) packet to the muxer and stop. */
1941  if (ret == AVERROR_EOF) {
1942  output_packet(of, &pkt, ost, 1);
1943  break;
1944  }
1945  if (ost->finished & MUXER_FINISHED) {
1946  av_packet_unref(&pkt);
1947  continue;
1948  }
1949  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1950  pkt_size = pkt.size;
1951  output_packet(of, &pkt, ost, 0);
1952  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1953  do_video_stats(ost, pkt_size);
1954  }
1955  }
1956  }
1957 }
1958 
1959 /*
1960  * Check whether a packet from ist should be written into ost at this time
1961  */
1963 {
1964  OutputFile *of = output_files[ost->file_index];
1965  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1966 
1967  if (ost->source_index != ist_index)
1968  return 0;
1969 
1970  if (ost->finished)
1971  return 0;
1972 
1973  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1974  return 0;
1975 
1976  return 1;
1977 }
1978 
/* Stream-copy one input packet into the output (no re-encoding): apply the
 * start-time / recording-time windows, then rescale pts/dts/duration into
 * the muxer timebase and hand the packet to output_packet().
 * NOTE(review): extracted from a rendered source view; the signature line
 * (original 1979, do_streamcopy(InputStream*, OutputStream*, const
 * AVPacket*)) and hyperlinked lines 2012, 2021, 2039 (close_output_stream
 * calls and the audio duration computation) were dropped. Compare with
 * upstream ffmpeg.c 4.3 before editing. */
1980 {
1981  OutputFile *of = output_files[ost->file_index];
1982  InputFile *f = input_files [ist->file_index];
1983  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1984  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1985  AVPacket opkt;
1986 
1987  // EOF: flush output bitstream filters.
1988  if (!pkt) {
1989  av_init_packet(&opkt);
1990  opkt.data = NULL;
1991  opkt.size = 0;
1992  output_packet(of, &opkt, ost, 1);
1993  return;
1994  }
1995 
     /* Drop leading non-keyframes unless explicitly allowed. */
1996  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1997  !ost->copy_initial_nonkeyframes)
1998  return;
1999 
2000  if (!ost->frame_number && !ost->copy_prior_start) {
2001  int64_t comp_start = start_time;
2002  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2003  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2004  if (pkt->pts == AV_NOPTS_VALUE ?
2005  ist->pts < comp_start :
2006  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2007  return;
2008  }
2009 
2010  if (of->recording_time != INT64_MAX &&
2011  ist->pts >= of->recording_time + start_time) {
2013  return;
2014  }
2015 
2016  if (f->recording_time != INT64_MAX) {
2017  start_time = f->ctx->start_time;
2018  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2019  start_time += f->start_time;
2020  if (ist->pts >= f->recording_time + start_time) {
2022  return;
2023  }
2024  }
2025 
2026  /* force the input stream PTS */
2027  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2028  ost->sync_opts++;
2029 
2030  if (av_packet_ref(&opkt, pkt) < 0)
2031  exit_program(1);
2032 
2033  if (pkt->pts != AV_NOPTS_VALUE)
2034  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2035 
2036  if (pkt->dts == AV_NOPTS_VALUE) {
2037  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2038  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2040  if(!duration)
2041  duration = ist->dec_ctx->frame_size;
     /* av_rescale_delta keeps sample-accurate dts across rounding. */
2042  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2043  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2044  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2045  /* dts will be set immediately afterwards to what pts is now */
2046  opkt.pts = opkt.dts - ost_tb_start_time;
2047  } else
2048  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2049  opkt.dts -= ost_tb_start_time;
2050 
2051  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2052 
2053  output_packet(of, &opkt, ost, 0);
2054 }
2055 
2057 {
2058  AVCodecContext *dec = ist->dec_ctx;
2059 
2060  if (!dec->channel_layout) {
2061  char layout_name[256];
2062 
2063  if (dec->channels > ist->guess_layout_max)
2064  return 0;
2065  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2066  if (!dec->channel_layout)
2067  return 0;
2068  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2069  dec->channels, dec->channel_layout);
2070  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2071  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2072  }
2073  return 1;
2074 }
2075 
/* Track decode success/failure statistics and enforce -xerror policy:
 * a decode error (or a corrupt decoded frame, with -xerror) aborts the run.
 * NOTE(review): extracted from a rendered source view; hyperlinked lines
 * 2085-2086 (the corrupt-frame test and the start of its av_log call) were
 * dropped. Compare with upstream ffmpeg.c 4.3 before editing. */
2076 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2077 {
     /* decode_error_stat[0] counts successes, [1] counts failures. */
2078  if (*got_output || ret<0)
2079  decode_error_stat[ret<0] ++;
2080 
2081  if (ret < 0 && exit_on_error)
2082  exit_program(1);
2083 
2084  if (*got_output && ist) {
2087  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2088  if (exit_on_error)
2089  exit_program(1);
2090  }
2091  }
2092 }
2093 
2094 // Filters can be configured only if the formats of all inputs are known.
2096 {
2097  int i;
2098  for (i = 0; i < fg->nb_inputs; i++) {
2099  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2100  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2101  return 0;
2102  }
2103  return 1;
2104 }
2105 
/* Feed one decoded frame into an input filter, reconfiguring the
 * filtergraph first when the frame's parameters (format, dimensions,
 * sample rate, channels/layout, hw frames context) changed, and queueing
 * the frame instead when the graph cannot be configured yet.
 * NOTE(review): extracted from a rendered source view; the signature line
 * (original 2106, ifilter_send_frame(InputFilter*, AVFrame*)) and
 * hyperlinked lines 2134 (ifilter_parameters_from_frame call), 2143-2146
 * (av_frame_clone/av_frame_unref) and 2173 (av_buffersrc_add_frame_flags
 * call) were dropped. Compare with upstream ffmpeg.c 4.3 before editing. */
2107 {
2108  FilterGraph *fg = ifilter->graph;
2109  int need_reinit, ret, i;
2110 
2111  /* determine if the parameters for this input changed */
2112  need_reinit = ifilter->format != frame->format;
2113 
2114  switch (ifilter->ist->st->codecpar->codec_type) {
2115  case AVMEDIA_TYPE_AUDIO:
2116  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2117  ifilter->channels != frame->channels ||
2118  ifilter->channel_layout != frame->channel_layout;
2119  break;
2120  case AVMEDIA_TYPE_VIDEO:
2121  need_reinit |= ifilter->width != frame->width ||
2122  ifilter->height != frame->height;
2123  break;
2124  }
2125 
     /* Honour -reinit_filter 0: keep the existing graph despite changes. */
2126  if (!ifilter->ist->reinit_filters && fg->graph)
2127  need_reinit = 0;
2128 
2129  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2130  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2131  need_reinit = 1;
2132 
2133  if (need_reinit) {
2135  if (ret < 0)
2136  return ret;
2137  }
2138 
2139  /* (re)init the graph if possible, otherwise buffer the frame and return */
2140  if (need_reinit || !fg->graph) {
2141  for (i = 0; i < fg->nb_inputs; i++) {
2142  if (!ifilter_has_all_input_formats(fg)) {
2144  if (!tmp)
2145  return AVERROR(ENOMEM);
2147 
     /* Grow the queue when full; frames are consumed on (re)config. */
2148  if (!av_fifo_space(ifilter->frame_queue)) {
2149  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2150  if (ret < 0) {
2151  av_frame_free(&tmp);
2152  return ret;
2153  }
2154  }
2155  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2156  return 0;
2157  }
2158  }
2159 
     /* Flush pending output from the old graph before tearing it down. */
2160  ret = reap_filters(1);
2161  if (ret < 0 && ret != AVERROR_EOF) {
2162  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2163  return ret;
2164  }
2165 
2166  ret = configure_filtergraph(fg);
2167  if (ret < 0) {
2168  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2169  return ret;
2170  }
2171  }
2172 
2174  if (ret < 0) {
2175  if (ret != AVERROR_EOF)
2176  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2177  return ret;
2178  }
2179 
2180  return 0;
2181 }
2182 
/* Signal end-of-stream on one filtergraph input.
 * If the input was already wired into a configured graph, EOF is pushed
 * into the buffer source; otherwise (graph never configured) the stream
 * parameters are recovered from the demuxer so the graph can still be
 * set up later, and an error is raised if the format stays unknown. */
2183 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2184 {
2185  int ret;
2186 
2187  ifilter->eof = 1;
2188 
2189  if (ifilter->filter) {
/* NOTE(review): the statement assigning `ret` (upstream line 2190, which
 * pushes EOF at `pts` into the configured buffer source) was dropped by
 * this listing renderer — confirm against the original source. */
2191  if (ret < 0)
2192  return ret;
2193  } else {
2194  // the filtergraph was never configured
/* Fall back to the demuxer-provided codec parameters. */
2195  if (ifilter->format < 0)
2196  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
/* Audio/video inputs cannot be configured without a known format. */
2197  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2198  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2199  return AVERROR_INVALIDDATA;
2200  }
2201  }
2202 
2203  return 0;
2204 }
2205 
2206 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2207 // There is the following difference: if you got a frame, you must call
2208 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2209 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2210 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2211 {
2212  int ret;
2213 
2214  *got_frame = 0;
2215 
2216  if (pkt) {
2217  ret = avcodec_send_packet(avctx, pkt);
2218  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2219  // decoded frames with avcodec_receive_frame() until done.
2220  if (ret < 0 && ret != AVERROR_EOF)
2221  return ret;
2222  }
2223 
2224  ret = avcodec_receive_frame(avctx, frame);
2225  if (ret < 0 && ret != AVERROR(EAGAIN))
2226  return ret;
2227  if (ret >= 0)
2228  *got_frame = 1;
2229 
2230  return 0;
2231 }
2232 
2234 {
2235  int i, ret;
2236  AVFrame *f;
2237 
2238  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2239  for (i = 0; i < ist->nb_filters; i++) {
2240  if (i < ist->nb_filters - 1) {
2241  f = ist->filter_frame;
2243  if (ret < 0)
2244  break;
2245  } else
2246  f = decoded_frame;
2247  ret = ifilter_send_frame(ist->filters[i], f);
2248  if (ret == AVERROR_EOF)
2249  ret = 0; /* ignore */
2250  if (ret < 0) {
2252  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2253  break;
2254  }
2255  }
2256  return ret;
2257 }
2258 
/* NOTE(review): the first signature line (upstream 2259, presumably
 * static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,)
 * was dropped by the listing renderer.
 * Decodes one audio packet, validates the sample rate, advances the
 * stream's predicted next_pts/next_dts by the decoded duration, picks a
 * timebase for the frame pts, rescales it to the sample-rate timebase,
 * and hands the frame to the filter network. */
2260  int *decode_failed)
2261 {
2263  AVCodecContext *avctx = ist->dec_ctx;
2264  int ret, err = 0;
2265  AVRational decoded_frame_tb;
2266 
/* Lazily allocate the frames reused across calls. */
2267  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2268  return AVERROR(ENOMEM);
2269  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2270  return AVERROR(ENOMEM);
2272 
2274  ret = decode(avctx, decoded_frame, got_output, pkt);
2275  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2276  if (ret < 0)
2277  *decode_failed = 1;
2278 
/* A successful decode with a non-positive sample rate is nonsensical. */
2279  if (ret >= 0 && avctx->sample_rate <= 0) {
2280  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2282  }
2283 
2284  if (ret != AVERROR_EOF)
/* NOTE(review): the statement on upstream line 2285 (error-checking helper
 * call) is missing from this listing. */
2286 
2287  if (!*got_output || ret < 0)
2288  return ret;
2289 
2291  ist->frames_decoded++;
2292 
2293  /* increment next_dts to use for the case where the input stream does not
2294  have timestamps or there are multiple frames in the packet */
2295  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2296  avctx->sample_rate;
2297  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2298  avctx->sample_rate;
2299 
/* Choose pts + timebase: prefer the frame's own pts, then the packet pts
 * (both in stream timebase), and finally fall back to the stream dts in
 * AV_TIME_BASE units. */
2300  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2301  decoded_frame_tb = ist->st->time_base;
2302  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2303  decoded_frame->pts = pkt->pts;
2304  decoded_frame_tb = ist->st->time_base;
2305  }else {
2306  decoded_frame->pts = ist->dts;
2307  decoded_frame_tb = AV_TIME_BASE_Q;
2308  }
/* av_rescale_delta() keeps sample-accurate timestamps across rescales
 * via the filter_in_rescale_delta_last accumulator. */
2310  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2311  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2312  (AVRational){1, avctx->sample_rate});
/* NOTE(review): upstream lines 2313-2317 (send_frame_to_filters() call and
 * frame unref cleanup) were dropped by this listing renderer. */
2315 
2318  return err < 0 ? err : ret;
2319 }
2320 
/* Decodes one video packet (or a drain packet at EOF), reconciles the
 * decoder's notion of delay/timestamps with the demuxer's, computes a
 * best-effort pts for the frame, and forwards it to the filter network.
 * On EOF the dts of drain packets is buffered in ist->dts_buffer because
 * the send/receive API no longer carries dts on the drain packet. */
2321 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2322  int *decode_failed)
2323 {
2325  int i, ret = 0, err = 0;
2326  int64_t best_effort_timestamp;
2327  int64_t dts = AV_NOPTS_VALUE;
2328  AVPacket avpkt;
2329 
2330  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2331  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2332  // skip the packet.
2333  if (!eof && pkt && pkt->size == 0)
2334  return 0;
2335 
/* Lazily allocate the frames reused across calls. */
2336  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2337  return AVERROR(ENOMEM);
2338  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2339  return AVERROR(ENOMEM);
/* Rescale the stream dts into the stream timebase for the decoder. */
2341  if (ist->dts != AV_NOPTS_VALUE)
2342  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2343  if (pkt) {
2344  avpkt = *pkt;
2345  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2346  }
2347 
2348  // The old code used to set dts on the drain packet, which does not work
2349  // with the new API anymore.
2350  if (eof) {
2351  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2352  if (!new)
2353  return AVERROR(ENOMEM);
2354  ist->dts_buffer = new;
2355  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2356  }
2357 
2359  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2360  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2361  if (ret < 0)
2362  *decode_failed = 1;
2363 
2364  // The following line may be required in some cases where there is no parser
2365  // or the parser does not has_b_frames correctly
2366  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2367  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2368  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2369  } else
/* NOTE(review): the av_log(...) call opening (upstream 2370) is missing
 * from this listing; only its format string and arguments are visible. */
2371  "video_delay is larger in decoder than demuxer %d > %d.\n"
2372  "If you want to help, upload a sample "
2373  "of this file to https://streams.videolan.org/upload/ "
2374  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2375  ist->dec_ctx->has_b_frames,
2376  ist->st->codecpar->video_delay);
2377  }
2378 
2379  if (ret != AVERROR_EOF)
/* NOTE(review): the statement on upstream line 2380 (error-checking helper
 * call) is missing from this listing. */
2381 
/* Debug aid: report decoder-context vs. frame parameter mismatches. */
2382  if (*got_output && ret >= 0) {
2383  if (ist->dec_ctx->width != decoded_frame->width ||
2384  ist->dec_ctx->height != decoded_frame->height ||
2385  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2386  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2390  ist->dec_ctx->width,
2391  ist->dec_ctx->height,
2392  ist->dec_ctx->pix_fmt);
2393  }
2394  }
2395 
2396  if (!*got_output || ret < 0)
2397  return ret;
2398 
/* Apply the user-forced field order, if any (-top option). */
2399  if(ist->top_field_first>=0)
2401 
2402  ist->frames_decoded++;
2403 
/* For hwaccel decoding, download the frame data to system memory. */
2405  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2406  if (err < 0)
2407  goto fail;
2408  }
2410 
2411  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2412  *duration_pts = decoded_frame->pkt_duration;
2413 
/* With a forced input framerate (-r before -i) pts are simply counted. */
2414  if (ist->framerate.num)
2415  best_effort_timestamp = ist->cfr_next_pts++;
2416 
/* At EOF, fall back to the oldest buffered drain-packet dts. */
2417  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2418  best_effort_timestamp = ist->dts_buffer[0];
2419 
2420  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2421  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2422  ist->nb_dts_buffer--;
2423  }
2424 
2425  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2426  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2427 
2428  if (ts != AV_NOPTS_VALUE)
2429  ist->next_pts = ist->pts = ts;
2430  }
2431 
2432  if (debug_ts) {
2433  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2434  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2435  ist->st->index, av_ts2str(decoded_frame->pts),
2437  best_effort_timestamp,
2438  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2440  ist->st->time_base.num, ist->st->time_base.den);
2441  }
2442 
/* Prefer the container-level sample aspect ratio over the codec one. */
2443  if (ist->st->sample_aspect_ratio.num)
2445 
2447 
2448 fail:
/* NOTE(review): upstream lines 2449-2450 (frame unref cleanup) were dropped
 * by this listing renderer. */
2451  return err < 0 ? err : ret;
2452 }
2453 
/* NOTE(review): the first signature line (upstream 2454, presumably
 * static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,)
 * was dropped by the listing renderer.
 * Decodes one subtitle packet, optionally clamps the previous subtitle's
 * display duration (-fix_sub_duration), renders it for sub2video filter
 * inputs or queues it if the filtergraph is not ready, and sends it to
 * every eligible subtitle encoder. */
2455 {
2458  int free_sub = 1;
2459  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2460  &subtitle, got_output, pkt);
2461 
2463 
2464  if (ret < 0 || !*got_output) {
2465  *decode_failed = 1;
/* An empty packet at EOF flushes the sub2video state. */
2466  if (!pkt->size)
2467  sub2video_flush(ist);
2468  return ret;
2469  }
2470 
2471  if (ist->fix_sub_duration) {
2472  int end = 1;
/* Shorten the previous subtitle so it ends when this one starts. */
2473  if (ist->prev_sub.got_output) {
2475  1000, AV_TIME_BASE);
2476  if (end < ist->prev_sub.subtitle.end_display_time) {
2477  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2478  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2480  end <= 0 ? ", dropping it" : "");
2482  }
2483  }
/* Swap current and previous subtitle state: output lags by one event. */
2484  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2485  FFSWAP(int, ret, ist->prev_sub.ret);
2487  if (end <= 0)
2488  goto out;
2489  }
2490 
2491  if (!*got_output)
2492  return ret;
2493 
2494  if (ist->sub2video.frame) {
2495  sub2video_update(ist, INT64_MIN, &subtitle);
2496  } else if (ist->nb_filters) {
/* Filtergraph not configured yet: queue the subtitle for later. */
2497  if (!ist->sub2video.sub_queue)
2498  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2499  if (!ist->sub2video.sub_queue)
2500  exit_program(1);
2501  if (!av_fifo_space(ist->sub2video.sub_queue)) {
/* NOTE(review): the av_fifo_realloc2(...) call assigning `ret`
 * (upstream 2502) is missing from this listing. */
2503  if (ret < 0)
2504  exit_program(1);
2505  }
/* Ownership moved into the queue — don't free the subtitle below. */
2507  free_sub = 0;
2508  }
2509 
2510  if (!subtitle.num_rects)
2511  goto out;
2512 
2513  ist->frames_decoded++;
2514 
2515  for (i = 0; i < nb_output_streams; i++) {
2517 
2518  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2519  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2520  continue;
2521 
2522  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2523  }
2524 
2525 out:
2526  if (free_sub)
2528  return ret;
2529 }
2530 
2532 {
2533  int i, ret;
2534  /* TODO keep pts also in stream time base to avoid converting back */
2535  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2537 
2538  for (i = 0; i < ist->nb_filters; i++) {
2539  ret = ifilter_send_eof(ist->filters[i], pts);
2540  if (ret < 0)
2541  return ret;
2542  }
2543  return 0;
2544 }
2545 
2546 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Central per-packet driver: initializes first timestamps, decodes the
 * packet (looping while the decoder keeps producing output), predicts
 * next_pts/next_dts for streams without reliable timestamps, signals
 * filter EOF after flushing, and handles the stream-copy path.
 * Returns 0 once EOF was fully processed, non-zero otherwise. */
2547 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2548 {
2549  int ret = 0, i;
2550  int repeating = 0;
2551  int eof_reached = 0;
2552 
2553  AVPacket avpkt;
/* Seed dts/pts on the very first packet: start negative by the decoder
 * delay (B-frames) so that output timestamps begin near zero. */
2554  if (!ist->saw_first_ts) {
2555  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2556  ist->pts = 0;
2557  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2558  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2559  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2560  }
2561  ist->saw_first_ts = 1;
2562  }
2563 
2564  if (ist->next_dts == AV_NOPTS_VALUE)
2565  ist->next_dts = ist->dts;
2566  if (ist->next_pts == AV_NOPTS_VALUE)
2567  ist->next_pts = ist->pts;
2568 
2569  if (!pkt) {
2570  /* EOF handling */
2571  av_init_packet(&avpkt);
2572  avpkt.data = NULL;
2573  avpkt.size = 0;
2574  } else {
2575  avpkt = *pkt;
2576  }
2577 
/* Trust the container dts when present (pts only for non-video or copy). */
2578  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2579  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2580  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2581  ist->next_pts = ist->pts = ist->dts;
2582  }
2583 
2584  // while we have more to decode or while the decoder did output something on EOF
2585  while (ist->decoding_needed) {
2586  int64_t duration_dts = 0;
2587  int64_t duration_pts = 0;
2588  int got_output = 0;
2589  int decode_failed = 0;
2590 
2591  ist->pts = ist->next_pts;
2592  ist->dts = ist->next_dts;
2593 
2594  switch (ist->dec_ctx->codec_type) {
2595  case AVMEDIA_TYPE_AUDIO:
/* repeating: the packet was already sent, just drain more frames. */
2596  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2597  &decode_failed);
2598  break;
2599  case AVMEDIA_TYPE_VIDEO:
2600  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2601  &decode_failed);
2602  if (!repeating || !pkt || got_output) {
2603  if (pkt && pkt->duration) {
2604  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2605  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* NOTE(review): upstream line 2606 (int ticks = repeat_pict-based field
 * count) was dropped by this listing renderer; duration_dts is derived
 * from the codec framerate scaled by that tick count. */
2607  duration_dts = ((int64_t)AV_TIME_BASE *
2608  ist->dec_ctx->framerate.den * ticks) /
2610  }
2611 
2612  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2613  ist->next_dts += duration_dts;
2614  }else
2615  ist->next_dts = AV_NOPTS_VALUE;
2616  }
2617 
/* Prefer the frame's own duration; fall back to the dts-based estimate. */
2618  if (got_output) {
2619  if (duration_pts > 0) {
2620  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2621  } else {
2622  ist->next_pts += duration_dts;
2623  }
2624  }
2625  break;
2626  case AVMEDIA_TYPE_SUBTITLE:
2627  if (repeating)
2628  break;
2629  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2630  if (!pkt && ret >= 0)
2631  ret = AVERROR_EOF;
2632  break;
2633  default:
2634  return -1;
2635  }
2636 
2637  if (ret == AVERROR_EOF) {
2638  eof_reached = 1;
2639  break;
2640  }
2641 
2642  if (ret < 0) {
2643  if (decode_failed) {
2644  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2645  ist->file_index, ist->st->index, av_err2str(ret));
2646  } else {
2647  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2648  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2649  }
2650  if (!decode_failed || exit_on_error)
2651  exit_program(1);
2652  break;
2653  }
2654 
2655  if (got_output)
2656  ist->got_output = 1;
2657 
2658  if (!got_output)
2659  break;
2660 
2661  // During draining, we might get multiple output frames in this loop.
2662  // ffmpeg.c does not drain the filter chain on configuration changes,
2663  // which means if we send multiple frames at once to the filters, and
2664  // one of those frames changes configuration, the buffered frames will
2665  // be lost. This can upset certain FATE tests.
2666  // Decode only 1 frame per call on EOF to appease these FATE tests.
2667  // The ideal solution would be to rewrite decoding to use the new
2668  // decoding API in a better way.
2669  if (!pkt)
2670  break;
2671 
2672  repeating = 1;
2673  }
2674 
2675  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2676  /* except when looping we need to flush but not to send an EOF */
2677  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2678  int ret = send_filter_eof(ist);
2679  if (ret < 0) {
2680  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2681  exit_program(1);
2682  }
2683  }
2684 
2685  /* handle stream copy */
2686  if (!ist->decoding_needed && pkt) {
2687  ist->dts = ist->next_dts;
2688  switch (ist->dec_ctx->codec_type) {
2689  case AVMEDIA_TYPE_AUDIO:
2690  av_assert1(pkt->duration >= 0);
2691  if (ist->dec_ctx->sample_rate) {
2692  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2693  ist->dec_ctx->sample_rate;
2694  } else {
/* NOTE(review): upstream line 2695 (fallback using pkt->duration) is
 * missing from this listing. */
2696  }
2697  break;
2698  case AVMEDIA_TYPE_VIDEO:
2699  if (ist->framerate.num) {
2700  // TODO: Remove work-around for c99-to-c89 issue 7
2701  AVRational time_base_q = AV_TIME_BASE_Q;
2702  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2703  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2704  } else if (pkt->duration) {
/* NOTE(review): upstream line 2705 (next_dts += rescaled pkt->duration)
 * is missing from this listing. */
2706  } else if(ist->dec_ctx->framerate.num != 0) {
2707  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2708  ist->next_dts += ((int64_t)AV_TIME_BASE *
2709  ist->dec_ctx->framerate.den * ticks) /
2711  }
2712  break;
2713  }
2714  ist->pts = ist->dts;
2715  ist->next_pts = ist->next_dts;
2716  }
2717  for (i = 0; i < nb_output_streams; i++) {
/* NOTE(review): upstream line 2718 (OutputStream *ost = output_streams[i];)
 * is missing from this listing. */
2719 
2720  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2721  continue;
2722 
2723  do_streamcopy(ist, ost, pkt);
2724  }
2725 
2726  return !eof_reached;
2727 }
2728 
/* Generates an SDP description covering all RTP output muxers and either
 * prints it to stdout or writes it to the file given with -sdp_file.
 * A no-op until every output file has written its header, since the SDP
 * depends on finalized stream parameters. */
2729 static void print_sdp(void)
2730 {
2731  char sdp[16384];
2732  int i;
2733  int j;
2734  AVIOContext *sdp_pb;
2735  AVFormatContext **avc;
2736 
/* Wait until all muxers are initialized. */
2737  for (i = 0; i < nb_output_files; i++) {
2738  if (!output_files[i]->header_written)
2739  return;
2740  }
2741 
2742  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2743  if (!avc)
2744  exit_program(1);
/* Collect only the RTP outputs; j counts how many were found. */
2745  for (i = 0, j = 0; i < nb_output_files; i++) {
2746  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2747  avc[j] = output_files[i]->ctx;
2748  j++;
2749  }
2750  }
2751 
2752  if (!j)
2753  goto fail;
2754 
2755  av_sdp_create(avc, j, sdp, sizeof(sdp));
2756 
2757  if (!sdp_filename) {
2758  printf("SDP:\n%s\n", sdp);
2759  fflush(stdout);
2760  } else {
2761  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2762  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2763  } else {
2764  avio_print(sdp_pb, sdp);
2765  avio_closep(&sdp_pb);
/* NOTE(review): upstream line 2766 (freeing sdp_filename so the SDP is
 * written only once) is missing from this listing. */
2767  }
2768  }
2769 
2770 fail:
2771  av_freep(&avc);
2772 }
2773 
/* NOTE(review): the signature line (upstream 2774, presumably
 * static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts))
 * was dropped by the listing renderer.
 * AVCodecContext.get_format callback: walks the decoder's offered pixel
 * formats in order and picks the first software format, or the first
 * hwaccel format whose backend can actually be initialized for this
 * input stream (either via the generic AVCodecHWConfig path or the
 * legacy per-hwaccel table). */
2775 {
2776  InputStream *ist = s->opaque;
2777  const enum AVPixelFormat *p;
2778  int ret;
2779 
2780  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
/* NOTE(review): upstream line 2781 (const AVPixFmtDescriptor *desc =
 * av_pix_fmt_desc_get(*p);) is missing from this listing. */
2782  const AVCodecHWConfig *config = NULL;
2783  int i;
2784 
/* First non-hwaccel format terminates the search: software decode. */
2785  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2786  break;
2787 
2788  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2789  ist->hwaccel_id == HWACCEL_AUTO) {
/* Look for a codec hw config matching this pixel format. */
2790  for (i = 0;; i++) {
2791  config = avcodec_get_hw_config(s->codec, i);
2792  if (!config)
2793  break;
2794  if (!(config->methods &
2796  continue;
2797  if (config->pix_fmt == *p)
2798  break;
2799  }
2800  }
2801  if (config) {
2802  if (config->device_type != ist->hwaccel_device_type) {
2803  // Different hwaccel offered, ignore.
2804  continue;
2805  }
2806 
/* NOTE(review): the hwaccel_decode_init(...) call assigning `ret`
 * (upstream 2807) is missing from this listing. */
2808  if (ret < 0) {
/* A hwaccel explicitly requested by the user must not silently fail. */
2809  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2811  "%s hwaccel requested for input stream #%d:%d, "
2812  "but cannot be initialized.\n",
2814  ist->file_index, ist->st->index);
2815  return AV_PIX_FMT_NONE;
2816  }
2817  continue;
2818  }
2819  } else {
/* Legacy path: search the static hwaccels[] table by pixel format. */
2820  const HWAccel *hwaccel = NULL;
2821  int i;
2822  for (i = 0; hwaccels[i].name; i++) {
2823  if (hwaccels[i].pix_fmt == *p) {
2824  hwaccel = &hwaccels[i];
2825  break;
2826  }
2827  }
2828  if (!hwaccel) {
2829  // No hwaccel supporting this pixfmt.
2830  continue;
2831  }
2832  if (hwaccel->id != ist->hwaccel_id) {
2833  // Does not match requested hwaccel.
2834  continue;
2835  }
2836 
2837  ret = hwaccel->init(s);
2838  if (ret < 0) {
2840  "%s hwaccel requested for input stream #%d:%d, "
2841  "but cannot be initialized.\n", hwaccel->name,
2842  ist->file_index, ist->st->index);
2843  return AV_PIX_FMT_NONE;
2844  }
2845  }
2846 
/* Hand the decoder the hw frames context prepared during init. */
2847  if (ist->hw_frames_ctx) {
2848  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2849  if (!s->hw_frames_ctx)
2850  return AV_PIX_FMT_NONE;
2851  }
2852 
2853  ist->hwaccel_pix_fmt = *p;
2854  break;
2855  }
2856 
2857  return *p;
2858 }
2859 
/* NOTE(review): the signature line (upstream 2860, presumably
 * static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags))
 * was dropped by the listing renderer.
 * AVCodecContext.get_buffer2 callback: delegates to the active hwaccel's
 * buffer allocator when the frame uses the hwaccel pixel format. */
2861 {
2862  InputStream *ist = s->opaque;
2863 
2864  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2865  return ist->hwaccel_get_buffer(s, frame, flags);
2866 
/* NOTE(review): upstream line 2867 (fallback to
 * avcodec_default_get_buffer2) is missing from this listing. */
2868 }
2869 
/* Opens the decoder for one input stream (if decoding is needed):
 * installs the get_format/get_buffer callbacks, applies decoder options
 * (threads, subtitle formatting, DVB edt computation), performs hw
 * device setup, and calls avcodec_open2(). On failure a human-readable
 * message is written into `error` and a negative error code returned. */
2870 static int init_input_stream(int ist_index, char *error, int error_len)
2871 {
2872  int ret;
2873  InputStream *ist = input_streams[ist_index];
2874 
2875  if (ist->decoding_needed) {
2876  AVCodec *codec = ist->dec;
2877  if (!codec) {
2878  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2879  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2880  return AVERROR(EINVAL);
2881  }
2882 
/* Let the callbacks recover the InputStream from the codec context. */
2883  ist->dec_ctx->opaque = ist;
2884  ist->dec_ctx->get_format = get_format;
2885  ist->dec_ctx->get_buffer2 = get_buffer;
2886  ist->dec_ctx->thread_safe_callbacks = 1;
2887 
2888  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2889  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2890  (ist->decoding_needed & DECODING_FOR_OST)) {
2891  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
/* NOTE(review): upstream line 2892 (the condition guarding this warning,
 * checking DECODING_FOR_FILTER) is missing from this listing. */
2893  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2894  }
2895 
2896  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2897 
2898  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2899  * audio, and video decoders such as cuvid or mediacodec */
2900  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2901 
2902  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2903  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2904  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2905  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2906 
/* NOTE(review): the hw_device_setup_for_decode(ist) call assigning `ret`
 * (upstream 2908) is missing from this listing. */
2909  if (ret < 0) {
2910  snprintf(error, error_len, "Device setup failed for "
2911  "decoder on input stream #%d:%d : %s",
2912  ist->file_index, ist->st->index, av_err2str(ret));
2913  return ret;
2914  }
2915 
2916  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2917  if (ret == AVERROR_EXPERIMENTAL)
2918  abort_codec_experimental(codec, 0);
2919 
2920  snprintf(error, error_len,
2921  "Error while opening decoder for input stream "
2922  "#%d:%d : %s",
2923  ist->file_index, ist->st->index, av_err2str(ret));
2924  return ret;
2925  }
2927  }
2928 
2929  ist->next_pts = AV_NOPTS_VALUE;
2930  ist->next_dts = AV_NOPTS_VALUE;
2931 
2932  return 0;
2933 }
2934 
/* NOTE(review): the signature line (upstream 2935, presumably
 * static InputStream *get_input_stream(OutputStream *ost)) was dropped
 * by the listing renderer.
 * Returns the input stream feeding this output stream, or NULL when the
 * output has no direct source (e.g. purely filter-generated). */
2936 {
2937  if (ost->source_index >= 0)
2938  return input_streams[ost->source_index];
2939  return NULL;
2940 }
2941 
/* qsort() comparator for int64_t values: yields a negative, zero or
 * positive result when *a is respectively less than, equal to or greater
 * than *b. Uses comparison results rather than subtraction, so it cannot
 * overflow for any pair of inputs. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2946 
2947 /* open the muxer when all the streams are initialized */
/* NOTE(review): the signature line (upstream 2948, presumably
 * static int of_check_init(OutputFile *of)) was dropped by the listing
 * renderer.
 * Once every output stream of this file is initialized, writes the
 * container header, optionally emits the SDP, and flushes any packets
 * that were queued while the muxer was not yet ready. */
2949 {
2950  int ret, i;
2951 
2952  for (i = 0; i < of->ctx->nb_streams; i++) {
/* NOTE(review): upstream line 2953 (OutputStream *ost = output_streams[...];)
 * is missing from this listing. */
2954  if (!ost->initialized)
2955  return 0;
2956  }
2957 
2958  of->ctx->interrupt_callback = int_cb;
2959 
2960  ret = avformat_write_header(of->ctx, &of->opts);
2961  if (ret < 0) {
2963  "Could not write header for output file #%d "
2964  "(incorrect codec parameters ?): %s\n",
2966  return ret;
2967  }
2968  //assert_avoptions(of->opts);
2969  of->header_written = 1;
2970 
2971  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2972 
2973  if (sdp_filename || want_sdp)
2974  print_sdp();
2975 
2976  /* flush the muxing queues */
2977  for (i = 0; i < of->ctx->nb_streams; i++) {
/* NOTE(review): upstream line 2978 (OutputStream *ost = output_streams[...];)
 * is missing from this listing. */
2979 
2980  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2981  if (!av_fifo_size(ost->muxing_queue))
2982  ost->mux_timebase = ost->st->time_base;
2983 
2984  while (av_fifo_size(ost->muxing_queue)) {
2985  AVPacket pkt;
2986  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2987  write_packet(of, &pkt, ost, 1);
2988  }
2989  }
2990 
2991  return 0;
2992 }
2993 
/* NOTE(review): the signature line (upstream 2994, presumably
 * static int init_output_bsfs(OutputStream *ost)) was dropped by the
 * listing renderer.
 * Initializes the output stream's bitstream filter chain: feeds the
 * muxer codec parameters and timebase in, runs av_bsf_init(), and copies
 * the (possibly modified) parameters and timebase back to the stream. */
2995 {
2996  AVBSFContext *ctx = ost->bsf_ctx;
2997  int ret;
2998 
/* Nothing to do if no bitstream filter was requested. */
2999  if (!ctx)
3000  return 0;
3001 
3002  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3003  if (ret < 0)
3004  return ret;
3005 
3006  ctx->time_base_in = ost->st->time_base;
3007 
3008  ret = av_bsf_init(ctx);
3009  if (ret < 0) {
3010  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3011  ctx->filter->name);
3012  return ret;
3013  }
3014 
/* The bsf may rewrite extradata/parameters; propagate them outward. */
3015  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3016  if (ret < 0)
3017  return ret;
3018  ost->st->time_base = ctx->time_base_out;
3019 
3020  return 0;
3021 }
3022 
3024 {
3025  OutputFile *of = output_files[ost->file_index];
3027  AVCodecParameters *par_dst = ost->st->codecpar;
3028  AVCodecParameters *par_src = ost->ref_par;
3029  AVRational sar;
3030  int i, ret;
3031  uint32_t codec_tag = par_dst->codec_tag;
3032 
3033  av_assert0(ist && !ost->filter);
3034 
3035  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3036  if (ret >= 0)
3037  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3038  if (ret < 0) {
3040  "Error setting up codec context options.\n");
3041  return ret;
3042  }
3043 
3044  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3045  if (ret < 0) {
3047  "Error getting reference codec parameters.\n");
3048  return ret;
3049  }
3050 
3051  if (!codec_tag) {
3052  unsigned int codec_tag_tmp;
3053  if (!of->ctx->oformat->codec_tag ||
3054  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3055  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3056  codec_tag = par_src->codec_tag;
3057  }
3058 
3059  ret = avcodec_parameters_copy(par_dst, par_src);
3060  if (ret < 0)
3061  return ret;
3062 
3063  par_dst->codec_tag = codec_tag;
3064 
3065  if (!ost->frame_rate.num)
3066  ost->frame_rate = ist->framerate;
3067  ost->st->avg_frame_rate = ost->frame_rate;
3068 
3070  if (ret < 0)
3071  return ret;
3072 
3073  // copy timebase while removing common factors
3074  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3076 
3077  // copy estimated duration as a hint to the muxer
3078  if (ost->st->duration <= 0 && ist->st->duration > 0)
3079  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3080 
3081  // copy disposition
3082  ost->st->disposition = ist->st->disposition;
3083 
3084  if (ist->st->nb_side_data) {
3085  for (i = 0; i < ist->st->nb_side_data; i++) {
3086  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3087  uint8_t *dst_data;
3088 
3089  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3090  if (!dst_data)
3091  return AVERROR(ENOMEM);
3092  memcpy(dst_data, sd_src->data, sd_src->size);
3093  }
3094  }
3095 
3096  if (ost->rotate_overridden) {
3098  sizeof(int32_t) * 9);
3099  if (sd)
3100  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3101  }
3102 
3103  switch (par_dst->codec_type) {
3104  case AVMEDIA_TYPE_AUDIO:
3105  if (audio_volume != 256) {
3106  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3107  exit_program(1);
3108  }
3109  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3110  par_dst->block_align= 0;
3111  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3112  par_dst->block_align= 0;
3113  break;
3114  case AVMEDIA_TYPE_VIDEO:
3115  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3116  sar =
3117  av_mul_q(ost->frame_aspect_ratio,
3118  (AVRational){ par_dst->height, par_dst->width });
3119  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3120  "with stream copy may produce invalid files\n");
3121  }
3122  else if (ist->st->sample_aspect_ratio.num)
3123  sar = ist->st->sample_aspect_ratio;
3124  else
3125  sar = par_src->sample_aspect_ratio;
3126  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3127  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3128  ost->st->r_frame_rate = ist->st->r_frame_rate;
3129  break;
3130  }
3131 
3132  ost->mux_timebase = ist->st->time_base;
3133 
3134  return 0;
3135 }
3136 
3138 {
3139  AVDictionaryEntry *e;
3140 
3141  uint8_t *encoder_string;
3142  int encoder_string_len;
3143  int format_flags = 0;
3144  int codec_flags = ost->enc_ctx->flags;
3145 
3146  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3147  return;
3148 
3149  e = av_dict_get(of->opts, "fflags", NULL, 0);
3150  if (e) {
3151  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3152  if (!o)
3153  return;
3154  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3155  }
3156  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3157  if (e) {
3158  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3159  if (!o)
3160  return;
3161  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3162  }
3163 
3164  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3165  encoder_string = av_mallocz(encoder_string_len);
3166  if (!encoder_string)
3167  exit_program(1);
3168 
3169  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3170  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3171  else
3172  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3173  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3174  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3176 }
3177 
/* NOTE(review): the first signature line (upstream 3178, presumably
 * static void parse_forced_key_frames(char *kf, OutputStream *ost,)
 * was dropped by the listing renderer.
 * Parses the -force_key_frames specification `kf`: a comma-separated
 * list of times and/or "chapters[+delta]" entries. Produces a sorted
 * array of keyframe pts (in encoder timebase) stored on the stream.
 * Note: `kf` is modified in place (commas replaced by NULs). */
3179  AVCodecContext *avctx)
3180 {
3181  char *p;
3182  int n = 1, i, size, index = 0;
3183  int64_t t, *pts;
3184 
/* Count entries: one more than the number of commas. */
3185  for (p = kf; *p; p++)
3186  if (*p == ',')
3187  n++;
3188  size = n;
3189  pts = av_malloc_array(size, sizeof(*pts));
3190  if (!pts) {
3191  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3192  exit_program(1);
3193  }
3194 
3195  p = kf;
3196  for (i = 0; i < n; i++) {
3197  char *next = strchr(p, ',');
3198 
3199  if (next)
3200  *next++ = 0;
3201 
/* "chapters[+delta]": one keyframe per chapter start, offset by delta. */
3202  if (!memcmp(p, "chapters", 8)) {
3203 
3204  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3205  int j;
3206 
/* Grow the array: the single "chapters" entry expands to nb_chapters. */
3207  if (avf->nb_chapters > INT_MAX - size ||
3208  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3209  sizeof(*pts)))) {
3211  "Could not allocate forced key frames array.\n");
3212  exit_program(1);
3213  }
3214  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3215  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3216 
3217  for (j = 0; j < avf->nb_chapters; j++) {
3218  AVChapter *c = avf->chapters[j];
3219  av_assert1(index < size);
3220  pts[index++] = av_rescale_q(c->start, c->time_base,
3221  avctx->time_base) + t;
3222  }
3223 
3224  } else {
3225 
3226  t = parse_time_or_die("force_key_frames", p, 1);
3227  av_assert1(index < size);
3228  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3229 
3230  }
3231 
3232  p = next;
3233  }
3234 
3235  av_assert0(index == size);
/* Sorted order lets the encoder loop consume the list sequentially. */
3236  qsort(pts, size, sizeof(*pts), compare_int64);
3237  ost->forced_kf_count = size;
3238  ost->forced_kf_pts = pts;
3239 }
3240 
/* Chooses the encoder timebase: the user-supplied -enc_time_base when
 * positive, the input stream's timebase when -enc_time_base is negative
 * (i.e. "-1" was given) and an input stream exists, otherwise the
 * caller-provided default. */
3241 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3242 {
/* NOTE(review): upstream line 3243 (InputStream *ist = get_input_stream(ost);)
 * is missing from this listing. */
3244  AVCodecContext *enc_ctx = ost->enc_ctx;
3245  AVFormatContext *oc;
3246 
/* Explicit user timebase wins outright. */
3247  if (ost->enc_timebase.num > 0) {
3248  enc_ctx->time_base = ost->enc_timebase;
3249  return;
3250  }
3251 
/* Negative num means "copy from input"; warn if there is no input. */
3252  if (ost->enc_timebase.num < 0) {
3253  if (ist) {
3254  enc_ctx->time_base = ist->st->time_base;
3255  return;
3256  }
3257 
3258  oc = output_files[ost->file_index]->ctx;
3259  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3260  }
3261 
3262  enc_ctx->time_base = default_time_base;
3263 }
3264 
// Configure the encoder context of an output stream before it is opened:
// frame rate, time base, dimensions/pixel format (video), sample format/rate
// and channel layout (audio), and forced-keyframe handling.
// NOTE(review): the function signature line (init_output_stream_encode) and
// several statement lines were dropped by the doc extraction; verify details
// against the upstream ffmpeg.c.
3266 {
3268  AVCodecContext *enc_ctx = ost->enc_ctx;
3270  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3271  int j, ret;
3272 
3273  set_encoder_id(output_files[ost->file_index], ost);
3274 
3275  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3276  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3277  // which have to be filtered out to prevent leaking them to output files.
3278  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3279 
3280  if (ist) {
     // Propagate the input stream's disposition to the output stream.
3281  ost->st->disposition = ist->st->disposition;
3282 
3283  dec_ctx = ist->dec_ctx;
3284 
3286  } else {
     // No input stream: look for another stream of the same type in the
     // output file (lines 3294-3295 of the original are missing here).
3287  for (j = 0; j < oc->nb_streams; j++) {
3288  AVStream *st = oc->streams[j];
3289  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3290  break;
3291  }
3292  if (j == oc->nb_streams)
3293  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3296  }
3297 
3298  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
     // Frame-rate selection, in decreasing order of preference: filtergraph
     // output, user-specified input rate, input stream r_frame_rate, and
     // finally a hard-coded 25 fps fallback with a warning.
3299  if (!ost->frame_rate.num)
3300  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3301  if (ist && !ost->frame_rate.num)
3302  ost->frame_rate = ist->framerate;
3303  if (ist && !ost->frame_rate.num)
3304  ost->frame_rate = ist->st->r_frame_rate;
3305  if (ist && !ost->frame_rate.num) {
3306  ost->frame_rate = (AVRational){25, 1};
3308  "No information "
3309  "about the input framerate is available. Falling "
3310  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3311  "if you want a different framerate.\n",
3312  ost->file_index, ost->index);
3313  }
3314 
     // Snap to the nearest frame rate the encoder supports, unless -force_fps.
3315  if (ost->enc->supported_framerates && !ost->force_fps) {
3316  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3317  ost->frame_rate = ost->enc->supported_framerates[idx];
3318  }
3319  // reduce frame rate for mpeg4 to be within the spec limits
3320  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3321  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3322  ost->frame_rate.num, ost->frame_rate.den, 65535);
3323  }
3324  }
3325 
3326  switch (enc_ctx->codec_type) {
3327  case AVMEDIA_TYPE_AUDIO:
     // Audio parameters come from the buffersink feeding the encoder.
3328  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3329  if (dec_ctx)
3331  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3332  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3333  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3334  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3335 
3337  break;
3338 
3339  case AVMEDIA_TYPE_VIDEO:
3340  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3341 
3342  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3343  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
     // Warn when the time base implies an extremely high frame rate
     // (condition line 3345 is missing from this extraction).
3344  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3346  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3347  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3348  }
3349 
3350  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3351  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3352  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3353  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3354  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3355  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3356 
3357  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3358  if (dec_ctx)
3360  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3361 
3362  enc_ctx->framerate = ost->frame_rate;
3363 
3364  ost->st->avg_frame_rate = ost->frame_rate;
3365 
     // Detect geometry/pixel-format changes relative to the decoder
     // (the body line 3370 is missing from this extraction).
3366  if (!dec_ctx ||
3367  enc_ctx->width != dec_ctx->width ||
3368  enc_ctx->height != dec_ctx->height ||
3369  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3371  }
3372 
     // -top 0/1 forces bottom/top field first; any other value leaves
     // field_order untouched.
3373  if (ost->top_field_first == 0) {
3374  enc_ctx->field_order = AV_FIELD_BB;
3375  } else if (ost->top_field_first == 1) {
3376  enc_ctx->field_order = AV_FIELD_TT;
3377  }
3378 
3379  if (ost->forced_keyframes) {
3380  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3381  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3383  if (ret < 0) {
3385  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3386  return ret;
3387  }
3388  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3389  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3390  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3391  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3392 
3393  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3394  // parse it only for static kf timings
3395  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3396  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3397  }
3398  }
3399  break;
3400  case AVMEDIA_TYPE_SUBTITLE:
3401  enc_ctx->time_base = AV_TIME_BASE_Q;
     // Inherit subtitle canvas size from the source stream when unset.
3402  if (!enc_ctx->width) {
3403  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3404  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3405  }
3406  break;
3407  case AVMEDIA_TYPE_DATA:
3408  break;
3409  default:
3410  abort();
3411  break;
3412  }
3413 
3414  ost->mux_timebase = enc_ctx->time_base;
3415 
3416  return 0;
3417 }
3418 
// Fully initialize one output stream: open its encoder (or set up streamcopy),
// copy side data and timing hints, apply user dispositions, and initialize
// bitstream filters. On error, fills 'error' and returns a negative AVERROR.
// NOTE(review): several statement lines were dropped by the doc extraction
// (e.g. the init_output_stream_encode() call around line 3428); verify against
// the upstream ffmpeg.c.
3419 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3420 {
3421  int ret = 0;
3422 
3423  if (ost->encoding_needed) {
3424  AVCodec *codec = ost->enc;
3425  AVCodecContext *dec = NULL;
3426  InputStream *ist;
3427 
3429  if (ret < 0)
3430  return ret;
3431 
3432  if ((ist = get_input_stream(ost)))
3433  dec = ist->dec_ctx;
3434  if (dec && dec->subtitle_header) {
3435  /* ASS code assumes this buffer is null terminated so add extra byte. */
3436  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3437  if (!ost->enc_ctx->subtitle_header)
3438  return AVERROR(ENOMEM);
3439  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3440  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3441  }
     // Default options: auto threading, and 128 kb/s for audio encoders
     // that declare no defaults and got no explicit bitrate.
3442  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3443  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3444  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3445  !codec->defaults &&
3446  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3447  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3448  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3449 
     // (Hardware device setup call on line 3450 is missing from this listing.)
3451  if (ret < 0) {
3452  snprintf(error, error_len, "Device setup failed for "
3453  "encoder on output stream #%d:%d : %s",
3454  ost->file_index, ost->index, av_err2str(ret));
3455  return ret;
3456  }
3457 
     // Subtitle transcoding is only supported text->text or bitmap->bitmap.
3458  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3459  int input_props = 0, output_props = 0;
3460  AVCodecDescriptor const *input_descriptor =
3461  avcodec_descriptor_get(dec->codec_id);
3462  AVCodecDescriptor const *output_descriptor =
3463  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3464  if (input_descriptor)
3465  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3466  if (output_descriptor)
3467  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3468  if (input_props && output_props && input_props != output_props) {
3469  snprintf(error, error_len,
3470  "Subtitle encoding currently only possible from text to text "
3471  "or bitmap to bitmap");
3472  return AVERROR_INVALIDDATA;
3473  }
3474  }
3475 
3476  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3477  if (ret == AVERROR_EXPERIMENTAL)
3478  abort_codec_experimental(codec, 1);
3479  snprintf(error, error_len,
3480  "Error while opening encoder for output stream #%d:%d - "
3481  "maybe incorrect parameters such as bit_rate, rate, width or height",
3482  ost->file_index, ost->index);
3483  return ret;
3484  }
     // Fixed-frame-size audio encoders need the buffersink to emit frames
     // of exactly frame_size samples.
3485  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3486  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3487  av_buffersink_set_frame_size(ost->filter->filter,
3488  ost->enc_ctx->frame_size);
3489  assert_avoptions(ost->encoder_opts);
3490  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3491  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3492  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3493  " It takes bits/s as argument, not kbits/s\n");
3494 
     // (The call producing this ret, around line 3495, is missing here —
     // presumably copying encoder parameters to the stream; verify upstream.)
3496  if (ret < 0) {
3498  "Error initializing the output stream codec context.\n");
3499  exit_program(1);
3500  }
3501  /*
3502  * FIXME: ost->st->codec should't be needed here anymore.
3503  */
3504  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3505  if (ret < 0)
3506  return ret;
3507 
     // Propagate encoder-generated coded side data to the muxer stream.
3508  if (ost->enc_ctx->nb_coded_side_data) {
3509  int i;
3510 
3511  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3512  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3513  uint8_t *dst_data;
3514 
3515  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3516  if (!dst_data)
3517  return AVERROR(ENOMEM);
3518  memcpy(dst_data, sd_src->data, sd_src->size);
3519  }
3520  }
3521 
3522  /*
3523  * Add global input side data. For now this is naive, and copies it
3524  * from the input stream's global side data. All side data should
3525  * really be funneled over AVFrame and libavfilter, then added back to
3526  * packet side data, and then potentially using the first packet for
3527  * global side data.
3528  */
3529  if (ist) {
3530  int i;
3531  for (i = 0; i < ist->st->nb_side_data; i++) {
3532  AVPacketSideData *sd = &ist->st->side_data[i];
3533  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3534  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3535  if (!dst)
3536  return AVERROR(ENOMEM);
3537  memcpy(dst, sd->data, sd->size);
     // Autorotation was already applied by the filter chain, so zero the
     // display matrix rotation to avoid rotating twice.
3538  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3539  av_display_rotation_set((uint32_t *)dst, 0);
3540  }
3541  }
3542  }
3543 
3544  // copy timebase while removing common factors
3545  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3546  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3547 
3548  // copy estimated duration as a hint to the muxer
3549  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3550  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3551 
3552  ost->st->codec->codec= ost->enc_ctx->codec;
3553  } else if (ost->stream_copy) {
     // (Streamcopy initialization call on line 3554 is missing here.)
3555  if (ret < 0)
3556  return ret;
3557  }
3558 
3559  // parse user provided disposition, and update stream values
3560  if (ost->disposition) {
3561  static const AVOption opts[] = {
3562  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3563  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3564  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3565  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3566  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3567  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3568  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3569  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3570  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3571  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3572  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3573  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3574  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3575  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3576  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3577  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3578  { NULL },
3579  };
3580  static const AVClass class = {
3581  .class_name = "",
3582  .item_name = av_default_item_name,
3583  .option = opts,
3584  .version = LIBAVUTIL_VERSION_INT,
3585  };
3586  const AVClass *pclass = &class;
3587 
3588  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3589  if (ret < 0)
3590  return ret;
3591  }
3592 
3593  /* initialize bitstream filters for the output stream
3594  * needs to be done here, because the codec id for streamcopy is not
3595  * known until now */
     // (The bitstream-filter init call on line 3596 is missing here.)
3597  if (ret < 0)
3598  return ret;
3599 
3600  ost->initialized = 1;
3601 
3602  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3603  if (ret < 0)
3604  return ret;
3605 
3606  return ret;
3607 }
3608 
// Warn (once per stream index) when a packet arrives for a stream that
// appeared after the initial probe; nb_streams_warn tracks the highest
// index already reported.
// NOTE(review): one argument line of the av_log call (3618) was dropped by
// the doc extraction — presumably the media-type string for the "%s".
3609 static void report_new_stream(int input_index, AVPacket *pkt)
3610 {
3611  InputFile *file = input_files[input_index];
3612  AVStream *st = file->ctx->streams[pkt->stream_index];
3613 
3614  if (pkt->stream_index < file->nb_streams_warn)
3615  return;
3616  av_log(file->ctx, AV_LOG_WARNING,
3617  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3619  input_index, pkt->stream_index,
3620  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3621  file->nb_streams_warn = pkt->stream_index + 1;
3622 }
3623 
// One-time setup before the main transcode loop: resolve filtergraph output
// sources, init framerate emulation, init input streams, open non-filtered
// encoders, discard unused programs, write headers for stream-less files,
// and dump the resulting stream mapping.
// NOTE(review): several declaration/call lines were dropped by the doc
// extraction (e.g. 3649/3678 InputFile declarations, 3671 the
// init_output_stream call); verify against the upstream ffmpeg.c.
3624 static int transcode_init(void)
3625 {
3626  int ret = 0, i, j, k;
3627  AVFormatContext *oc;
3628  OutputStream *ost;
3629  InputStream *ist;
3630  char error[1024] = {0};
3631 
     // Attach a source_index to filtergraph-fed outputs whose graph has a
     // single input, so downstream code can report their origin.
3632  for (i = 0; i < nb_filtergraphs; i++) {
3633  FilterGraph *fg = filtergraphs[i];
3634  for (j = 0; j < fg->nb_outputs; j++) {
3635  OutputFilter *ofilter = fg->outputs[j];
3636  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3637  continue;
3638  if (fg->nb_inputs != 1)
3639  continue;
3640  for (k = nb_input_streams-1; k >= 0 ; k--)
3641  if (fg->inputs[0]->ist == input_streams[k])
3642  break;
3643  ofilter->ost->source_index = k;
3644  }
3645  }
3646 
3647  /* init framerate emulation */
3648  for (i = 0; i < nb_input_files; i++) {
3650  if (ifile->rate_emu)
3651  for (j = 0; j < ifile->nb_streams; j++)
3652  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3653  }
3654 
3655  /* init input streams */
3656  for (i = 0; i < nb_input_streams; i++)
3657  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
     // On failure, close every encoder before reporting the mapping.
3658  for (i = 0; i < nb_output_streams; i++) {
3659  ost = output_streams[i];
3660  avcodec_close(ost->enc_ctx);
3661  }
3662  goto dump_format;
3663  }
3664 
3665  /* open each encoder */
3666  for (i = 0; i < nb_output_streams; i++) {
3667  // skip streams fed from filtergraphs until we have a frame for them
3668  if (output_streams[i]->filter)
3669  continue;
3670 
3672  if (ret < 0)
3673  goto dump_format;
3674  }
3675 
3676  /* discard unused programs */
3677  for (i = 0; i < nb_input_files; i++) {
3679  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3680  AVProgram *p = ifile->ctx->programs[j];
3681  int discard = AVDISCARD_ALL;
3682 
     // Keep a program if at least one of its streams is not discarded.
3683  for (k = 0; k < p->nb_stream_indexes; k++)
3684  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3685  discard = AVDISCARD_DEFAULT;
3686  break;
3687  }
3688  p->discard = discard;
3689  }
3690  }
3691 
3692  /* write headers for files with no streams */
3693  for (i = 0; i < nb_output_files; i++) {
3694  oc = output_files[i]->ctx;
3695  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3697  if (ret < 0)
3698  goto dump_format;
3699  }
3700  }
3701 
3702  dump_format:
3703  /* dump the stream mapping */
3704  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3705  for (i = 0; i < nb_input_streams; i++) {
3706  ist = input_streams[i];
3707 
3708  for (j = 0; j < ist->nb_filters; j++) {
3709  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3710  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3711  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3712  ist->filters[j]->name);
3713  if (nb_filtergraphs > 1)
3714  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3715  av_log(NULL, AV_LOG_INFO, "\n");
3716  }
3717  }
3718  }
3719 
3720  for (i = 0; i < nb_output_streams; i++) {
3721  ost = output_streams[i];
3722 
3723  if (ost->attachment_filename) {
3724  /* an attached file */
3725  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3726  ost->attachment_filename, ost->file_index, ost->index);
3727  continue;
3728  }
3729 
3730  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3731  /* output from a complex graph */
3732  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3733  if (nb_filtergraphs > 1)
3734  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3735 
3736  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3737  ost->index, ost->enc ? ost->enc->name : "?");
3738  continue;
3739  }
3740 
3741  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3742  input_streams[ost->source_index]->file_index,
3743  input_streams[ost->source_index]->st->index,
3744  ost->file_index,
3745  ost->index);
3746  if (ost->sync_ist != input_streams[ost->source_index])
3747  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3748  ost->sync_ist->file_index,
3749  ost->sync_ist->st->index);
3750  if (ost->stream_copy)
3751  av_log(NULL, AV_LOG_INFO, " (copy)");
3752  else {
     // Report both the codec name and the actual decoder/encoder
     // implementation; "native" when the implementation name equals
     // the codec name.
3753  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3754  const AVCodec *out_codec = ost->enc;
3755  const char *decoder_name = "?";
3756  const char *in_codec_name = "?";
3757  const char *encoder_name = "?";
3758  const char *out_codec_name = "?";
3759  const AVCodecDescriptor *desc;
3760 
3761  if (in_codec) {
3762  decoder_name = in_codec->name;
3763  desc = avcodec_descriptor_get(in_codec->id);
3764  if (desc)
3765  in_codec_name = desc->name;
3766  if (!strcmp(decoder_name, in_codec_name))
3767  decoder_name = "native";
3768  }
3769 
3770  if (out_codec) {
3771  encoder_name = out_codec->name;
3772  desc = avcodec_descriptor_get(out_codec->id);
3773  if (desc)
3774  out_codec_name = desc->name;
3775  if (!strcmp(encoder_name, out_codec_name))
3776  encoder_name = "native";
3777  }
3778 
3779  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3780  in_codec_name, decoder_name,
3781  out_codec_name, encoder_name);
3782  }
3783  av_log(NULL, AV_LOG_INFO, "\n");
3784  }
3785 
3786  if (ret) {
3787  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3788  return ret;
3789  }
3790 
3792 
3793  return 0;
3794 }
3795 
3796 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
     // NOTE(review): line 3802 (presumably 'OutputStream *ost =
     // output_streams[i];') and line 3812 (the per-stream action inside the
     // max_frames loop) were dropped by the doc extraction; verify upstream.
3797 static int need_output(void)
3798 {
3799  int i;
3800 
3801  for (i = 0; i < nb_output_streams; i++) {
3803  OutputFile *of = output_files[ost->file_index];
3804  AVFormatContext *os = output_files[ost->file_index]->ctx;
3805 
     // A stream needs no more output once it is finished or its file hit
     // the -fs size limit.
3806  if (ost->finished ||
3807  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3808  continue;
3809  if (ost->frame_number >= ost->max_frames) {
3810  int j;
3811  for (j = 0; j < of->ctx->nb_streams; j++)
3813  continue;
3814  }
3815 
3816  return 1;
3817  }
3818 
3819  return 0;
3820 }
3821 
3822 /**
3823  * Select the output stream to process.
3824  *
3825  * @return selected output stream, or NULL if none available
3826  */
     // NOTE(review): the signature line (3827) and the 'ost' declaration/log
     // call lines (3834, 3839) were dropped by the doc extraction.
3828 {
3829  int i;
3830  int64_t opts_min = INT64_MAX;
3831  OutputStream *ost_min = NULL;
3832 
3833  for (i = 0; i < nb_output_streams; i++) {
     // Rescale the stream's current DTS to AV_TIME_BASE so streams with
     // different time bases are comparable; unset DTS sorts first.
3835  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3836  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3837  AV_TIME_BASE_Q);
3838  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3840  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3841  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3842 
     // Uninitialized streams that still have pending input are processed
     // first so their encoders get opened.
3843  if (!ost->initialized && !ost->inputs_done)
3844  return ost;
3845 
     // Otherwise pick the unfinished stream with the smallest DTS;
     // an unavailable stream yields NULL instead.
3846  if (!ost->finished && opts < opts_min) {
3847  opts_min = opts;
3848  ost_min = ost->unavailable ? NULL : ost;
3849  }
3850  }
3851  return ost_min;
3852 }
3853 
/* Turn terminal echo on stdin on (on != 0) or off (on == 0).
 * Silently does nothing on platforms without termios support
 * or when the terminal attributes cannot be read. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term_attr;

    if (tcgetattr(0, &term_attr) != 0)
        return;

    if (on)
        term_attr.c_lflag |= ECHO;
    else
        term_attr.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term_attr);
#endif
}
3865 
// Poll the controlling terminal (at most every 100 ms) for interactive
// commands: q(uit), +/- verbosity, s QP histogram, h hex/packet dump cycling,
// c/C filter commands, d/D debug modes, ? help. Returns AVERROR_EXIT when the
// user quit or a termination signal was received, 0 otherwise.
// NOTE(review): a few lines were dropped by the doc extraction (e.g. 3890,
// 3918, 3927, 3961); verify against the upstream ffmpeg.c.
3866 static int check_keyboard_interaction(int64_t cur_time)
3867 {
3868  int i, ret, key;
3869  static int64_t last_time;
3870  if (received_nb_signals)
3871  return AVERROR_EXIT;
3872  /* read_key() returns 0 on EOF */
3873  if(cur_time - last_time >= 100000 && !run_as_daemon){
3874  key = read_key();
3875  last_time = cur_time;
3876  }else
3877  key = -1;
3878  if (key == 'q')
3879  return AVERROR_EXIT;
3880  if (key == '+') av_log_set_level(av_log_get_level()+10);
3881  if (key == '-') av_log_set_level(av_log_get_level()-10);
3882  if (key == 's') qp_hist ^= 1;
     // 'h' cycles: nothing -> packet dump -> packet+hex dump -> nothing.
3883  if (key == 'h'){
3884  if (do_hex_dump){
3885  do_hex_dump = do_pkt_dump = 0;
3886  } else if(do_pkt_dump){
3887  do_hex_dump = 1;
3888  } else
3889  do_pkt_dump = 1;
3891  }
     // 'c'/'C' read one command line from the tty and send ('c', time < 0)
     // or queue ('C') it to all filtergraphs.
3892  if (key == 'c' || key == 'C'){
3893  char buf[4096], target[64], command[256], arg[256] = {0};
3894  double time;
3895  int k, n = 0;
3896  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3897  i = 0;
3898  set_tty_echo(1);
3899  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3900  if (k > 0)
3901  buf[i++] = k;
3902  buf[i] = 0;
3903  set_tty_echo(0);
3904  fprintf(stderr, "\n");
3905  if (k > 0 &&
3906  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3907  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3908  target, time, command, arg);
3909  for (i = 0; i < nb_filtergraphs; i++) {
3910  FilterGraph *fg = filtergraphs[i];
3911  if (fg->graph) {
3912  if (time < 0) {
3913  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3914  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3915  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3916  } else if (key == 'c') {
3917  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3919  } else {
3920  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3921  if (ret < 0)
3922  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3923  }
3924  }
3925  }
3926  } else {
3928  "Parse error, at least 3 arguments were expected, "
3929  "only %d given in string '%s'\n", n, buf);
3930  }
3931  }
     // 'D' cycles through debug modes; 'd' reads a numeric debug value.
3932  if (key == 'd' || key == 'D'){
3933  int debug=0;
3934  if(key == 'D') {
3935  debug = input_streams[0]->st->codec->debug<<1;
3936  if(!debug) debug = 1;
3937  while(debug & (FF_DEBUG_DCT_COEFF
3938 #if FF_API_DEBUG_MV
3939  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3940 #endif
3941  )) //unsupported, would just crash
3942  debug += debug;
3943  }else{
3944  char buf[32];
3945  int k = 0;
3946  i = 0;
3947  set_tty_echo(1);
3948  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3949  if (k > 0)
3950  buf[i++] = k;
3951  buf[i] = 0;
3952  set_tty_echo(0);
3953  fprintf(stderr, "\n");
3954  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3955  fprintf(stderr,"error parsing debug value\n");
3956  }
     // Apply the chosen debug flags to every decoder and encoder context.
3957  for(i=0;i<nb_input_streams;i++) {
3958  input_streams[i]->st->codec->debug = debug;
3959  }
3960  for(i=0;i<nb_output_streams;i++) {
3962  ost->enc_ctx->debug = debug;
3963  }
3964  if(debug) av_log_set_level(AV_LOG_DEBUG);
3965  fprintf(stderr,"debug=%d\n", debug);
3966  }
3967  if (key == '?'){
3968  fprintf(stderr, "key function\n"
3969  "? show this help\n"
3970  "+ increase verbosity\n"
3971  "- decrease verbosity\n"
3972  "c Send command to first matching filter supporting it\n"
3973  "C Send/Queue command to all matching filters\n"
3974  "D cycle through available debug modes\n"
3975  "h dump packets/hex press to cycle through the 3 states\n"
3976  "q quit\n"
3977  "s Show QP histogram\n"
3978  );
3979  }
3980  return 0;
3981 }
3982 
3983 #if HAVE_THREADS
3984 static void *input_thread(void *arg)
3985 {
3986  InputFile *f = arg;
3987  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3988  int ret = 0;
3989 
3990  while (1) {
3991  AVPacket pkt;
3992  ret = av_read_frame(f->ctx, &pkt);
3993 
3994  if (ret == AVERROR(EAGAIN)) {
3995  av_usleep(10000);
3996  continue;
3997  }
3998  if (ret < 0) {
3999  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4000  break;
4001  }
4002  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4003  if (flags && ret == AVERROR(EAGAIN)) {
4004  flags = 0;
4005  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4006  av_log(f->ctx, AV_LOG_WARNING,
4007  "Thread message queue blocking; consider raising the "
4008  "thread_queue_size option (current value: %d)\n",
4009  f->thread_queue_size);
4010  }
4011  if (ret < 0) {
4012  if (ret != AVERROR_EOF)
4013  av_log(f->ctx, AV_LOG_ERROR,
4014  "Unable to send packet to main thread: %s\n",
4015  av_err2str(ret));
4016  av_packet_unref(&pkt);
4017  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4018  break;
4019  }
4020  }
4021 
4022  return NULL;
4023 }
4024 
// Shut down and join the reader thread of input file i, draining and
// unreffing any packets still queued, then free the message queue.
// NOTE(review): line 4032 was dropped by the doc extraction — presumably
// the call signalling EOF to the sending side before draining; verify
// against the upstream ffmpeg.c.
4025 static void free_input_thread(int i)
4026 {
4027  InputFile *f = input_files[i];
4028  AVPacket pkt;
4029 
4030  if (!f || !f->in_thread_queue)
4031  return;
4033  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4034  av_packet_unref(&pkt);
4035 
4036  pthread_join(f->thread, NULL);
4037  f->joined = 1;
4038  av_thread_message_queue_free(&f->in_thread_queue);
4039 }
4040 
4041 static void free_input_threads(void)
4042 {
4043  int i;
4044 
4045  for (i = 0; i < nb_input_files; i++)
4046  free_input_thread(i);
4047 }
4048 
4049 static int init_input_thread(int i)
4050 {
4051  int ret;
4052  InputFile *f = input_files[i];
4053 
4054  if (nb_input_files == 1)
4055  return 0;
4056 
4057  if (f->ctx->pb ? !f->ctx->pb->seekable :
4058  strcmp(f->ctx->iformat->name, "lavfi"))
4059  f->non_blocking = 1;
4060  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4061  f->thread_queue_size, sizeof(AVPacket));
4062  if (ret < 0)
4063  return ret;
4064 
4065  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4066  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4067  av_thread_message_queue_free(&f->in_thread_queue);
4068  return AVERROR(ret);
4069  }
4070 
4071  return 0;
4072 }
4073 
4074 static int init_input_threads(void)
4075 {
4076  int i, ret;
4077 
4078  for (i = 0; i < nb_input_files; i++) {
4079  ret = init_input_thread(i);
4080  if (ret < 0)
4081  return ret;
4082  }
4083  return 0;
4084 }
4085 
// Fetch the next packet for this input file from its reader-thread queue.
// NOTE(review): the last argument line (4090) was dropped by the doc
// extraction — presumably selecting AV_THREAD_MESSAGE_NONBLOCK when
// f->non_blocking is set, and 0 otherwise; verify upstream.
4086 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4087 {
4088  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4089  f->non_blocking ?
4091 }
4092 #endif
4093 
// Read the next packet for input file f, honouring -re (rate emulation) by
// returning EAGAIN until wall-clock time catches up with the stream DTS.
// With multiple input files and thread support, packets come from the
// per-file reader thread; otherwise directly from av_read_frame().
// NOTE(review): the signature line (4094, get_input_packet) was dropped by
// the doc extraction.
4095 {
4096  if (f->rate_emu) {
4097  int i;
4098  for (i = 0; i < f->nb_streams; i++) {
4099  InputStream *ist = input_streams[f->ist_index + i];
     // Compare the stream DTS (converted to microseconds) against the
     // elapsed wall-clock time since the stream started.
4100  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4101  int64_t now = av_gettime_relative() - ist->start;
4102  if (pts > now)
4103  return AVERROR(EAGAIN);
4104  }
4105  }
4106 
4107 #if HAVE_THREADS
4108  if (nb_input_files > 1)
4109  return get_input_packet_mt(f, pkt);
4110 #endif
4111  return av_read_frame(f->ctx, pkt);
4112 }
4113 
4114 static int got_eagain(void)
4115 {
4116  int i;
4117  for (i = 0; i < nb_output_streams; i++)
4118  if (output_streams[i]->unavailable)
4119  return 1;
4120  return 0;
4121 }
4122 
4123 static void reset_eagain(void)
4124 {
4125  int i;
4126  for (i = 0; i < nb_input_files; i++)
4127  input_files[i]->eagain = 0;
4128  for (i = 0; i < nb_output_streams; i++)
4129  output_streams[i]->unavailable = 0;
4130 }
4131 
4132 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4133 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4134  AVRational time_base)
4135 {
4136  int ret;
4137 
4138  if (!*duration) {
4139  *duration = tmp;
4140  return tmp_time_base;
4141  }
4142 
4143  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4144  if (ret < 0) {
4145  *duration = tmp;
4146  return tmp_time_base;
4147  }
4148 
4149  return time_base;
4150 }
4151 
// Rewind an input file to its start for -stream_loop: seek back to
// start_time and extend ifile->duration by the estimated length of the
// just-played iteration (max_pts - min_pts plus one last-frame duration).
// When any audio stream produced samples, the last-frame length is derived
// from its sample count; otherwise from the video frame rate.
// NOTE(review): the signature line (4152, seek_to_start taking the
// InputFile and its AVFormatContext 'is') and lines 4182/4190 (the
// duration computations from nb_samples / avg_frame_rate) were dropped by
// the doc extraction; verify against the upstream ffmpeg.c.
4153 {
4154  InputStream *ist;
4155  AVCodecContext *avctx;
4156  int i, ret, has_audio = 0;
4157  int64_t duration = 0;
4158 
4159  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4160  if (ret < 0)
4161  return ret;
4162 
4163  for (i = 0; i < ifile->nb_streams; i++) {
4164  ist = input_streams[ifile->ist_index + i];
4165  avctx = ist->dec_ctx;
4166 
4167  /* duration is the length of the last frame in a stream
4168  * when audio stream is present we don't care about
4169  * last video frame length because it's not defined exactly */
4170  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4171  has_audio = 1;
4172  }
4173 
4174  for (i = 0; i < ifile->nb_streams; i++) {
4175  ist = input_streams[ifile->ist_index + i];
4176  avctx = ist->dec_ctx;
4177 
4178  if (has_audio) {
4179  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4180  AVRational sample_rate = {1, avctx->sample_rate};
4181 
4183  } else {
4184  continue;
4185  }
4186  } else {
4187  if (ist->framerate.num) {
4188  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4189  } else if (ist->st->avg_frame_rate.num) {
4191  } else {
4192  duration = 1;
4193  }
4194  }
4195  if (!ifile->duration)
4196  ifile->time_base = ist->st->time_base;
4197  /* the total duration of the stream, max_pts - min_pts is
4198  * the duration of the stream without the last frame */
4199  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4200  duration += ist->max_pts - ist->min_pts;
4201  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4202  ifile->time_base);
4203  }
4204 
     // A positive loop count is consumed once per rewind; -1 loops forever.
4205  if (ifile->loop > 0)
4206  ifile->loop--;
4207 
4208  return ret;
4209 }
4210 
4211 /*
4212  * Return
4213  * - 0 -- one packet was read and processed
4214  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4215  * this function should be called again
4216  * - AVERROR_EOF -- this function should not be called again
4217  */
4218 static int process_input(int file_index)
4219 {
4220  InputFile *ifile = input_files[file_index];
4222  InputStream *ist;
4223  AVPacket pkt;
4224  int ret, thread_ret, i, j;
4225  int64_t duration;
4226  int64_t pkt_dts;
4227  int disable_discontinuity_correction = copy_ts;
4228 
4229  is = ifile->ctx;
4231 
4232  if (ret == AVERROR(EAGAIN)) {
4233  ifile->eagain = 1;
4234  return ret;
4235  }
4236  if (ret < 0 && ifile->loop) {
4237  AVCodecContext *avctx;
4238  for (i = 0; i < ifile->nb_streams; i++) {
4239  ist = input_streams[ifile->ist_index + i];
4240  avctx = ist->dec_ctx;
4241  if (ist->decoding_needed) {
4242  ret = process_input_packet(ist, NULL, 1);
4243  if (ret>0)
4244  return 0;
4245  avcodec_flush_buffers(avctx);
4246  }
4247  }
4248 #if HAVE_THREADS
4249  free_input_thread(file_index);
4250 #endif
4251  ret = seek_to_start(ifile, is);
4252 #if HAVE_THREADS
4253  thread_ret = init_input_thread(file_index);
4254  if (thread_ret < 0)
4255  return thread_ret;
4256 #endif
4257  if (ret < 0)
4258  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4259  else
4261  if (ret == AVERROR(EAGAIN)) {
4262  ifile->eagain = 1;
4263  return ret;
4264  }
4265  }
4266  if (ret < 0) {
4267  if (ret != AVERROR_EOF) {
4268  print_error(is->url, ret);
4269  if (exit_on_error)
4270  exit_program(1);
4271  }
4272 
4273  for (i = 0; i < ifile->nb_streams; i++) {
4274  ist = input_streams[ifile->ist_index + i];
4275  if (ist->decoding_needed) {
4276  ret = process_input_packet(ist, NULL, 0);
4277  if (ret>0)
4278  return 0;
4279  }
4280 
4281  /* mark all outputs that don't go through lavfi as finished */
4282  for (j = 0; j < nb_output_streams; j++) {
4284 
4285  if (ost->source_index == ifile->ist_index + i &&
4286  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4288  }
4289  }
4290 
4291  ifile->eof_reached = 1;
4292  return AVERROR(EAGAIN);
4293  }
4294 
4295  reset_eagain();
4296 
4297  if (do_pkt_dump) {
4299  is->streams[pkt.stream_index]);
4300  }
4301  /* the following test is needed in case new streams appear
4302  dynamically in stream : we ignore them */
4303  if (pkt.stream_index >= ifile->nb_streams) {
4304  report_new_stream(file_index, &pkt);
4305  goto discard_packet;
4306  }
4307 
4308  ist = input_streams[ifile->ist_index + pkt.stream_index];
4309 
4310  ist->data_size += pkt.size;
4311  ist->nb_packets++;
4312 
4313  if (ist->discard)
4314  goto discard_packet;
4315 
4316  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4318  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4319  if (exit_on_error)
4320  exit_program(1);
4321  }
4322 
4323  if (debug_ts) {
4324  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4325  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4333  }
4334 
4335  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4336  int64_t stime, stime2;
4337  // Correcting starttime based on the enabled streams
4338  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4339  // so we instead do it here as part of discontinuity handling
4340  if ( ist->next_dts == AV_NOPTS_VALUE
4341  && ifile->ts_offset == -is->start_time
4342  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4343  int64_t new_start_time = INT64_MAX;
4344  for (i=0; i<is->nb_streams; i++) {
4345  AVStream *st = is->streams[i];
4346  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4347  continue;
4348  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4349  }
4350  if (new_start_time > is->start_time) {
4351  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4352  ifile->ts_offset = -new_start_time;
4353  }
4354  }
4355 
4356  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4357  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4358  ist->wrap_correction_done = 1;
4359 
4360  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4361  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4362  ist->wrap_correction_done = 0;
4363  }
4364  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4365  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4366  ist->wrap_correction_done = 0;
4367  }
4368  }
4369 
4370  /* add the stream-global side data to the first packet */
4371  if (ist->nb_packets == 1) {
4372  for (i = 0; i < ist->st->nb_side_data; i++) {
4373  AVPacketSideData *src_sd = &ist->st->side_data[i];
4374  uint8_t *dst_data;
4375 
4376  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4377  continue;
4378 
4379  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4380  continue;
4381 
4382  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4383  if (!dst_data)
4384  exit_program(1);
4385 
4386  memcpy(dst_data, src_sd->data, src_sd->size);
4387  }
4388  }
4389 
4390  if (pkt.dts != AV_NOPTS_VALUE)
4391  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4392  if (pkt.pts != AV_NOPTS_VALUE)
4393  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4394 
4395  if (pkt.pts != AV_NOPTS_VALUE)
4396  pkt.pts *= ist->ts_scale;
4397  if (pkt.dts != AV_NOPTS_VALUE)
4398  pkt.dts *= ist->ts_scale;
4399 
4401  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4403  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4404  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4405  int64_t delta = pkt_dts - ifile->last_ts;
4406  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4408  ifile->ts_offset -= delta;
4410  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4411  delta, ifile->ts_offset);
4413  if (pkt.pts != AV_NOPTS_VALUE)
4415  }
4416  }
4417 
4418  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4419  if (pkt.pts != AV_NOPTS_VALUE) {
4420  pkt.pts += duration;
4421  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4422  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4423  }
4424 
4425  if (pkt.dts != AV_NOPTS_VALUE)
4426  pkt.dts += duration;
4427 
4429 
4430  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4431  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4432  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4433  ist->st->time_base, AV_TIME_BASE_Q,
4435  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4436  disable_discontinuity_correction = 0;
4437  }
4438 
4439  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4441  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4442  !disable_discontinuity_correction) {
4443  int64_t delta = pkt_dts - ist->next_dts;
4444  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4445  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4447  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4448  ifile->ts_offset -= delta;
4450  "timestamp discontinuity for stream #%d:%d "
4451  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4452  ist->file_index, ist->st->index, ist->st->id,
4454  delta, ifile->ts_offset);
4456  if (pkt.pts != AV_NOPTS_VALUE)
4458  }
4459  } else {
4460  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4462  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4464  }
4465  if (pkt.pts != AV_NOPTS_VALUE){
4466  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4467  delta = pkt_pts - ist->next_dts;
4468  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4470  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4472  }
4473  }
4474  }
4475  }
4476 
4477  if (pkt.dts != AV_NOPTS_VALUE)
4478  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4479 
4480  if (debug_ts) {
4481  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4487  }
4488 
4489  sub2video_heartbeat(ist, pkt.pts);
4490 
4491  process_input_packet(ist, &pkt, 0);
4492 
4493 discard_packet:
4494  av_packet_unref(&pkt);
4495 
4496  return 0;
4497 }
4498 
4499 /**
4500  * Perform a step of transcoding for the specified filter graph.
4501  *
4502  * @param[in] graph filter graph to consider
4503  * @param[out] best_ist input stream where a frame would allow to continue
4504  * @return 0 for success, <0 for error
4505  */
4506 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4507 {
4508  int i, ret;
4509  int nb_requests, nb_requests_max = 0;
4510  InputFilter *ifilter;
4511  InputStream *ist;
4512 
4513  *best_ist = NULL;
4515  if (ret >= 0)
4516  return reap_filters(0);
4517 
4518  if (ret == AVERROR_EOF) {
4519  ret = reap_filters(1);
4520  for (i = 0; i < graph->nb_outputs; i++)
4521  close_output_stream(graph->outputs[i]->ost);
4522  return ret;
4523  }
4524  if (ret != AVERROR(EAGAIN))
4525  return ret;
4526 
4527  for (i = 0; i < graph->nb_inputs; i++) {
4528  ifilter = graph->inputs[i];
4529  ist = ifilter->ist;
4530  if (input_files[ist->file_index]->eagain ||
4532  continue;
4533  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4534  if (nb_requests > nb_requests_max) {
4535  nb_requests_max = nb_requests;
4536  *best_ist = ist;
4537  }
4538  }
4539 
4540  if (!*best_ist)
4541  for (i = 0; i < graph->nb_outputs; i++)
4542  graph->outputs[i]->ost->unavailable = 1;
4543 
4544  return 0;
4545 }
4546 
4547 /**
4548  * Run a single step of transcoding.
4549  *
4550  * @return 0 for success, <0 for error
4551  */
4552 static int transcode_step(void)
4553 {
4554  OutputStream *ost;
4555  InputStream *ist = NULL;
4556  int ret;
4557 
4558  ost = choose_output();
4559  if (!ost) {
4560  if (got_eagain()) {
4561  reset_eagain();
4562  av_usleep(10000);
4563  return 0;
4564  }
4565  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4566  return AVERROR_EOF;
4567  }
4568 
4569  if (ost->filter && !ost->filter->graph->graph) {
4570  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4571  ret = configure_filtergraph(ost->filter->graph);
4572  if (ret < 0) {
4573  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4574  return ret;
4575  }
4576  }
4577  }
4578 
4579  if (ost->filter && ost->filter->graph->graph) {
4580  if (!ost->initialized) {
4581  char error[1024] = {0};
4582  ret = init_output_stream(ost, error, sizeof(error));
4583  if (ret < 0) {
4584  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4585  ost->file_index, ost->index, error);
4586  exit_program(1);
4587  }
4588  }
4589  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4590  return ret;
4591  if (!ist)
4592  return 0;
4593  } else if (ost->filter) {
4594  int i;
4595  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4596  InputFilter *ifilter = ost->filter->graph->inputs[i];
4597  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4598  ist = ifilter->ist;
4599  break;
4600  }
4601  }
4602  if (!ist) {
4603  ost->inputs_done = 1;
4604  return 0;
4605  }
4606  } else {
4607  av_assert0(ost->source_index >= 0);
4608  ist = input_streams[ost->source_index];
4609  }
4610 
4611  ret = process_input(ist->file_index);
4612  if (ret == AVERROR(EAGAIN)) {
4613  if (input_files[ist->file_index]->eagain)
4614  ost->unavailable = 1;
4615  return 0;
4616  }
4617 
4618  if (ret < 0)
4619  return ret == AVERROR_EOF ? 0 : ret;
4620 
4621  return reap_filters(0);
4622 }
4623 
4624 /*
4625  * The following code is the main loop of the file converter
4626  */
4627 static int transcode(void)
4628 {
4629  int ret, i;
4630  AVFormatContext *os;
4631  OutputStream *ost;
4632  InputStream *ist;
4633  int64_t timer_start;
4634  int64_t total_packets_written = 0;
4635 
4636  ret = transcode_init();
4637  if (ret < 0)
4638  goto fail;
4639 
4640  if (stdin_interaction) {
4641  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4642  }
4643 
4644  timer_start = av_gettime_relative();
4645 
4646 #if HAVE_THREADS
4647  if ((ret = init_input_threads()) < 0)
4648  goto fail;
4649 #endif
4650 
4651  while (!received_sigterm) {
4652  int64_t cur_time= av_gettime_relative();
4653 
4654  /* if 'q' pressed, exits */
4655  if (stdin_interaction)
4656  if (check_keyboard_interaction(cur_time) < 0)
4657  break;
4658 
4659  /* check if there's any stream where output is still needed */
4660  if (!need_output()) {
4661  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4662  break;
4663  }
4664 
4665  ret = transcode_step();
4666  if (ret < 0 && ret != AVERROR_EOF) {
4667  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4668  break;
4669  }
4670 
4671  /* dump report by using the output first video and audio streams */
4672  print_report(0, timer_start, cur_time);
4673  }
4674 #if HAVE_THREADS
4675  free_input_threads();
4676 #endif
4677 
4678  /* at the end of stream, we must flush the decoder buffers */
4679  for (i = 0; i < nb_input_streams; i++) {
4680  ist = input_streams[i];
4681  if (!input_files[ist->file_index]->eof_reached) {
4682  process_input_packet(ist, NULL, 0);
4683  }
4684  }
4685  flush_encoders();
4686 
4687  term_exit();
4688 
4689  /* write the trailer if needed and close file */
4690  for (i = 0; i < nb_output_files; i++) {
4691  os = output_files[i]->ctx;
4692  if (!output_files[i]->header_written) {
4694  "Nothing was written into output file %d (%s), because "
4695  "at least one of its streams received no packets.\n",
4696  i, os->url);
4697  continue;
4698  }
4699  if ((ret = av_write_trailer(os)) < 0) {
4700  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4701  if (exit_on_error)
4702  exit_program(1);
4703  }
4704  }
4705 
4706  /* dump report by using the first video and audio streams */
4707  print_report(1, timer_start, av_gettime_relative());
4708 
4709  /* close each encoder */
4710  for (i = 0; i < nb_output_streams; i++) {
4711  ost = output_streams[i];
4712  if (ost->encoding_needed) {
4713  av_freep(&ost->enc_ctx->stats_in);
4714  }
4715  total_packets_written += ost->packets_written;
4716  if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4717  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4718  exit_program(1);
4719  }
4720  }
4721 
4722  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4723  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4724  exit_program(1);
4725  }
4726 
4727  /* close each decoder */
4728  for (i = 0; i < nb_input_streams; i++) {
4729  ist = input_streams[i];
4730  if (ist->decoding_needed) {
4731  avcodec_close(ist->dec_ctx);
4732  if (ist->hwaccel_uninit)
4733  ist->hwaccel_uninit(ist->dec_ctx);
4734  }
4735  }
4736 
4738 
4739  /* finished ! */
4740  ret = 0;
4741 
4742  fail:
4743 #if HAVE_THREADS
4744  free_input_threads();
4745 #endif
4746 
4747  if (output_streams) {
4748  for (i = 0; i < nb_output_streams; i++) {
4749  ost = output_streams[i];
4750  if (ost) {
4751  if (ost->logfile) {
4752  if (fclose(ost->logfile))
4754  "Error closing logfile, loss of information possible: %s\n",
4755  av_err2str(AVERROR(errno)));
4756  ost->logfile = NULL;
4757  }
4758  av_freep(&ost->forced_kf_pts);
4759  av_freep(&ost->apad);
4761  av_dict_free(&ost->encoder_opts);
4762  av_dict_free(&ost->sws_dict);
4763  av_dict_free(&ost->swr_opts);
4764  av_dict_free(&ost->resample_opts);
4765  }
4766  }
4767  }
4768  return ret;
4769 }
4770 
4772 {
4773  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4774 #if HAVE_GETRUSAGE
4775  struct rusage rusage;
4776 
4777  getrusage(RUSAGE_SELF, &rusage);
4778  time_stamps.user_usec =
4779  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4780  time_stamps.sys_usec =
4781  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4782 #elif HAVE_GETPROCESSTIMES
4783  HANDLE proc;
4784  FILETIME c, e, k, u;
4785  proc = GetCurrentProcess();
4786  GetProcessTimes(proc, &c, &e, &k, &u);
4787  time_stamps.user_usec =
4788  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4789  time_stamps.sys_usec =
4790  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4791 #else
4792  time_stamps.user_usec = time_stamps.sys_usec = 0;
4793 #endif
4794  return time_stamps;
4795 }
4796 
/**
 * Query the peak memory usage of the current process.
 *
 * Uses getrusage() where ru_maxrss is available (value in KiB, scaled to
 * bytes here), GetProcessMemoryInfo() on Windows (peak pagefile usage in
 * bytes), and falls back to 0 on platforms providing neither API.
 *
 * @return peak memory usage in bytes, or 0 if it cannot be determined
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4814 
/**
 * No-op log callback: silently discards every log message.
 *
 * NOTE(review): presumably installed with av_log_set_callback() to suppress
 * library logging in daemon mode ("-d") — the installing call is not visible
 * in this chunk; confirm against main().
 */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4818 
4819 int main(int argc, char **argv)
4820 {
4821  int i, ret;
4823 
4824  init_dynload();
4825 
4827 
4828  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4829 
4831  parse_loglevel(argc, argv, options);
4832 
4833  if(argc>1 && !strcmp(argv[1], "-d")){
4834  run_as_daemon=1;
4836  argc--;
4837  argv++;
4838  }
4839 
4840 #if CONFIG_AVDEVICE
4842 #endif
4844 
4845  show_banner(argc, argv, options);
4846 
4847  /* parse options and open all input/output files */
4848  ret = ffmpeg_parse_options(argc, argv);
4849  if (ret < 0)
4850  exit_program(1);
4851 
4852  if (nb_output_files <= 0 && nb_input_files == 0) {
4853  show_usage();
4854  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4855  exit_program(1);
4856  }
4857 
4858  /* file converter / grab */
4859  if (nb_output_files <= 0) {
4860  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4861  exit_program(1);
4862  }
4863 
4864  for (i = 0; i < nb_output_files; i++) {
4865  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4866  want_sdp = 0;
4867  }
4868 
4870  if (transcode() < 0)
4871  exit_program(1);
4872  if (do_benchmark) {
4873  int64_t utime, stime, rtime;
4875  utime = current_time.user_usec - ti.user_usec;
4876  stime = current_time.sys_usec - ti.sys_usec;
4877  rtime = current_time.real_usec - ti.real_usec;
4879  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4880  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4881  }
4882  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4885  exit_program(69);
4886 
4888  return main_return_code;
4889 }
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:29
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:191
AVSubtitle
Definition: avcodec.h:2694
print_sdp
static void print_sdp(void)
Definition: ffmpeg.c:2729
avcodec_close
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1121
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:347
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1206
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:247
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
AVCodec
AVCodec.
Definition: codec.h:190
pthread_join
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:94
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:691
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
ifilter_parameters_from_codecpar
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1833
need_output
static int need_output(void)
Definition: ffmpeg.c:3797
audio_sync_method
int audio_sync_method
Definition: ffmpeg_opt.c:152
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1962
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:1344
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:72
level
uint8_t level
Definition: svq3.c:209
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:413
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:159
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:428
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:4123
InputStream::hwaccel_device
char * hwaccel_device
Definition: ffmpeg.h:366
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:422
seek_to_start
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4152
AVOutputFormat::name
const char * name
Definition: avformat.h:491
VSYNC_PASSTHROUGH
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:150
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2699
opt.h
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1011
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:345
AVCodecContext::get_format
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:778
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
forced_keyframes_const_names
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:42
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
libm.h
video_sync_method
int video_sync_method
Definition: ffmpeg_opt.c:153
InputFilter::width
int width
Definition: ffmpeg.h:246
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1262
av_fifo_generic_write
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
AVCodecHWConfig::methods
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:439
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:325
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1186
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
FKF_PREV_FORCED_N
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:427
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:283
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1564
AVCodecContext::thread_safe_callbacks
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:1814
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
InputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:380
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
thread.h
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVFMT_VARIABLE_FPS
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:465
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:288
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:833
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:920
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:117
sub2video_heartbeat
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:280
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2082
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:648
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:154
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:303
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:462
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
transcode_step
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4552
BenchmarkTimeStamps::user_usec
int64_t user_usec
Definition: ffmpeg.c:125
AVSubtitleRect
Definition: avcodec.h:2659
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2698
AV_DISPOSITION_DEFAULT
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:810
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
av_bsf_init
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:144
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:559
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
ffmpeg_parse_options
int ffmpeg_parse_options(int argc, char **argv)
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:211
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AV_THREAD_MESSAGE_NONBLOCK
@ AV_THREAD_MESSAGE_NONBLOCK
Perform non-blocking operation.
Definition: threadmessage.h:31
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1403
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
AVFrame::width
int width
Definition: frame.h:358
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:513
AVPacketSideData
Definition: packet.h:298
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:209
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:978
FKF_T
@ FKF_T
Definition: ffmpeg.h:429
AVPacket::data
uint8_t * data
Definition: packet.h:355
current_time
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:142
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:58
finish_output_stream
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1394
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1183
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:452
AVOption
AVOption.
Definition: opt.h:246
ATOMIC_VAR_INIT
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
FilterGraph::index
int index
Definition: ffmpeg.h:282
AVStream::avg_frame_rate
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:938
VSYNC_VSCFR
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
AVStream::cur_dts
int64_t cur_dts
Definition: avformat.h:1068
InputStream::nb_filters
int nb_filters
Definition: ffmpeg.h:359
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AVFrame::pkt_duration
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:579
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
transcode
static int transcode(void)
Definition: ffmpeg.c:4627
VSYNC_AUTO
#define VSYNC_AUTO
Definition: ffmpeg.h:49
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
BenchmarkTimeStamps::sys_usec
int64_t sys_usec
Definition: ffmpeg.c:126
progress_avio
AVIOContext * progress_avio
Definition: ffmpeg.c:143
show_usage
void show_usage(void)
Definition: ffmpeg_opt.c:3261
do_audio_out
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:899
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:289
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:373
AVCodecParameters::codec_tag
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:64
max
#define max(a, b)
Definition: cuda_runtime.h:33
mathematics.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:30
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:537
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
OutputFilter::sample_rates
int * sample_rates
Definition: ffmpeg.h:278
check_recording_time
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:886
VSYNC_CFR
#define VSYNC_CFR
Definition: ffmpeg.h:51
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1773
decode_audio
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2259
av_fifo_generic_read
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
InputStream::hwaccel_get_buffer
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:372
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
InputStream::decoding_needed
int decoding_needed
Definition: ffmpeg.h:299
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
flush_encoders
static void flush_encoders(void)
Definition: ffmpeg.c:1846
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:334
ost
static AVStream * ost
Definition: vaapi_transcode.c:45
os_support.h
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
sample_rate
sample_rate
Definition: ffmpeg_filter.c:192
get_input_packet
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4094
qp_hist
int qp_hist
Definition: ffmpeg_opt.c:167
term_exit_sigsafe
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:328
AVBSFContext
The bitstream filter state.
Definition: bsf.h:49
ECHO
#define ECHO(name, type, min, max)
Definition: af_aecho.c:188
want_sdp
static int want_sdp
Definition: ffmpeg.c:140
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVIOInterruptCB
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:120
InputFilter::channel_layout
uint64_t channel_layout
Definition: ffmpeg.h:251
InputFilter::ist
struct InputStream * ist
Definition: ffmpeg.h:236
do_video_out
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1042
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
InputFile::eof_reached
int eof_reached
Definition: ffmpeg.h:395
exit_program
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:133
InputStream
Definition: ffmpeg.h:294
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:2069
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:163
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4491
got_eagain
static int got_eagain(void)
Definition: ffmpeg.c:4114
ifilter_send_eof
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2183
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1613
nb_frames_drop
static int nb_frames_drop
Definition: ffmpeg.c:137
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1085
av_buffersink_set_frame_size
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
dts_delta_threshold
float dts_delta_threshold
Definition: ffmpeg_opt.c:148
AVCodecParameters::channels
int channels
Audio only.
Definition: codec_par.h:166
fifo.h
AV_FIELD_TT
@ AV_FIELD_TT
Definition: codec_par.h:39
avio_open2
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1141
finish
static void finish(void)
Definition: movenc.c:345
avcodec_parameters_free
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:2053
vstats_version
int vstats_version
Definition: ffmpeg_opt.c:173
hwaccels
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:133
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:535
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:123
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:239
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
samplefmt.h
x
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
Definition: fate.txt:150
InputStream::decoder_opts
AVDictionary * decoder_opts
Definition: ffmpeg.h:331
AVProgram::discard
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1260
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
check_init_output_file
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2948
InputStream::filter_in_rescale_delta_last
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:318
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
AVChapter
Definition: avformat.h:1292
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:176
InputStream::nb_packets
uint64_t nb_packets
Definition: ffmpeg.h:382
AV_DISPOSITION_FORCED
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:822
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:647
av_thread_message_queue_recv
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
Definition: threadmessage.c:172
hw_device_setup_for_decode
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:303
InputFilter::frame_queue
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:241
us
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:276
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
avcodec_copy_context
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:216
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:411
AVStream::duration
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:914
FFMIN3
#define FFMIN3(a, b, c)
Definition: common.h:97
av_codec_get_tag2
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
AV_FIELD_TB
@ AV_FIELD_TB
Definition: codec_par.h:41
OutputFile::opts
AVDictionary * opts
Definition: ffmpeg.h:556
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputStream::sub2video::last_pts
int64_t last_pts
Definition: ffmpeg.h:346
loop
static int loop
Definition: ffplay.c:341
do_pkt_dump
int do_pkt_dump
Definition: ffmpeg_opt.c:159
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5049
InputFile
Definition: ffmpeg.h:393
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
init_output_stream_streamcopy
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3023
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1102
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:74
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
AV_CODEC_ID_DVB_SUBTITLE
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:509
AVCodecContext::get_buffer2
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1341
AV_DISPOSITION_CLEAN_EFFECTS
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:825
ffmpeg_cleanup
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:491
InputStream::hwaccel_pix_fmt
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:374
OutputFile::shortest
int shortest
Definition: ffmpeg.h:562
avassert.h
InputStream::dts
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:312
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_fifo_space
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:434
AV_PKT_FLAG_CORRUPT
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: packet.h:389
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:600
av_thread_message_queue_send
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
Definition: threadmessage.c:156
choose_output
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3827
AVStream::first_dts
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1067
BenchmarkTimeStamps::real_usec
int64_t real_usec
Definition: ffmpeg.c:124
media_type_string
#define media_type_string
Definition: cmdutils.h:617
duration
int64_t duration
Definition: movenc.c:63
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
av_stream_new_side_data
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5551
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
av_opt_set_dict
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1655
HWACCEL_GENERIC
@ HWACCEL_GENERIC
Definition: ffmpeg.h:61
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
init_output_stream
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3419
input_streams
InputStream ** input_streams
Definition: ffmpeg.c:147
llrintf
#define llrintf(x)
Definition: libm.h:399
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
AVCodecDescriptor
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
InputStream::cfr_next_pts
int64_t cfr_next_pts
Definition: ffmpeg.h:325
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:592
get_benchmark_time_stamps
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4771
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:290
vstats_filename
char * vstats_filename
Definition: ffmpeg_opt.c:144
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2095
close_output_stream
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:838
InputStream::framerate
AVRational framerate
Definition: ffmpeg.h:332
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:200
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:606
AVFormatContext::chapters
AVChapter ** chapters
Definition: avformat.h:1565
AVDictionaryEntry::key
char * key
Definition: dict.h:82
ENCODER_FINISHED
@ ENCODER_FINISHED
Definition: ffmpeg.h:439
frame_size
int frame_size
Definition: mxfenc.c:2139
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:217
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:126
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:658
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
InputFilter
Definition: ffmpeg.h:234
get_input_stream
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2935
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:649
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
term_init
void term_init(void)
Definition: ffmpeg.c:395
do_streamcopy
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1979
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:675
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:260
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
AVPacketSideData::data
uint8_t * data
Definition: packet.h:299
MUXER_FINISHED
@ MUXER_FINISHED
Definition: ffmpeg.h:440
ctx
AVFormatContext * ctx
Definition: movenc.c:48
InputStream::sub2video::sub_queue
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:348
abort_codec_experimental
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:666
InputStream::filters
InputFilter ** filters
Definition: ffmpeg.h:358
limits.h
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:239
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:541
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
max_error_rate
float max_error_rate
Definition: ffmpeg_opt.c:170
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:434
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2700
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:40
OutputFile::header_written
int header_written
Definition: ffmpeg.h:564
on
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going on
Definition: writing_filters.txt:34
term_exit
void term_exit(void)
Definition: ffmpeg.c:336
AVOutputFormat::codec_tag
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:516
av_hwdevice_get_type_name
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
compare_int64
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2942
InputStream::hwaccel_retrieve_data
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:373
AV_CODEC_ID_CODEC2
@ AV_CODEC_ID_CODEC2
Definition: codec_id.h:477
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:237
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1709
key
const char * key
Definition: hwcontext_opencl.c:168
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
av_fifo_realloc2
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AV_FIELD_BT
@ AV_FIELD_BT
Definition: codec_par.h:42
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
assert_avoptions
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:657
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:76
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
process_input_packet
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2547
av_rescale_delta
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:871
process_input
static int process_input(int file_index)
Definition: ffmpeg.c:4218
avformat_write_header
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:505
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
int32_t
int32_t
Definition: audio_convert.c:194
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1660
arg
const char * arg
Definition: jacosubdec.c:66
pthread_create
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:80
AVCodecDescriptor::props
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
avio_flush
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:233
AVCodecParserContext::repeat_pict
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:3371
output_streams
OutputStream ** output_streams
Definition: ffmpeg.c:152
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
transcode_from_filter
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4506
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
InputStream::pts
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:315
AVFormatContext
Format I/O context.
Definition: avformat.h:1335
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:33
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:117
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1012
run_as_daemon
static int run_as_daemon
Definition: ffmpeg.c:134
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
hours
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally hours
Definition: fate.txt:145
print_final_stats
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1517
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:262
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:894
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
InputStream::sub2video::w
int w
Definition: ffmpeg.h:350
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
HWAccel::id
enum HWAccelID id
Definition: ffmpeg.h:69
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
InputStream::top_field_first
int top_field_first
Definition: ffmpeg.h:333
InputStream::st
AVStream * st
Definition: ffmpeg.h:296
main
int main(int argc, char **argv)
Definition: ffmpeg.c:4819
update_benchmark
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:671
avio_print
#define avio_print(s,...)
Write strings (const char *) to the context.
Definition: avio.h:594
HWAccel
Definition: ffmpeg.h:66
AVCodec::type
enum AVMediaType type
Definition: codec.h:203
send_frame_to_filters
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2233
decode_video
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2321
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:172
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
getmaxrss
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4797
InputStream::next_pts
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:314
frame_bits_per_raw_sample
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:169
AV_RL64
#define AV_RL64
Definition: intreadwrite.h:173
AVPacketSideData::type
enum AVPacketSideDataType type
Definition: packet.h:301
AV_DISPOSITION_COMMENT
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:813
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:393
check_keyboard_interaction
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3866
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1377
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:445
src
#define src
Definition: vp8dsp.c:254
AV_CODEC_PROP_BITMAP_SUB
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
parseutils.h
InputStream::hwaccel_id
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:364
InputFilter::channels
int channels
Definition: ffmpeg.h:250
mathops.h
duration_max
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4133
InputStream::dec
AVCodec * dec
Definition: ffmpeg.h:304
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1261
main_return_code
static int main_return_code
Definition: ffmpeg.c:346
vstats_file
static FILE * vstats_file
Definition: ffmpeg.c:112
AVStream::metadata
AVDictionary * metadata
Definition: avformat.h:929
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
InputFilter::eof
int eof
Definition: ffmpeg.h:255
InputStream::fix_sub_duration
int fix_sub_duration
Definition: ffmpeg.h:338
AV_DISPOSITION_METADATA
#define AV_DISPOSITION_METADATA
Definition: avformat.h:847
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:122
transcode_init
static int transcode_init(void)
Definition: ffmpeg.c:3624
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1399
get_format
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2774
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
avcodec_open2
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:548
sub2video_push_ref
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:222
time.h
close_all_output_streams
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:692
received_nb_signals
static volatile int received_nb_signals
Definition: ffmpeg.c:343
do_benchmark_all
int do_benchmark_all
Definition: ffmpeg_opt.c:157
AV_DISPOSITION_ORIGINAL
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:812
nb_input_streams
int nb_input_streams
Definition: ffmpeg.c:148
av_buffersink_get_channel_layout
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:614
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:663
InputStream::min_pts
int64_t min_pts
Definition: ffmpeg.h:320
HWAccel::name
const char * name
Definition: ffmpeg.h:67
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:291
swresample.h
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
InputStream::sub2video::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:351
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:170
AVFormatContext::oformat
ff_const59 struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1354
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
input_files
InputFile ** input_files
Definition: ffmpeg.c:149
AVStream::nb_frames
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:916
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
InputStream::frames_decoded
uint64_t frames_decoded
Definition: ffmpeg.h:384
OutputFilter::formats
int * formats
Definition: ffmpeg.h:276
InputStream::next_dts
int64_t next_dts
Definition: ffmpeg.h:311
AVFrame::best_effort_timestamp
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:564
FilterGraph
Definition: ffmpeg.h:281
print_stats
int print_stats
Definition: ffmpeg_opt.c:166
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1391
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:235
AVOutputFormat::flags
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:510
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
VSYNC_VFR
#define VSYNC_VFR
Definition: ffmpeg.h:52
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:484
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:649
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:255
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1623
options
const OptionDef options[]
AV_DISPOSITION_CAPTIONS
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:845
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1545
desc
const char * desc
Definition: nvenc.c:79
AVIOContext
Bytestream IO Context.
Definition: avio.h:161
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
InputStream::hwaccel_device_type
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:365
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
InputStream::decoded_frame
AVFrame * decoded_frame
Definition: ffmpeg.h:305
AVPacket::size
int size
Definition: packet.h:356
InputStream::wrap_correction_done
int wrap_correction_done
Definition: ffmpeg.h:316
InputStream::start
int64_t start
Definition: ffmpeg.h:308
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:231
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
InputStream::filter_frame
AVFrame * filter_frame
Definition: ffmpeg.h:306
threadmessage.h
InputStream::file_index
int file_index
Definition: ffmpeg.h:295
output_files
OutputFile ** output_files
Definition: ffmpeg.c:154
parse_forced_key_frames
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3178
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:423
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
received_sigterm
static volatile int received_sigterm
Definition: ffmpeg.c:342
start_time
static int64_t start_time
Definition: ffplay.c:332
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:285
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1431
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
send_filter_eof
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2531
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2083
InputStream::got_output
int got_output
Definition: ffmpeg.h:340
AVCodec::defaults
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: codec.h:257
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:87
size
int size
Definition: twinvq_data.h:11134
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:160
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
subtitle_out
static uint8_t * subtitle_out
Definition: ffmpeg.c:145
copy_tb
int copy_tb
Definition: ffmpeg_opt.c:162
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1319
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2543
InputStream::prev_sub
struct InputStream::@2 prev_sub
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
InputStream::hwaccel_retrieved_pix_fmt
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:375
hwaccel_decode_init
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:516
av_stream_get_codec_timebase
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5833
OutputStream::source_index
int source_index
Definition: ffmpeg.h:446
DECODING_FOR_OST
#define DECODING_FOR_OST
Definition: ffmpeg.h:300
FFDIFFSIGN
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
sub2video_update
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:240
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:458
AV_DISPOSITION_DUB
#define AV_DISPOSITION_DUB
Definition: avformat.h:811
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:373
AV_PICTURE_TYPE_NONE
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
AVStream::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:927
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2697
avdevice.h
AVFMT_NOSTREAMS
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:467
AV_DISPOSITION_HEARING_IMPAIRED
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:823
OSTFinished
OSTFinished
Definition: ffmpeg.h:438
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:813
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:354
avio_write
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:213
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:370
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
InputStream::samples_decoded
uint64_t samples_decoded
Definition: ffmpeg.h:385
OutputFile::limit_filesize
uint64_t limit_filesize
Definition: ffmpeg.h:560
dup_warning
static unsigned dup_warning
Definition: ffmpeg.c:136
AVPacketSideData::size
int size
Definition: packet.h:300
av_sdp_create
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:842
InputStream::max_pts
int64_t max_pts
Definition: ffmpeg.h:321
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
av_packet_make_refcounted
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:671
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
do_benchmark
int do_benchmark
Definition: ffmpeg_opt.c:156
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
get_buffer
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2860
bitrate
int64_t bitrate
Definition: h264_levels.c:131
av_packet_rescale_ts
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:712
av_buffersink_get_type
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
ifilter_send_frame
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2106
av_log2
#define av_log2
Definition: intmath.h:83
r
#define r
Definition: input.c:40
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2642
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
av_thread_message_queue_alloc
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:1187
decode
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2210
AVStream::side_data
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:967
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
guess_input_channel_layout
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2056
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1184
write_packet
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:701
do_subtitle_out
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:959
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
register_exit
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:128
do_video_stats
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1349
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1184
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
HWACCEL_AUTO
@ HWACCEL_AUTO
Definition: ffmpeg.h:60
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:447
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
InputStream::guess_layout_max
int guess_layout_max
Definition: ffmpeg.h:334
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1649
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:586
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1170
av_write_trailer
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1251
av_log_set_level
void av_log_set_level(int level)
Set the log level.
Definition: log.c:440
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:366
bprint.h
DECODING_FOR_FILTER
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:301
lrintf
#define lrintf(x)
Definition: libm_mips.h:70
av_bsf_receive_packet
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:223
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
InputStream::ret
int ret
Definition: ffmpeg.h:341
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:525
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
sub2video_flush
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:312
internal.h
AVCodecParameters::height
int height
Definition: codec_par.h:127
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2139
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVCodecParameters::block_align
int block_align
Audio only.
Definition: codec_par.h:177
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:155
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
display.h
av_thread_message_queue_set_err_send
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
Definition: threadmessage.c:188
vsnprintf
#define vsnprintf
Definition: snprintf.h:36
exit_on_error
int exit_on_error
Definition: ffmpeg_opt.c:164
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffmpeg.c:349
OutputFile::ost_index
int ost_index
Definition: ffmpeg.h:557
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
delta
float delta
Definition: vorbis_enc_data.h:457
InputStream::hwaccel_uninit
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:371
AV_DISPOSITION_KARAOKE
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:815
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:394
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
av_get_audio_frame_duration
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1756
transcode_subtitles
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2454
tb
#define tb
Definition: regdef.h:68
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1257
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
AV_DISPOSITION_DEPENDENT
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:848
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:489
AVCodecContext::height
int height
Definition: avcodec.h:699
OutputFilter::channel_layouts
uint64_t * channel_layouts
Definition: ffmpeg.h:277
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:392
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
nb_output_files
int nb_output_files
Definition: ffmpeg.c:155
AVCodecParameters::field_order
enum AVFieldOrder field_order
Video only.
Definition: codec_par.h:141
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:503
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
AVFMT_TS_NONSTRICT
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:472
av_opt_eval_flags
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
AVStream::disposition
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:918
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
AV_DISPOSITION_VISUAL_IMPAIRED
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:824
decode_error_stat
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
AVStream::id
int id
Format-specific stream ID.
Definition: avformat.h:872
AVFrame::decode_error_flags
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:595
AVFMT_FLAG_BITEXACT
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1483
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:865
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
abort_on_flags
int abort_on_flags
Definition: ffmpeg_opt.c:165
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1058
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
output_packet
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:860
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:238
av_strlcat
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:93
normalize.ifile
ifile
Definition: normalize.py:6
sdp_filename
char * sdp_filename
Definition: ffmpeg_opt.c:145
AVStream::nb_side_data
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:971
AV_CODEC_PROP_TEXT_SUB
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
w
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo ug o o w
Definition: fate.txt:150
AVCodecContext::opaque
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:568
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:361
init_output_stream_encode
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3265
hw_device_free_all
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:274
avformat.h
InputFile::eagain
int eagain
Definition: ffmpeg.h:396
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
dict.h
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:388
InputFile::ist_index
int ist_index
Definition: ffmpeg.h:397
AV_DISPOSITION_DESCRIPTIONS
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:846
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:366
av_bsf_send_packet
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:197
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
InputFilter::sample_rate
int sample_rate
Definition: ffmpeg.h:249
file
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration file
Definition: fate.txt:125
HWAccel::init
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:68
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5061
ifilter_parameters_from_frame
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1183
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
AVCodecContext
main external API structure.
Definition: avcodec.h:526
avcodec_parameters_copy
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2064
AVFrame::height
int height
Definition: frame.h:358
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:866
transcode_init_done
static atomic_int transcode_init_done
Definition: ffmpeg.c:344
BenchmarkTimeStamps
Definition: ffmpeg.c:123
avformat_transfer_internal_stream_timing_info
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5771
hw_device_setup_for_encode
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:419
channel_layout.h
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
InputFilter::format
int format
Definition: ffmpeg.h:244
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
AV_RL32
#define AV_RL32
Definition: intreadwrite.h:146
OutputStream::finished
OSTFinished finished
Definition: ffmpeg.h:512
report_new_stream
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3609
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
config.h
av_stream_get_end_pts
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:136
check_decode_result
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2076
avfilter.h
avformat_free_context
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4448
av_buffersink_get_channels
int av_buffersink_get_channels(const AVFilterContext *ctx)
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:989
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
InputStream::nb_samples
int64_t nb_samples
Definition: ffmpeg.h:327
InputFilter::height
int height
Definition: ffmpeg.h:246
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:357
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
avcodec_get_hw_config
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1803
InputFile::ts_offset
int64_t ts_offset
Definition: ffmpeg.h:404
InputStream::discard
int discard
Definition: ffmpeg.h:297
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
print_report
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1634
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
OutputFilter
Definition: ffmpeg.h:258
nb_frames_dup
static int nb_frames_dup
Definition: ffmpeg.c:135
InputStream::sub2video::frame
AVFrame * frame
Definition: ffmpeg.h:349
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:534
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:287
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:464
audio_volume
int audio_volume
Definition: ffmpeg_opt.c:151
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
OutputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:555
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:225
AVCodecParameters::video_delay
int video_delay
Video only.
Definition: codec_par.h:155
av_fifo_size
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:38
InputStream::sub2video::h
int h
Definition: ffmpeg.h:350
llrint
#define llrint(x)
Definition: libm.h:394
set_encoder_id
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3137
AVCodecParameters::format
int format
Definition: codec_par.h:84
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
InputFilter::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:253
InputStream::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:376
av_fifo_freep
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
FKF_N_FORCED
@ FKF_N_FORCED
Definition: ffmpeg.h:426
AVDictionaryEntry
Definition: dict.h:81
InputStream::sub2video::end_pts
int64_t end_pts
Definition: ffmpeg.h:347
av_add_q
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
stdin_interaction
int stdin_interaction
Definition: ffmpeg_opt.c:168
do_hex_dump
int do_hex_dump
Definition: ffmpeg_opt.c:158
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
AVCodecParameters::codec_id
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
InputStream::ts_scale
double ts_scale
Definition: ffmpeg.h:329
AVPacket
This structure stores compressed data.
Definition: packet.h:332
av_thread_message_queue_free
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
av_interleaved_write_frame
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1236
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
init_input_stream
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2870
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:375
InputFile::input_ts_offset
int64_t input_ts_offset
Definition: ffmpeg.h:402
AVCodecParameters::channel_layout
uint64_t channel_layout
Audio only.
Definition: codec_par.h:162
av_bsf_free
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
InputStream::dts_buffer
int64_t * dts_buffer
Definition: ffmpeg.h:387
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:158
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
AV_OPT_TYPE_FLAGS
@ AV_OPT_TYPE_FLAGS
Definition: opt.h:222
av_fifo_alloc
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:162
imgutils.h
AV_PKT_DATA_QUALITY_STATS
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: packet.h:132
timestamp.h
OutputStream
Definition: muxing.c:53
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:564
hwcontext.h
av_thread_message_queue_set_err_recv
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
Definition: threadmessage.c:199
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1289
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
codec_flags
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:149
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
av_stream_get_parser
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:144
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
AVCodecHWConfig
Definition: codec.h:425
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
av_pkt_dump_log2
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:114
avcodec_descriptor_get
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3394
InputStream::nb_dts_buffer
int nb_dts_buffer
Definition: ffmpeg.h:388
InputStream::saw_first_ts
int saw_first_ts
Definition: ffmpeg.h:330
AVDictionaryEntry::value
char * value
Definition: dict.h:83
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:904
set_tty_echo
static void set_tty_echo(int on)
Definition: ffmpeg.c:3854
avstring.h
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
InputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:409
FKF_N
@ FKF_N
Definition: ffmpeg.h:425
AVStream::pts_wrap_bits
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1057
log_callback_null
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4815
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
snprintf
#define snprintf
Definition: snprintf.h:34
ABORT_ON_FLAG_EMPTY_OUTPUT
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:433
read_key
static int read_key(void)
Definition: ffmpeg.c:433
reap_filters
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1413
VSYNC_DROP
#define VSYNC_DROP
Definition: ffmpeg.h:54
buffersrc.h
AVCodecHWConfig::device_type
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:446
InputStream::subtitle
AVSubtitle subtitle
Definition: ffmpeg.h:342
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:905
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
init_output_bsfs
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:2994
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2696
filtergraph_is_simple
int filtergraph_is_simple(FilterGraph *fg)
Definition: ffmpeg_filter.c:1215
init_encoder_time_base
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3241
dec_ctx
static AVCodecContext * dec_ctx
Definition: filtering_audio.c:43
nb_output_streams
int nb_output_streams
Definition: ffmpeg.c:153
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
OutputFile
Definition: ffmpeg.h:554
av_init_packet
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
AV_DISPOSITION_LYRICS
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:814
InputStream::autorotate
int autorotate
Definition: ffmpeg.h:336
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65
FF_API_DEBUG_MV
#define FF_API_DEBUG_MV
Definition: version.h:58