FFmpeg  4.3
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "dct.h"
43 #include "idctdsp.h"
44 #include "mpeg12.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
47 #include "h261.h"
48 #include "h263.h"
49 #include "h263data.h"
50 #include "mjpegenc_common.h"
51 #include "mathops.h"
52 #include "mpegutils.h"
53 #include "mjpegenc.h"
54 #include "msmpeg4.h"
55 #include "pixblockdsp.h"
56 #include "qpeldsp.h"
57 #include "faandct.h"
58 #include "thread.h"
59 #include "aandcttab.h"
60 #include "flv.h"
61 #include "mpeg4video.h"
62 #include "internal.h"
63 #include "bytestream.h"
64 #include "wmv2.h"
65 #include "rv10.h"
66 #include "packet_internal.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
71 #define QUANT_BIAS_SHIFT 8
72 
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
84 
87  { NULL },
88 };
89 
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91  uint16_t (*qmat16)[2][64],
92  const uint16_t *quant_matrix,
93  int bias, int qmin, int qmax, int intra)
94 {
95  FDCTDSPContext *fdsp = &s->fdsp;
96  int qscale;
97  int shift = 0;
98 
99  for (qscale = qmin; qscale <= qmax; qscale++) {
100  int i;
101  int qscale2;
102 
103  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104  else qscale2 = qscale << 1;
105 
106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 #if CONFIG_FAANDCT
108  fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
111  for (i = 0; i < 64; i++) {
112  const int j = s->idsp.idct_permutation[i];
113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
114  /* 16 <= qscale * quant_matrix[i] <= 7905
115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116  * 19952 <= x <= 249205026
117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118  * 3444240 >= (1 << 36) / (x) >= 275 */
119 
120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121  }
122  } else if (fdsp->fdct == ff_fdct_ifast) {
123  for (i = 0; i < 64; i++) {
124  const int j = s->idsp.idct_permutation[i];
125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126  /* 16 <= qscale * quant_matrix[i] <= 7905
127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128  * 19952 <= x <= 249205026
129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130  * 3444240 >= (1 << 36) / (x) >= 275 */
131 
132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
133  }
134  } else {
135  for (i = 0; i < 64; i++) {
136  const int j = s->idsp.idct_permutation[i];
137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139  * Assume x = qscale * quant_matrix[i]
140  * So 16 <= x <= 7905
141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142  * so 32768 >= (1 << 19) / (x) >= 67 */
143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145  // (qscale * quant_matrix[i]);
146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
147 
148  if (qmat16[qscale][0][i] == 0 ||
149  qmat16[qscale][0][i] == 128 * 256)
150  qmat16[qscale][0][i] = 128 * 256 - 1;
151  qmat16[qscale][1][i] =
152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153  qmat16[qscale][0][i]);
154  }
155  }
156 
157  for (i = intra; i < 64; i++) {
158  int64_t max = 8191;
159  if (fdsp->fdct == ff_fdct_ifast) {
160  max = (8191LL * ff_aanscales[i]) >> 14;
161  }
162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
163  shift++;
164  }
165  }
166  }
167  if (shift) {
168  av_log(s->avctx, AV_LOG_INFO,
169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
170  QMAT_SHIFT - shift);
171  }
172 }
173 
174 static inline void update_qscale(MpegEncContext *s)
175 {
176  if (s->q_scale_type == 1 && 0) {
177  int i;
178  int bestdiff=INT_MAX;
179  int best = 1;
180 
181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185  continue;
186  if (diff < bestdiff) {
187  bestdiff = diff;
188  best = i;
189  }
190  }
191  s->qscale = best;
192  } else {
193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194  (FF_LAMBDA_SHIFT + 7);
195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
196  }
197 
198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
200 }
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
233 {
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
255 {
256  int i;
258 
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
262  s->me.mv_penalty = default_mv_penalty;
263  s->fcode_tab = default_fcode_tab;
264 
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
270 {
271  if (ARCH_X86)
273 
275  ff_h263dsp_init(&s->h263dsp);
276  if (!s->dct_quantize)
277  s->dct_quantize = ff_dct_quantize_c;
278  if (!s->denoise_dct)
279  s->denoise_dct = denoise_dct_c;
280  s->fast_dct_quantize = s->dct_quantize;
281  if (s->avctx->trellis)
282  s->dct_quantize = dct_quantize_trellis_c;
283 
284  return 0;
285 }
286 
287 /* init video encoder */
289 {
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
295 
296  switch (avctx->codec_id) {
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
301  "only YUV420 and YUV422 are supported\n");
302  return AVERROR(EINVAL);
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
316  format_supported = 1;
317  /* MPEG color space */
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return AVERROR(EINVAL);
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return AVERROR(EINVAL);
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
339  s->chroma_format = CHROMA_444;
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
343  s->chroma_format = CHROMA_422;
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
348  s->chroma_format = CHROMA_420;
349  break;
350  }
351 
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
357  s->rtp_payload_size = avctx->rtp_payload_size;
359  s->me_penalty_compensation = avctx->me_penalty_compensation;
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
384  s->strict_std_compliance = avctx->strict_std_compliance;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
387  s->intra_dc_precision = avctx->intra_dc_precision;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
394 
395  if (s->intra_dc_precision < 0) {
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
409  s->user_specified_pts = AV_NOPTS_VALUE;
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418  /* Fixed QSCALE */
419  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
420 
421  s->adaptive_quant = (s->avctx->lumi_masking ||
422  s->avctx->dark_masking ||
423  s->avctx->temporal_cplx_masking ||
424  s->avctx->spatial_cplx_masking ||
425  s->avctx->p_masking ||
426  s->border_masking ||
427  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
428  !s->fixed_qscale;
429 
430  s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
431 
433  switch(avctx->codec_id) {
436  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
437  break;
438  case AV_CODEC_ID_MPEG4:
442  if (avctx->rc_max_rate >= 15000000) {
443  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
444  } else if(avctx->rc_max_rate >= 2000000) {
445  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
446  } else if(avctx->rc_max_rate >= 384000) {
447  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
448  } else
449  avctx->rc_buffer_size = 40;
450  avctx->rc_buffer_size *= 16384;
451  break;
452  }
453  if (avctx->rc_buffer_size) {
454  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
455  }
456  }
457 
458  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
459  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
460  return AVERROR(EINVAL);
461  }
462 
465  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
466  }
467 
469  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
470  return AVERROR(EINVAL);
471  }
472 
474  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
475  return AVERROR(EINVAL);
476  }
477 
478  if (avctx->rc_max_rate &&
482  "impossible bitrate constraints, this will fail\n");
483  }
484 
485  if (avctx->rc_buffer_size &&
486  avctx->bit_rate * (int64_t)avctx->time_base.num >
487  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
488  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
489  return AVERROR(EINVAL);
490  }
491 
492  if (!s->fixed_qscale &&
496  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
498  }
499 
500  if (s->avctx->rc_max_rate &&
501  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
502  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
503  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
504  90000LL * (avctx->rc_buffer_size - 1) >
505  s->avctx->rc_max_rate * 0xFFFFLL) {
507  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
508  "specified vbv buffer is too large for the given bitrate!\n");
509  }
510 
511  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
512  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
513  s->codec_id != AV_CODEC_ID_FLV1) {
514  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
515  return AVERROR(EINVAL);
516  }
517 
518  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
520  "OBMC is only supported with simple mb decision\n");
521  return AVERROR(EINVAL);
522  }
523 
524  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
525  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
526  return AVERROR(EINVAL);
527  }
528 
529  if (s->max_b_frames &&
530  s->codec_id != AV_CODEC_ID_MPEG4 &&
531  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
532  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
533  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
534  return AVERROR(EINVAL);
535  }
536  if (s->max_b_frames < 0) {
538  "max b frames must be 0 or positive for mpegvideo based encoders\n");
539  return AVERROR(EINVAL);
540  }
541 
542  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
543  s->codec_id == AV_CODEC_ID_H263 ||
544  s->codec_id == AV_CODEC_ID_H263P) &&
545  (avctx->sample_aspect_ratio.num > 255 ||
546  avctx->sample_aspect_ratio.den > 255)) {
548  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
552  }
553 
554  if ((s->codec_id == AV_CODEC_ID_H263 ||
555  s->codec_id == AV_CODEC_ID_H263P) &&
556  (avctx->width > 2048 ||
557  avctx->height > 1152 )) {
558  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
559  return AVERROR(EINVAL);
560  }
561  if ((s->codec_id == AV_CODEC_ID_H263 ||
562  s->codec_id == AV_CODEC_ID_H263P) &&
563  ((avctx->width &3) ||
564  (avctx->height&3) )) {
565  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
566  return AVERROR(EINVAL);
567  }
568 
569  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
570  (avctx->width > 4095 ||
571  avctx->height > 4095 )) {
572  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
573  return AVERROR(EINVAL);
574  }
575 
576  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
577  (avctx->width > 16383 ||
578  avctx->height > 16383 )) {
579  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
580  return AVERROR(EINVAL);
581  }
582 
583  if (s->codec_id == AV_CODEC_ID_RV10 &&
584  (avctx->width &15 ||
585  avctx->height&15 )) {
586  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
587  return AVERROR(EINVAL);
588  }
589 
590  if (s->codec_id == AV_CODEC_ID_RV20 &&
591  (avctx->width &3 ||
592  avctx->height&3 )) {
593  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
594  return AVERROR(EINVAL);
595  }
596 
597  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
598  s->codec_id == AV_CODEC_ID_WMV2) &&
599  avctx->width & 1) {
600  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
601  return AVERROR(EINVAL);
602  }
603 
604  if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
605  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
606  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
607  return AVERROR(EINVAL);
608  }
609 
610 #if FF_API_PRIVATE_OPT
612  if (avctx->mpeg_quant)
613  s->mpeg_quant = avctx->mpeg_quant;
615 #endif
616 
617  // FIXME mpeg2 uses that too
618  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
619  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
621  "mpeg2 style quantization not supported by codec\n");
622  return AVERROR(EINVAL);
623  }
624 
625  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
626  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
627  return AVERROR(EINVAL);
628  }
629 
630  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
631  s->avctx->mb_decision != FF_MB_DECISION_RD) {
632  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
633  return AVERROR(EINVAL);
634  }
635 
636  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
637  (s->codec_id == AV_CODEC_ID_AMV ||
638  s->codec_id == AV_CODEC_ID_MJPEG)) {
639  // Used to produce garbage with MJPEG.
641  "QP RD is no longer compatible with MJPEG or AMV\n");
642  return AVERROR(EINVAL);
643  }
644 
645 #if FF_API_PRIVATE_OPT
648  s->scenechange_threshold = avctx->scenechange_threshold;
650 #endif
651 
652  if (s->scenechange_threshold < 1000000000 &&
653  (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
655  "closed gop with scene change detection are not supported yet, "
656  "set threshold to 1000000000\n");
657  return AVERROR_PATCHWELCOME;
658  }
659 
660  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
661  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
662  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
664  "low delay forcing is only available for mpeg2, "
665  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
666  return AVERROR(EINVAL);
667  }
668  if (s->max_b_frames != 0) {
670  "B-frames cannot be used with low delay\n");
671  return AVERROR(EINVAL);
672  }
673  }
674 
675  if (s->q_scale_type == 1) {
676  if (avctx->qmax > 28) {
678  "non linear quant only supports qmax <= 28 currently\n");
679  return AVERROR_PATCHWELCOME;
680  }
681  }
682 
683  if (avctx->slices > 1 &&
685  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
686  return AVERROR(EINVAL);
687  }
688 
689  if (s->avctx->thread_count > 1 &&
690  s->codec_id != AV_CODEC_ID_MPEG4 &&
691  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
692  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
693  s->codec_id != AV_CODEC_ID_MJPEG &&
694  (s->codec_id != AV_CODEC_ID_H263P)) {
696  "multi threaded encoding not supported by codec\n");
697  return AVERROR_PATCHWELCOME;
698  }
699 
700  if (s->avctx->thread_count < 1) {
702  "automatic thread number detection not supported by codec, "
703  "patch welcome\n");
704  return AVERROR_PATCHWELCOME;
705  }
706 
707  if (!avctx->time_base.den || !avctx->time_base.num) {
708  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
709  return AVERROR(EINVAL);
710  }
711 
712 #if FF_API_PRIVATE_OPT
714  if (avctx->b_frame_strategy)
715  s->b_frame_strategy = avctx->b_frame_strategy;
716  if (avctx->b_sensitivity != 40)
717  s->b_sensitivity = avctx->b_sensitivity;
719 #endif
720 
721  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
723  "notice: b_frame_strategy only affects the first pass\n");
724  s->b_frame_strategy = 0;
725  }
726 
728  if (i > 1) {
729  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
730  avctx->time_base.den /= i;
731  avctx->time_base.num /= i;
732  //return -1;
733  }
734 
735  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
736  // (a + x * 3 / 8) / x
737  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
738  s->inter_quant_bias = 0;
739  } else {
740  s->intra_quant_bias = 0;
741  // (a - x / 4) / x
742  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
743  }
744 
745  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
746  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
747  return AVERROR(EINVAL);
748  }
749 
750  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
751 
752  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
753  s->avctx->time_base.den > (1 << 16) - 1) {
755  "timebase %d/%d not supported by MPEG 4 standard, "
756  "the maximum admitted value for the timebase denominator "
757  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
758  (1 << 16) - 1);
759  return AVERROR(EINVAL);
760  }
761  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
762 
763  switch (avctx->codec->id) {
765  s->out_format = FMT_MPEG1;
766  s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
767  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
768  break;
770  s->out_format = FMT_MPEG1;
771  s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
772  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
773  s->rtp_mode = 1;
774  break;
775  case AV_CODEC_ID_MJPEG:
776  case AV_CODEC_ID_AMV:
777  s->out_format = FMT_MJPEG;
778  s->intra_only = 1; /* force intra only for jpeg */
781  if ((ret = ff_mjpeg_encode_init(s)) < 0)
782  return ret;
783  avctx->delay = 0;
784  s->low_delay = 1;
785  break;
786  case AV_CODEC_ID_H261:
787  if (!CONFIG_H261_ENCODER)
789  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
791  "The specified picture size of %dx%d is not valid for the "
792  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
793  s->width, s->height);
794  return AVERROR(EINVAL);
795  }
796  s->out_format = FMT_H261;
797  avctx->delay = 0;
798  s->low_delay = 1;
799  s->rtp_mode = 0; /* Sliced encoding not supported */
800  break;
801  case AV_CODEC_ID_H263:
802  if (!CONFIG_H263_ENCODER)
805  s->width, s->height) == 8) {
807  "The specified picture size of %dx%d is not valid for "
808  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
809  "352x288, 704x576, and 1408x1152. "
810  "Try H.263+.\n", s->width, s->height);
811  return AVERROR(EINVAL);
812  }
813  s->out_format = FMT_H263;
814  avctx->delay = 0;
815  s->low_delay = 1;
816  break;
817  case AV_CODEC_ID_H263P:
818  s->out_format = FMT_H263;
819  s->h263_plus = 1;
820  /* Fx */
821  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
822  s->modified_quant = s->h263_aic;
823  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
824  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
825 
826  /* /Fx */
827  /* These are just to be sure */
828  avctx->delay = 0;
829  s->low_delay = 1;
830  break;
831  case AV_CODEC_ID_FLV1:
832  s->out_format = FMT_H263;
833  s->h263_flv = 2; /* format = 1; 11-bit codes */
834  s->unrestricted_mv = 1;
835  s->rtp_mode = 0; /* don't allow GOB */
836  avctx->delay = 0;
837  s->low_delay = 1;
838  break;
839  case AV_CODEC_ID_RV10:
840  s->out_format = FMT_H263;
841  avctx->delay = 0;
842  s->low_delay = 1;
843  break;
844  case AV_CODEC_ID_RV20:
845  s->out_format = FMT_H263;
846  avctx->delay = 0;
847  s->low_delay = 1;
848  s->modified_quant = 1;
849  s->h263_aic = 1;
850  s->h263_plus = 1;
851  s->loop_filter = 1;
852  s->unrestricted_mv = 0;
853  break;
854  case AV_CODEC_ID_MPEG4:
855  s->out_format = FMT_H263;
856  s->h263_pred = 1;
857  s->unrestricted_mv = 1;
858  s->low_delay = s->max_b_frames ? 0 : 1;
859  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
860  break;
862  s->out_format = FMT_H263;
863  s->h263_pred = 1;
864  s->unrestricted_mv = 1;
865  s->msmpeg4_version = 2;
866  avctx->delay = 0;
867  s->low_delay = 1;
868  break;
870  s->out_format = FMT_H263;
871  s->h263_pred = 1;
872  s->unrestricted_mv = 1;
873  s->msmpeg4_version = 3;
874  s->flipflop_rounding = 1;
875  avctx->delay = 0;
876  s->low_delay = 1;
877  break;
878  case AV_CODEC_ID_WMV1:
879  s->out_format = FMT_H263;
880  s->h263_pred = 1;
881  s->unrestricted_mv = 1;
882  s->msmpeg4_version = 4;
883  s->flipflop_rounding = 1;
884  avctx->delay = 0;
885  s->low_delay = 1;
886  break;
887  case AV_CODEC_ID_WMV2:
888  s->out_format = FMT_H263;
889  s->h263_pred = 1;
890  s->unrestricted_mv = 1;
891  s->msmpeg4_version = 5;
892  s->flipflop_rounding = 1;
893  avctx->delay = 0;
894  s->low_delay = 1;
895  break;
896  default:
897  return AVERROR(EINVAL);
898  }
899 
900 #if FF_API_PRIVATE_OPT
902  if (avctx->noise_reduction)
903  s->noise_reduction = avctx->noise_reduction;
905 #endif
906 
907  avctx->has_b_frames = !s->low_delay;
908 
909  s->encoding = 1;
910 
911  s->progressive_frame =
912  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
914  s->alternate_scan);
915 
916  /* init */
918  if ((ret = ff_mpv_common_init(s)) < 0)
919  return ret;
920 
921  ff_fdctdsp_init(&s->fdsp, avctx);
922  ff_me_cmp_init(&s->mecc, avctx);
923  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
924  ff_pixblockdsp_init(&s->pdsp, avctx);
925  ff_qpeldsp_init(&s->qdsp);
926 
927  if (s->msmpeg4_version) {
928  FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
929  2 * 2 * (MAX_LEVEL + 1) *
930  (MAX_RUN + 1) * 2 * sizeof(int), fail);
931  }
932  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
933 
934  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
935  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
936  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
937  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
938  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
939  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
940  FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
941  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
942  FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
943  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
944 
945 
946  if (s->noise_reduction) {
947  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
948  2 * 64 * sizeof(uint16_t), fail);
949  }
950 
952 
953  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
954  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
955 
956  if (s->slice_context_count > 1) {
957  s->rtp_mode = 1;
958 
960  s->h263_slice_structured = 1;
961  }
962 
963  s->quant_precision = 5;
964 
965 #if FF_API_PRIVATE_OPT
968  s->frame_skip_threshold = avctx->frame_skip_threshold;
970  s->frame_skip_factor = avctx->frame_skip_factor;
971  if (avctx->frame_skip_exp)
972  s->frame_skip_exp = avctx->frame_skip_exp;
974  s->frame_skip_cmp = avctx->frame_skip_cmp;
976 #endif
977 
978  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
979  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
980 
981  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
983  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
985  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
986  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
987  return ret;
989  && s->out_format == FMT_MPEG1)
991 
992  /* init q matrix */
993  for (i = 0; i < 64; i++) {
994  int j = s->idsp.idct_permutation[i];
995  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
996  s->mpeg_quant) {
997  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
998  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
999  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1000  s->intra_matrix[j] =
1001  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1002  } else {
1003  /* MPEG-1/2 */
1004  s->chroma_intra_matrix[j] =
1005  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1006  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1007  }
1008  if (s->avctx->intra_matrix)
1009  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1010  if (s->avctx->inter_matrix)
1011  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1012  }
1013 
1014  /* precompute matrix */
1015  /* for mjpeg, we do include qscale in the matrix */
1016  if (s->out_format != FMT_MJPEG) {
1017  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1018  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1019  31, 1);
1020  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1021  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1022  31, 0);
1023  }
1024 
1025  if ((ret = ff_rate_control_init(s)) < 0)
1026  return ret;
1027 
1028 #if FF_API_PRIVATE_OPT
1030  if (avctx->brd_scale)
1031  s->brd_scale = avctx->brd_scale;
1032 
1033  if (avctx->prediction_method)
1034  s->pred = avctx->prediction_method + 1;
1036 #endif
1037 
1038  if (s->b_frame_strategy == 2) {
1039  for (i = 0; i < s->max_b_frames + 2; i++) {
1040  s->tmp_frames[i] = av_frame_alloc();
1041  if (!s->tmp_frames[i])
1042  return AVERROR(ENOMEM);
1043 
1044  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1045  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1046  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1047 
1048  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1049  if (ret < 0)
1050  return ret;
1051  }
1052  }
1053 
1054  cpb_props = ff_add_cpb_side_data(avctx);
1055  if (!cpb_props)
1056  return AVERROR(ENOMEM);
1057  cpb_props->max_bitrate = avctx->rc_max_rate;
1058  cpb_props->min_bitrate = avctx->rc_min_rate;
1059  cpb_props->avg_bitrate = avctx->bit_rate;
1060  cpb_props->buffer_size = avctx->rc_buffer_size;
1061 
1062  return 0;
1063 fail:
1064  return AVERROR_UNKNOWN;
1065 }
1066 
1068 {
1070  int i;
1071 
1073 
1075  if (CONFIG_MJPEG_ENCODER &&
1076  s->out_format == FMT_MJPEG)
1078 
1080 
1081  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1082  av_frame_free(&s->tmp_frames[i]);
1083 
1084  ff_free_picture_tables(&s->new_picture);
1085  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1086 
1087  av_freep(&s->avctx->stats_out);
1088  av_freep(&s->ac_stats);
1089 
1090  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1091  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1092  s->q_chroma_intra_matrix= NULL;
1093  s->q_chroma_intra_matrix16= NULL;
1094  av_freep(&s->q_intra_matrix);
1095  av_freep(&s->q_inter_matrix);
1096  av_freep(&s->q_intra_matrix16);
1097  av_freep(&s->q_inter_matrix16);
1098  av_freep(&s->input_picture);
1099  av_freep(&s->reordered_input_picture);
1100  av_freep(&s->dct_offset);
1101 
1102  return 0;
1103 }
1104 
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    scalar reference value each pixel is compared to
 * @param stride byte distance between the starts of consecutive rows
 * @return sum over all 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int sum = 0;
    int row, col;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff >= 0 ? diff : -diff;
        }
    }

    return sum;
}
1118 
/* Counts 16x16 blocks that look "intra-like": blocks whose SAE against their
 * own mean (plus a 500 margin) is below their SAD against the reference.
 * NOTE(review): the first signature line is missing from this extraction —
 * upstream this is static int get_intra_count(MpegEncContext *s, uint8_t *src,
 * ...). Confirm against the full source. */
                          uint8_t *ref, int stride)
{
    int x, y, w, h;
    int acc = 0;

    /* round dimensions down to whole macroblocks */
    w = s->width & ~15;
    h = s->height & ~15;

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            /* inter cost: SAD of the block against the reference picture */
            int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
                                     stride, 16);
            /* block mean: pix_sum over 256 pixels, rounded (+128) >> 8 */
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            /* intra cost proxy: deviation of the block from its own mean */
            int sae = get_sae(src + offset, mean, stride);

            /* 500 biases the decision toward inter */
            acc += sae + 500 < sad;
        }
    }
    return acc;
}
1141 
/**
 * Allocate (or attach, when shared) the buffers of one encoder Picture.
 *
 * Thin wrapper around ff_alloc_picture() that forwards the encoder's
 * geometry (mb/b8 strides, chroma shifts, output format) and lets it fill
 * in s->linesize / s->uvlinesize.
 *
 * @param shared if nonzero, pic->f already owns its data buffers
 * @return 0 on success, a negative error code otherwise
 */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
1149 
/**
 * Queue one user-supplied frame (or a flush request) into s->input_picture[].
 *
 * With a frame: validates/guesses its pts, picks an unused Picture slot and
 * either references the user buffers directly ("direct", when strides and
 * alignment match the encoder's) or copies the pixels into freshly allocated
 * buffers, padding the bottom edge when dimensions are not MB-aligned.
 * With pic_arg == NULL (flushing): compacts the queue so that
 * s->input_picture[0] holds the oldest pending picture.
 *
 * @param pic_arg next frame to encode, or NULL to flush
 * @return 0 on success, negative AVERROR on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    /* reordering delay: max_b_frames normally, 1 frame when B-frames are off
     * but low_delay is not set, 0 in low-delay mode */
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* pts must be strictly increasing */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                /* remember the first frame interval for dts computation */
                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* "direct" mode (referencing the user's buffers) requires matching
         * strides, MB-aligned dimensions and sufficiently aligned pointers */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            /* if the user's data already sits exactly at our in-place offset,
             * no copy is needed */
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                /* copy all three planes into the encoder-owned buffers */
                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    /* interlaced MPEG-2 rounds MB height to 32, so more
                     * bottom padding may be needed */
                    if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        /* row-by-row copy when strides differ */
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    /* replicate edge pixels into the bottom padding when the
                     * frame is not MB-aligned */
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1300 
1302 {
1303  int x, y, plane;
1304  int score = 0;
1305  int64_t score64 = 0;
1306 
1307  for (plane = 0; plane < 3; plane++) {
1308  const int stride = p->f->linesize[plane];
1309  const int bw = plane ? 1 : 2;
1310  for (y = 0; y < s->mb_height * bw; y++) {
1311  for (x = 0; x < s->mb_width * bw; x++) {
1312  int off = p->shared ? 0 : 16;
1313  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1314  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1315  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1316 
1317  switch (FFABS(s->frame_skip_exp)) {
1318  case 0: score = FFMAX(score, v); break;
1319  case 1: score += FFABS(v); break;
1320  case 2: score64 += v * (int64_t)v; break;
1321  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1322  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1323  }
1324  }
1325  }
1326  }
1327  emms_c();
1328 
1329  if (score)
1330  score64 = score;
1331  if (s->frame_skip_exp < 0)
1332  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1333  -1.0/s->frame_skip_exp);
1334 
1335  if (score64 < s->frame_skip_threshold)
1336  return 1;
1337  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1338  return 1;
1339  return 0;
1340 }
1341 
1343 {
1344  AVPacket pkt = { 0 };
1345  int ret;
1346  int size = 0;
1347 
1348  av_init_packet(&pkt);
1349 
1351  if (ret < 0)
1352  return ret;
1353 
1354  do {
1356  if (ret >= 0) {
1357  size += pkt.size;
1358  av_packet_unref(&pkt);
1359  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1360  return ret;
1361  } while (ret >= 0);
1362 
1363  return size;
1364 }
1365 
1367 {
1368  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1369  const int scale = s->brd_scale;
1370  int width = s->width >> scale;
1371  int height = s->height >> scale;
1372  int i, j, out_size, p_lambda, b_lambda, lambda2;
1373  int64_t best_rd = INT64_MAX;
1374  int best_b_count = -1;
1375  int ret = 0;
1376 
1377  av_assert0(scale >= 0 && scale <= 3);
1378 
1379  //emms_c();
1380  //s->next_picture_ptr->quality;
1381  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1382  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1383  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1384  if (!b_lambda) // FIXME we should do this somewhere else
1385  b_lambda = p_lambda;
1386  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1388 
1389  for (i = 0; i < s->max_b_frames + 2; i++) {
1390  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1391  s->next_picture_ptr;
1392  uint8_t *data[4];
1393 
1394  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1395  pre_input = *pre_input_ptr;
1396  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1397 
1398  if (!pre_input.shared && i) {
1399  data[0] += INPLACE_OFFSET;
1400  data[1] += INPLACE_OFFSET;
1401  data[2] += INPLACE_OFFSET;
1402  }
1403 
1404  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1405  s->tmp_frames[i]->linesize[0],
1406  data[0],
1407  pre_input.f->linesize[0],
1408  width, height);
1409  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1410  s->tmp_frames[i]->linesize[1],
1411  data[1],
1412  pre_input.f->linesize[1],
1413  width >> 1, height >> 1);
1414  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1415  s->tmp_frames[i]->linesize[2],
1416  data[2],
1417  pre_input.f->linesize[2],
1418  width >> 1, height >> 1);
1419  }
1420  }
1421 
1422  for (j = 0; j < s->max_b_frames + 1; j++) {
1423  AVCodecContext *c;
1424  int64_t rd = 0;
1425 
1426  if (!s->input_picture[j])
1427  break;
1428 
1430  if (!c)
1431  return AVERROR(ENOMEM);
1432 
1433  c->width = width;
1434  c->height = height;
1436  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1437  c->mb_decision = s->avctx->mb_decision;
1438  c->me_cmp = s->avctx->me_cmp;
1439  c->mb_cmp = s->avctx->mb_cmp;
1440  c->me_sub_cmp = s->avctx->me_sub_cmp;
1441  c->pix_fmt = AV_PIX_FMT_YUV420P;
1442  c->time_base = s->avctx->time_base;
1443  c->max_b_frames = s->max_b_frames;
1444 
1445  ret = avcodec_open2(c, codec, NULL);
1446  if (ret < 0)
1447  goto fail;
1448 
1449  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1450  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1451 
1452  out_size = encode_frame(c, s->tmp_frames[0]);
1453  if (out_size < 0) {
1454  ret = out_size;
1455  goto fail;
1456  }
1457 
1458  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1459 
1460  for (i = 0; i < s->max_b_frames + 1; i++) {
1461  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1462 
1463  s->tmp_frames[i + 1]->pict_type = is_p ?
1465  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1466 
1467  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1468  if (out_size < 0) {
1469  ret = out_size;
1470  goto fail;
1471  }
1472 
1473  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1474  }
1475 
1476  /* get the delayed frames */
1478  if (out_size < 0) {
1479  ret = out_size;
1480  goto fail;
1481  }
1482  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1483 
1484  rd += c->error[0] + c->error[1] + c->error[2];
1485 
1486  if (rd < best_rd) {
1487  best_rd = rd;
1488  best_b_count = j;
1489  }
1490 
1491 fail:
1493  if (ret < 0)
1494  return ret;
1495  }
1496 
1497  return best_b_count;
1498 }
1499 
1501 {
1502  int i, ret;
1503 
1504  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1505  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1506  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1507 
1508  /* set next picture type & ordering */
1509  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1510  if (s->frame_skip_threshold || s->frame_skip_factor) {
1511  if (s->picture_in_gop_number < s->gop_size &&
1512  s->next_picture_ptr &&
1513  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1514  // FIXME check that the gop check above is +-1 correct
1515  av_frame_unref(s->input_picture[0]->f);
1516 
1517  ff_vbv_update(s, 0);
1518 
1519  goto no_output_pic;
1520  }
1521  }
1522 
1523  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1524  !s->next_picture_ptr || s->intra_only) {
1525  s->reordered_input_picture[0] = s->input_picture[0];
1526  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1527  s->reordered_input_picture[0]->f->coded_picture_number =
1528  s->coded_picture_number++;
1529  } else {
1530  int b_frames = 0;
1531 
1532  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1533  for (i = 0; i < s->max_b_frames + 1; i++) {
1534  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1535 
1536  if (pict_num >= s->rc_context.num_entries)
1537  break;
1538  if (!s->input_picture[i]) {
1539  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1540  break;
1541  }
1542 
1543  s->input_picture[i]->f->pict_type =
1544  s->rc_context.entry[pict_num].new_pict_type;
1545  }
1546  }
1547 
1548  if (s->b_frame_strategy == 0) {
1549  b_frames = s->max_b_frames;
1550  while (b_frames && !s->input_picture[b_frames])
1551  b_frames--;
1552  } else if (s->b_frame_strategy == 1) {
1553  for (i = 1; i < s->max_b_frames + 1; i++) {
1554  if (s->input_picture[i] &&
1555  s->input_picture[i]->b_frame_score == 0) {
1556  s->input_picture[i]->b_frame_score =
1558  s->input_picture[i ]->f->data[0],
1559  s->input_picture[i - 1]->f->data[0],
1560  s->linesize) + 1;
1561  }
1562  }
1563  for (i = 0; i < s->max_b_frames + 1; i++) {
1564  if (!s->input_picture[i] ||
1565  s->input_picture[i]->b_frame_score - 1 >
1566  s->mb_num / s->b_sensitivity)
1567  break;
1568  }
1569 
1570  b_frames = FFMAX(0, i - 1);
1571 
1572  /* reset scores */
1573  for (i = 0; i < b_frames + 1; i++) {
1574  s->input_picture[i]->b_frame_score = 0;
1575  }
1576  } else if (s->b_frame_strategy == 2) {
1577  b_frames = estimate_best_b_count(s);
1578  if (b_frames < 0)
1579  return b_frames;
1580  }
1581 
1582  emms_c();
1583 
1584  for (i = b_frames - 1; i >= 0; i--) {
1585  int type = s->input_picture[i]->f->pict_type;
1586  if (type && type != AV_PICTURE_TYPE_B)
1587  b_frames = i;
1588  }
1589  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1590  b_frames == s->max_b_frames) {
1591  av_log(s->avctx, AV_LOG_ERROR,
1592  "warning, too many B-frames in a row\n");
1593  }
1594 
1595  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1596  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1597  s->gop_size > s->picture_in_gop_number) {
1598  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1599  } else {
1600  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1601  b_frames = 0;
1602  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1603  }
1604  }
1605 
1606  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1607  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1608  b_frames--;
1609 
1610  s->reordered_input_picture[0] = s->input_picture[b_frames];
1611  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1612  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1613  s->reordered_input_picture[0]->f->coded_picture_number =
1614  s->coded_picture_number++;
1615  for (i = 0; i < b_frames; i++) {
1616  s->reordered_input_picture[i + 1] = s->input_picture[i];
1617  s->reordered_input_picture[i + 1]->f->pict_type =
1619  s->reordered_input_picture[i + 1]->f->coded_picture_number =
1620  s->coded_picture_number++;
1621  }
1622  }
1623  }
1624 no_output_pic:
1625  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1626 
1627  if (s->reordered_input_picture[0]) {
1628  s->reordered_input_picture[0]->reference =
1629  s->reordered_input_picture[0]->f->pict_type !=
1630  AV_PICTURE_TYPE_B ? 3 : 0;
1631 
1632  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1633  return ret;
1634 
1635  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1636  // input is a shared pix, so we can't modify it -> allocate a new
1637  // one & ensure that the shared one is reuseable
1638 
1639  Picture *pic;
1640  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1641  if (i < 0)
1642  return i;
1643  pic = &s->picture[i];
1644 
1645  pic->reference = s->reordered_input_picture[0]->reference;
1646  if (alloc_picture(s, pic, 0) < 0) {
1647  return -1;
1648  }
1649 
1650  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1651  if (ret < 0)
1652  return ret;
1653 
1654  /* mark us unused / free shared pic */
1655  av_frame_unref(s->reordered_input_picture[0]->f);
1656  s->reordered_input_picture[0]->shared = 0;
1657 
1658  s->current_picture_ptr = pic;
1659  } else {
1660  // input is not a shared pix -> reuse buffer for current_pix
1661  s->current_picture_ptr = s->reordered_input_picture[0];
1662  for (i = 0; i < 4; i++) {
1663  s->new_picture.f->data[i] += INPLACE_OFFSET;
1664  }
1665  }
1666  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1667  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1668  s->current_picture_ptr)) < 0)
1669  return ret;
1670 
1671  s->picture_number = s->new_picture.f->display_picture_number;
1672  }
1673  return 0;
1674 }
1675 
1677 {
1678  if (s->unrestricted_mv &&
1679  s->current_picture.reference &&
1680  !s->intra_only) {
1681  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1682  int hshift = desc->log2_chroma_w;
1683  int vshift = desc->log2_chroma_h;
1684  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1685  s->current_picture.f->linesize[0],
1686  s->h_edge_pos, s->v_edge_pos,
1688  EDGE_TOP | EDGE_BOTTOM);
1689  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1690  s->current_picture.f->linesize[1],
1691  s->h_edge_pos >> hshift,
1692  s->v_edge_pos >> vshift,
1693  EDGE_WIDTH >> hshift,
1694  EDGE_WIDTH >> vshift,
1695  EDGE_TOP | EDGE_BOTTOM);
1696  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1697  s->current_picture.f->linesize[2],
1698  s->h_edge_pos >> hshift,
1699  s->v_edge_pos >> vshift,
1700  EDGE_WIDTH >> hshift,
1701  EDGE_WIDTH >> vshift,
1702  EDGE_TOP | EDGE_BOTTOM);
1703  }
1704 
1705  emms_c();
1706 
1707  s->last_pict_type = s->pict_type;
1708  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1709  if (s->pict_type!= AV_PICTURE_TYPE_B)
1710  s->last_non_b_pict_type = s->pict_type;
1711 
1712 #if FF_API_CODED_FRAME
1714  av_frame_unref(s->avctx->coded_frame);
1715  av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1717 #endif
1718 #if FF_API_ERROR_FRAME
1720  memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1721  sizeof(s->current_picture.encoding_error));
1723 #endif
1724 }
1725 
1727 {
1728  int intra, i;
1729 
1730  for (intra = 0; intra < 2; intra++) {
1731  if (s->dct_count[intra] > (1 << 16)) {
1732  for (i = 0; i < 64; i++) {
1733  s->dct_error_sum[intra][i] >>= 1;
1734  }
1735  s->dct_count[intra] >>= 1;
1736  }
1737 
1738  for (i = 0; i < 64; i++) {
1739  s->dct_offset[intra][i] = (s->noise_reduction *
1740  s->dct_count[intra] +
1741  s->dct_error_sum[intra][i] / 2) /
1742  (s->dct_error_sum[intra][i] + 1);
1743  }
1744  }
1745 }
1746 
1748 {
1749  int ret;
1750 
1751  /* mark & release old frames */
1752  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1753  s->last_picture_ptr != s->next_picture_ptr &&
1754  s->last_picture_ptr->f->buf[0]) {
1755  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1756  }
1757 
1758  s->current_picture_ptr->f->pict_type = s->pict_type;
1759  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1760 
1761  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1762  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1763  s->current_picture_ptr)) < 0)
1764  return ret;
1765 
1766  if (s->pict_type != AV_PICTURE_TYPE_B) {
1767  s->last_picture_ptr = s->next_picture_ptr;
1768  if (!s->droppable)
1769  s->next_picture_ptr = s->current_picture_ptr;
1770  }
1771 
1772  if (s->last_picture_ptr) {
1773  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1774  if (s->last_picture_ptr->f->buf[0] &&
1775  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1776  s->last_picture_ptr)) < 0)
1777  return ret;
1778  }
1779  if (s->next_picture_ptr) {
1780  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1781  if (s->next_picture_ptr->f->buf[0] &&
1782  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1783  s->next_picture_ptr)) < 0)
1784  return ret;
1785  }
1786 
1787  if (s->picture_structure!= PICT_FRAME) {
1788  int i;
1789  for (i = 0; i < 4; i++) {
1790  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1791  s->current_picture.f->data[i] +=
1792  s->current_picture.f->linesize[i];
1793  }
1794  s->current_picture.f->linesize[i] *= 2;
1795  s->last_picture.f->linesize[i] *= 2;
1796  s->next_picture.f->linesize[i] *= 2;
1797  }
1798  }
1799 
1800  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1801  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1802  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1803  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1804  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1805  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1806  } else {
1807  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1808  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1809  }
1810 
1811  if (s->dct_error_sum) {
1812  av_assert2(s->noise_reduction && s->encoding);
1814  }
1815 
1816  return 0;
1817 }
1818 
/* Top-level encode entry point: queues the input frame, selects the picture
 * to code, encodes it (re-encoding at higher lambda on VBV overflow), writes
 * stuffing and the MPEG-1/2 CBR vbv_delay, and fills pkt/got_packet.
 * NOTE(review): lines lost in extraction — the first signature line
 * (upstream: static int ff_mpv_encode_picture(AVCodecContext *avctx,
 * AVPacket *pkt, ...)), the MpegEncContext lookup, the side-data type
 * argument of av_packet_new_side_data(), a growing-buffer size update, the
 * pass-1 stats call, the MPEG-1/2 case labels of the stuffing switch, the
 * av_packet_add_side_data() call head, several deprecation-warning guards,
 * the AV_PKT_FLAG_KEY assignment and the mb_info side-data finalization.
 * Confirm against the full source before editing. */
                          const AVFrame *pic_arg, int *got_packet)
{
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f->data[0]) {
        /* single slice context with no caller buffer: use the growing
         * internal byte buffer instead of a worst-case allocation */
        int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
        int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
                                              :
                                              s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
        if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
            return ret;
        if (s->mb_info) {
            /* NOTE(review): the side-data type argument line is elided */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* give each slice thread a proportional slice of the packet */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int end_y = s->thread_context[i]-> end_mb_y;
            int h = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        ret = encode_picture(s, s->picture_number);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            /* NOTE(review): the pkt->size update line is elided here */
        }
        if (ret < 0)
            return -1;

#if FF_API_STAT_BITS
        avctx->header_bits = s->header_bits;
        avctx->mv_bits = s->mv_bits;
        avctx->misc_bits = s->misc_bits;
        avctx->i_tex_bits = s->i_tex_bits;
        avctx->p_tex_bits = s->p_tex_bits;
        avctx->i_count = s->i_count;
        // FIXME f/b_count in avctx
        avctx->p_count = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count = s->skip_count;
#endif

        frame_end(s);

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            /* frame too large for the VBV buffer: raise lambda and retry */
            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0; // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                s->vbv_ignore_qmax = 1;
                av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

            av_assert0(s->avctx->rc_max_rate);
        }

        /* NOTE(review): the pass-1 stats call body of this if is elided */
        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
            avctx->error[i] += s->current_picture_ptr->encoding_error[i];
        }
        ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
                                       s->current_picture_ptr->encoding_error,
                                       (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
                                       s->pict_type);

        if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
            assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
                                             s->misc_bits + s->i_tex_bits +
                                             s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            /* NOTE(review): the MPEG-1/MPEG-2 case labels of this switch
             * are elided here */
            switch (s->codec_id) {
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: a stuffing start code then 0xFF bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (s->avctx->rc_max_rate &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            double inbits = s->avctx->rc_max_rate *
                            av_q2d(s->avctx->time_base);
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            /* vbv_delay is in 90 kHz clock ticks */
            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field in the already-written
             * picture header (split across 3 bytes) */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1] = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

            /* NOTE(review): the av_packet_add_side_data() call head is
             * elided here */
                                          (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }

#if FF_API_VBV_DELAY
            avctx->vbv_delay = vbv_delay * 300;
#endif
        }
        s->total_bits += s->frame_bits;
#if FF_API_STAT_BITS
        avctx->frame_bits = s->frame_bits;
#endif


        pkt->pts = s->current_picture.f->pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        /* NOTE(review): the AV_PKT_FLAG_KEY assignment and the mb_info
         * side-data finalization bodies are elided here */
        if (s->current_picture.f->key_frame)
        if (s->mb_info)
    } else {
        s->frame_bits = 0;
    }

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    }

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
2079 
/* Zeroes a whole quantized block when it contains only sparse +-1
 * coefficients whose positional score stays below the threshold — cheaper to
 * skip such blocks than to code them. A negative threshold disables the
 * DC-skip and is used with its absolute value.
 * NOTE(review): the first signature line is missing from this extraction —
 * upstream this is static void dct_single_coeff_elimination(MpegEncContext
 * *s, int n, int threshold). Confirm against the full source. */
                                  int n, int threshold)
{
    /* score contribution per zero-run length preceding a +-1 coefficient:
     * early (low-frequency) positions count more */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* scan in permuted (zigzag) order; any coefficient with |level| > 1
     * disqualifies the block from elimination */
    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* below threshold: clear everything (except a possibly kept DC) */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2135 
2136 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2137  int last_index)
2138 {
2139  int i;
2140  const int maxlevel = s->max_qcoeff;
2141  const int minlevel = s->min_qcoeff;
2142  int overflow = 0;
2143 
2144  if (s->mb_intra) {
2145  i = 1; // skip clipping of intra dc
2146  } else
2147  i = 0;
2148 
2149  for (; i <= last_index; i++) {
2150  const int j = s->intra_scantable.permutated[i];
2151  int level = block[j];
2152 
2153  if (level > maxlevel) {
2154  level = maxlevel;
2155  overflow++;
2156  } else if (level < minlevel) {
2157  level = minlevel;
2158  overflow++;
2159  }
2160 
2161  block[j] = level;
2162  }
2163 
2164  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2165  av_log(s->avctx, AV_LOG_INFO,
2166  "warning, clipping %d dct coefficients to %d..%d\n",
2167  overflow, minlevel, maxlevel);
2168 }
2169 
/**
 * Compute a per-pixel visual weight for an 8x8 block.
 *
 * For each pixel, gathers the sum, sum of squares and count of its 3x3
 * neighbourhood (clipped to the 8x8 block) and derives
 * 36 * ff_sqrt(count * sqr - sum * sum) / count — a local activity measure.
 *
 * @param weight output array of 64 weights, row-major (x + 8*y)
 * @param ptr    top-left pixel of the 8x8 block
 * @param stride byte distance between consecutive rows of ptr
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            /* 3x3 neighbourhood bounds, clipped to the block */
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);
            int xx, yy;

            for (yy = y_lo; yy < y_hi; yy++) {
                for (xx = x_lo; xx < x_hi; xx++) {
                    const int pix = ptr[xx + yy * stride];
                    sum += pix;
                    sqr += pix * pix;
                    count++;
                }
            }
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2193 
2195  int motion_x, int motion_y,
2196  int mb_block_height,
2197  int mb_block_width,
2198  int mb_block_count)
2199 {
2200  int16_t weight[12][64];
2201  int16_t orig[12][64];
2202  const int mb_x = s->mb_x;
2203  const int mb_y = s->mb_y;
2204  int i;
2205  int skip_dct[12];
2206  int dct_offset = s->linesize * 8; // default for progressive frames
2207  int uv_dct_offset = s->uvlinesize * 8;
2208  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2209  ptrdiff_t wrap_y, wrap_c;
2210 
2211  for (i = 0; i < mb_block_count; i++)
2212  skip_dct[i] = s->skipdct;
2213 
2214  if (s->adaptive_quant) {
2215  const int last_qp = s->qscale;
2216  const int mb_xy = mb_x + mb_y * s->mb_stride;
2217 
2218  s->lambda = s->lambda_table[mb_xy];
2219  update_qscale(s);
2220 
2221  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2222  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2223  s->dquant = s->qscale - last_qp;
2224 
2225  if (s->out_format == FMT_H263) {
2226  s->dquant = av_clip(s->dquant, -2, 2);
2227 
2228  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2229  if (!s->mb_intra) {
2230  if (s->pict_type == AV_PICTURE_TYPE_B) {
2231  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2232  s->dquant = 0;
2233  }
2234  if (s->mv_type == MV_TYPE_8X8)
2235  s->dquant = 0;
2236  }
2237  }
2238  }
2239  }
2240  ff_set_qscale(s, last_qp + s->dquant);
2241  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2242  ff_set_qscale(s, s->qscale + s->dquant);
2243 
2244  wrap_y = s->linesize;
2245  wrap_c = s->uvlinesize;
2246  ptr_y = s->new_picture.f->data[0] +
2247  (mb_y * 16 * wrap_y) + mb_x * 16;
2248  ptr_cb = s->new_picture.f->data[1] +
2249  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2250  ptr_cr = s->new_picture.f->data[2] +
2251  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2252 
2253  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2254  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2255  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2256  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2257  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2258  wrap_y, wrap_y,
2259  16, 16, mb_x * 16, mb_y * 16,
2260  s->width, s->height);
2261  ptr_y = ebuf;
2262  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2263  wrap_c, wrap_c,
2264  mb_block_width, mb_block_height,
2265  mb_x * mb_block_width, mb_y * mb_block_height,
2266  cw, ch);
2267  ptr_cb = ebuf + 16 * wrap_y;
2268  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2269  wrap_c, wrap_c,
2270  mb_block_width, mb_block_height,
2271  mb_x * mb_block_width, mb_y * mb_block_height,
2272  cw, ch);
2273  ptr_cr = ebuf + 16 * wrap_y + 16;
2274  }
2275 
2276  if (s->mb_intra) {
2277  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2278  int progressive_score, interlaced_score;
2279 
2280  s->interlaced_dct = 0;
2281  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2282  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2283  NULL, wrap_y, 8) - 400;
2284 
2285  if (progressive_score > 0) {
2286  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2287  NULL, wrap_y * 2, 8) +
2288  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2289  NULL, wrap_y * 2, 8);
2290  if (progressive_score > interlaced_score) {
2291  s->interlaced_dct = 1;
2292 
2293  dct_offset = wrap_y;
2294  uv_dct_offset = wrap_c;
2295  wrap_y <<= 1;
2296  if (s->chroma_format == CHROMA_422 ||
2297  s->chroma_format == CHROMA_444)
2298  wrap_c <<= 1;
2299  }
2300  }
2301  }
2302 
2303  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2304  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2305  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2306  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2307 
2308  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2309  skip_dct[4] = 1;
2310  skip_dct[5] = 1;
2311  } else {
2312  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2313  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2314  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2315  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2316  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2317  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2318  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2319  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2320  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2321  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2322  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2323  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2324  }
2325  }
2326  } else {
2327  op_pixels_func (*op_pix)[4];
2328  qpel_mc_func (*op_qpix)[16];
2329  uint8_t *dest_y, *dest_cb, *dest_cr;
2330 
2331  dest_y = s->dest[0];
2332  dest_cb = s->dest[1];
2333  dest_cr = s->dest[2];
2334 
2335  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2336  op_pix = s->hdsp.put_pixels_tab;
2337  op_qpix = s->qdsp.put_qpel_pixels_tab;
2338  } else {
2339  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2340  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2341  }
2342 
2343  if (s->mv_dir & MV_DIR_FORWARD) {
2344  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2345  s->last_picture.f->data,
2346  op_pix, op_qpix);
2347  op_pix = s->hdsp.avg_pixels_tab;
2348  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2349  }
2350  if (s->mv_dir & MV_DIR_BACKWARD) {
2351  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2352  s->next_picture.f->data,
2353  op_pix, op_qpix);
2354  }
2355 
2356  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2357  int progressive_score, interlaced_score;
2358 
2359  s->interlaced_dct = 0;
2360  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2361  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2362  ptr_y + wrap_y * 8,
2363  wrap_y, 8) - 400;
2364 
2365  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2366  progressive_score -= 400;
2367 
2368  if (progressive_score > 0) {
2369  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2370  wrap_y * 2, 8) +
2371  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2372  ptr_y + wrap_y,
2373  wrap_y * 2, 8);
2374 
2375  if (progressive_score > interlaced_score) {
2376  s->interlaced_dct = 1;
2377 
2378  dct_offset = wrap_y;
2379  uv_dct_offset = wrap_c;
2380  wrap_y <<= 1;
2381  if (s->chroma_format == CHROMA_422)
2382  wrap_c <<= 1;
2383  }
2384  }
2385  }
2386 
2387  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2388  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2389  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2390  dest_y + dct_offset, wrap_y);
2391  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2392  dest_y + dct_offset + 8, wrap_y);
2393 
2394  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2395  skip_dct[4] = 1;
2396  skip_dct[5] = 1;
2397  } else {
2398  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2399  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2400  if (!s->chroma_y_shift) { /* 422 */
2401  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2402  dest_cb + uv_dct_offset, wrap_c);
2403  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2404  dest_cr + uv_dct_offset, wrap_c);
2405  }
2406  }
2407  /* pre quantization */
2408  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2409  2 * s->qscale * s->qscale) {
2410  // FIXME optimize
2411  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2412  skip_dct[0] = 1;
2413  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2414  skip_dct[1] = 1;
2415  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2416  wrap_y, 8) < 20 * s->qscale)
2417  skip_dct[2] = 1;
2418  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2419  wrap_y, 8) < 20 * s->qscale)
2420  skip_dct[3] = 1;
2421  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2422  skip_dct[4] = 1;
2423  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2424  skip_dct[5] = 1;
2425  if (!s->chroma_y_shift) { /* 422 */
2426  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2427  dest_cb + uv_dct_offset,
2428  wrap_c, 8) < 20 * s->qscale)
2429  skip_dct[6] = 1;
2430  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2431  dest_cr + uv_dct_offset,
2432  wrap_c, 8) < 20 * s->qscale)
2433  skip_dct[7] = 1;
2434  }
2435  }
2436  }
2437 
2438  if (s->quantizer_noise_shaping) {
2439  if (!skip_dct[0])
2440  get_visual_weight(weight[0], ptr_y , wrap_y);
2441  if (!skip_dct[1])
2442  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2443  if (!skip_dct[2])
2444  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2445  if (!skip_dct[3])
2446  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2447  if (!skip_dct[4])
2448  get_visual_weight(weight[4], ptr_cb , wrap_c);
2449  if (!skip_dct[5])
2450  get_visual_weight(weight[5], ptr_cr , wrap_c);
2451  if (!s->chroma_y_shift) { /* 422 */
2452  if (!skip_dct[6])
2453  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2454  wrap_c);
2455  if (!skip_dct[7])
2456  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2457  wrap_c);
2458  }
2459  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2460  }
2461 
2462  /* DCT & quantize */
2463  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2464  {
2465  for (i = 0; i < mb_block_count; i++) {
2466  if (!skip_dct[i]) {
2467  int overflow;
2468  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2469  // FIXME we could decide to change to quantizer instead of
2470  // clipping
2471  // JS: I don't think that would be a good idea it could lower
2472  // quality instead of improve it. Just INTRADC clipping
2473  // deserves changes in quantizer
2474  if (overflow)
2475  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2476  } else
2477  s->block_last_index[i] = -1;
2478  }
2479  if (s->quantizer_noise_shaping) {
2480  for (i = 0; i < mb_block_count; i++) {
2481  if (!skip_dct[i]) {
2482  s->block_last_index[i] =
2483  dct_quantize_refine(s, s->block[i], weight[i],
2484  orig[i], i, s->qscale);
2485  }
2486  }
2487  }
2488 
2489  if (s->luma_elim_threshold && !s->mb_intra)
2490  for (i = 0; i < 4; i++)
2491  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2492  if (s->chroma_elim_threshold && !s->mb_intra)
2493  for (i = 4; i < mb_block_count; i++)
2494  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2495 
2496  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2497  for (i = 0; i < mb_block_count; i++) {
2498  if (s->block_last_index[i] == -1)
2499  s->coded_score[i] = INT_MAX / 256;
2500  }
2501  }
2502  }
2503 
2504  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2505  s->block_last_index[4] =
2506  s->block_last_index[5] = 0;
2507  s->block[4][0] =
2508  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2509  if (!s->chroma_y_shift) { /* 422 / 444 */
2510  for (i=6; i<12; i++) {
2511  s->block_last_index[i] = 0;
2512  s->block[i][0] = s->block[4][0];
2513  }
2514  }
2515  }
2516 
2517  // non c quantize code returns incorrect block_last_index FIXME
2518  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2519  for (i = 0; i < mb_block_count; i++) {
2520  int j;
2521  if (s->block_last_index[i] > 0) {
2522  for (j = 63; j > 0; j--) {
2523  if (s->block[i][s->intra_scantable.permutated[j]])
2524  break;
2525  }
2526  s->block_last_index[i] = j;
2527  }
2528  }
2529  }
2530 
2531  /* huffman encode */
2532  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2536  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2537  break;
2538  case AV_CODEC_ID_MPEG4:
2540  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2541  break;
2542  case AV_CODEC_ID_MSMPEG4V2:
2543  case AV_CODEC_ID_MSMPEG4V3:
2544  case AV_CODEC_ID_WMV1:
2546  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2547  break;
2548  case AV_CODEC_ID_WMV2:
2549  if (CONFIG_WMV2_ENCODER)
2550  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2551  break;
2552  case AV_CODEC_ID_H261:
2553  if (CONFIG_H261_ENCODER)
2554  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2555  break;
2556  case AV_CODEC_ID_H263:
2557  case AV_CODEC_ID_H263P:
2558  case AV_CODEC_ID_FLV1:
2559  case AV_CODEC_ID_RV10:
2560  case AV_CODEC_ID_RV20:
2561  if (CONFIG_H263_ENCODER)
2562  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2563  break;
2564  case AV_CODEC_ID_MJPEG:
2565  case AV_CODEC_ID_AMV:
2567  ff_mjpeg_encode_mb(s, s->block);
2568  break;
2569  default:
2570  av_assert1(0);
2571  }
2572 }
2573 
2574 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2575 {
2576  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2577  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2578  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2579 }
2580 
2582  int i;
2583 
2584  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2585 
2586  /* MPEG-1 */
2587  d->mb_skip_run= s->mb_skip_run;
2588  for(i=0; i<3; i++)
2589  d->last_dc[i] = s->last_dc[i];
2590 
2591  /* statistics */
2592  d->mv_bits= s->mv_bits;
2593  d->i_tex_bits= s->i_tex_bits;
2594  d->p_tex_bits= s->p_tex_bits;
2595  d->i_count= s->i_count;
2596  d->f_count= s->f_count;
2597  d->b_count= s->b_count;
2598  d->skip_count= s->skip_count;
2599  d->misc_bits= s->misc_bits;
2600  d->last_bits= 0;
2601 
2602  d->mb_skipped= 0;
2603  d->qscale= s->qscale;
2604  d->dquant= s->dquant;
2605 
2606  d->esc3_level_length= s->esc3_level_length;
2607 }
2608 
2610  int i;
2611 
2612  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2613  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2614 
2615  /* MPEG-1 */
2616  d->mb_skip_run= s->mb_skip_run;
2617  for(i=0; i<3; i++)
2618  d->last_dc[i] = s->last_dc[i];
2619 
2620  /* statistics */
2621  d->mv_bits= s->mv_bits;
2622  d->i_tex_bits= s->i_tex_bits;
2623  d->p_tex_bits= s->p_tex_bits;
2624  d->i_count= s->i_count;
2625  d->f_count= s->f_count;
2626  d->b_count= s->b_count;
2627  d->skip_count= s->skip_count;
2628  d->misc_bits= s->misc_bits;
2629 
2630  d->mb_intra= s->mb_intra;
2631  d->mb_skipped= s->mb_skipped;
2632  d->mv_type= s->mv_type;
2633  d->mv_dir= s->mv_dir;
2634  d->pb= s->pb;
2635  if(s->data_partitioning){
2636  d->pb2= s->pb2;
2637  d->tex_pb= s->tex_pb;
2638  }
2639  d->block= s->block;
2640  for(i=0; i<8; i++)
2641  d->block_last_index[i]= s->block_last_index[i];
2642  d->interlaced_dct= s->interlaced_dct;
2643  d->qscale= s->qscale;
2644 
2645  d->esc3_level_length= s->esc3_level_length;
2646 }
2647 
2648 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2650  int *dmin, int *next_block, int motion_x, int motion_y)
2651 {
2652  int score;
2653  uint8_t *dest_backup[3];
2654 
2655  copy_context_before_encode(s, backup, type);
2656 
2657  s->block= s->blocks[*next_block];
2658  s->pb= pb[*next_block];
2659  if(s->data_partitioning){
2660  s->pb2 = pb2 [*next_block];
2661  s->tex_pb= tex_pb[*next_block];
2662  }
2663 
2664  if(*next_block){
2665  memcpy(dest_backup, s->dest, sizeof(s->dest));
2666  s->dest[0] = s->sc.rd_scratchpad;
2667  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2668  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2669  av_assert0(s->linesize >= 32); //FIXME
2670  }
2671 
2672  encode_mb(s, motion_x, motion_y);
2673 
2674  score= put_bits_count(&s->pb);
2675  if(s->data_partitioning){
2676  score+= put_bits_count(&s->pb2);
2677  score+= put_bits_count(&s->tex_pb);
2678  }
2679 
2680  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2681  ff_mpv_reconstruct_mb(s, s->block);
2682 
2683  score *= s->lambda2;
2684  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2685  }
2686 
2687  if(*next_block){
2688  memcpy(s->dest, dest_backup, sizeof(s->dest));
2689  }
2690 
2691  if(score<*dmin){
2692  *dmin= score;
2693  *next_block^=1;
2694 
2696  }
2697 }
2698 
2699 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2700  const uint32_t *sq = ff_square_tab + 256;
2701  int acc=0;
2702  int x,y;
2703 
2704  if(w==16 && h==16)
2705  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2706  else if(w==8 && h==8)
2707  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2708 
2709  for(y=0; y<h; y++){
2710  for(x=0; x<w; x++){
2711  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2712  }
2713  }
2714 
2715  av_assert2(acc>=0);
2716 
2717  return acc;
2718 }
2719 
2720 static int sse_mb(MpegEncContext *s){
2721  int w= 16;
2722  int h= 16;
2723 
2724  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2725  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2726 
2727  if(w==16 && h==16)
2728  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2729  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2730  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2731  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2732  }else{
2733  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2734  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2735  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2736  }
2737  else
2738  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2739  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2740  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2741 }
2742 
2744  MpegEncContext *s= *(void**)arg;
2745 
2746 
2747  s->me.pre_pass=1;
2748  s->me.dia_size= s->avctx->pre_dia_size;
2749  s->first_slice_line=1;
2750  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2751  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2752  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2753  }
2754  s->first_slice_line=0;
2755  }
2756 
2757  s->me.pre_pass=0;
2758 
2759  return 0;
2760 }
2761 
2763  MpegEncContext *s= *(void**)arg;
2764 
2766 
2767  s->me.dia_size= s->avctx->dia_size;
2768  s->first_slice_line=1;
2769  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2770  s->mb_x=0; //for block init below
2772  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2773  s->block_index[0]+=2;
2774  s->block_index[1]+=2;
2775  s->block_index[2]+=2;
2776  s->block_index[3]+=2;
2777 
2778  /* compute motion vector & mb_type and store in context */
2779  if(s->pict_type==AV_PICTURE_TYPE_B)
2780  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2781  else
2782  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2783  }
2784  s->first_slice_line=0;
2785  }
2786  return 0;
2787 }
2788 
2789 static int mb_var_thread(AVCodecContext *c, void *arg){
2790  MpegEncContext *s= *(void**)arg;
2791  int mb_x, mb_y;
2792 
2794 
2795  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2796  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2797  int xx = mb_x * 16;
2798  int yy = mb_y * 16;
2799  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2800  int varc;
2801  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2802 
2803  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2804  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2805 
2806  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2807  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2808  s->me.mb_var_sum_temp += varc;
2809  }
2810  }
2811  return 0;
2812 }
2813 
2815  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2816  if(s->partitioned_frame){
2818  }
2819 
2820  ff_mpeg4_stuffing(&s->pb);
2821  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2823  }
2824 
2825  avpriv_align_put_bits(&s->pb);
2826  flush_put_bits(&s->pb);
2827 
2828  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2829  s->misc_bits+= get_bits_diff(s);
2830 }
2831 
2833 {
2834  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2835  int offset = put_bits_count(&s->pb);
2836  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2837  int gobn = s->mb_y / s->gob_index;
2838  int pred_x, pred_y;
2839  if (CONFIG_H263_ENCODER)
2840  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2841  bytestream_put_le32(&ptr, offset);
2842  bytestream_put_byte(&ptr, s->qscale);
2843  bytestream_put_byte(&ptr, gobn);
2844  bytestream_put_le16(&ptr, mba);
2845  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2846  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2847  /* 4MV not implemented */
2848  bytestream_put_byte(&ptr, 0); /* hmv2 */
2849  bytestream_put_byte(&ptr, 0); /* vmv2 */
2850 }
2851 
2852 static void update_mb_info(MpegEncContext *s, int startcode)
2853 {
2854  if (!s->mb_info)
2855  return;
2856  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2857  s->mb_info_size += 12;
2858  s->prev_mb_info = s->last_mb_info;
2859  }
2860  if (startcode) {
2861  s->prev_mb_info = put_bits_count(&s->pb)/8;
2862  /* This might have incremented mb_info_size above, and we return without
2863  * actually writing any info into that slot yet. But in that case,
2864  * this will be called again at the start of the after writing the
2865  * start code, actually writing the mb info. */
2866  return;
2867  }
2868 
2869  s->last_mb_info = put_bits_count(&s->pb)/8;
2870  if (!s->mb_info_size)
2871  s->mb_info_size += 12;
2872  write_mb_info(s);
2873 }
2874 
2875 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2876 {
2877  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2878  && s->slice_context_count == 1
2879  && s->pb.buf == s->avctx->internal->byte_buffer) {
2880  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2881  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2882 
2883  uint8_t *new_buffer = NULL;
2884  int new_buffer_size = 0;
2885 
2886  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2887  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2888  return AVERROR(ENOMEM);
2889  }
2890 
2891  emms_c();
2892 
2893  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2894  s->avctx->internal->byte_buffer_size + size_increase);
2895  if (!new_buffer)
2896  return AVERROR(ENOMEM);
2897 
2898  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2899  av_free(s->avctx->internal->byte_buffer);
2900  s->avctx->internal->byte_buffer = new_buffer;
2901  s->avctx->internal->byte_buffer_size = new_buffer_size;
2902  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2903  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2904  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2905  }
2906  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2907  return AVERROR(EINVAL);
2908  return 0;
2909 }
2910 
2911 static int encode_thread(AVCodecContext *c, void *arg){
2912  MpegEncContext *s= *(void**)arg;
2913  int mb_x, mb_y;
2914  int chr_h= 16>>s->chroma_y_shift;
2915  int i, j;
2916  MpegEncContext best_s = { 0 }, backup_s;
2917  uint8_t bit_buf[2][MAX_MB_BYTES];
2918  uint8_t bit_buf2[2][MAX_MB_BYTES];
2919  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2920  PutBitContext pb[2], pb2[2], tex_pb[2];
2921 
2923 
2924  for(i=0; i<2; i++){
2925  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2926  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2927  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2928  }
2929 
2930  s->last_bits= put_bits_count(&s->pb);
2931  s->mv_bits=0;
2932  s->misc_bits=0;
2933  s->i_tex_bits=0;
2934  s->p_tex_bits=0;
2935  s->i_count=0;
2936  s->f_count=0;
2937  s->b_count=0;
2938  s->skip_count=0;
2939 
2940  for(i=0; i<3; i++){
2941  /* init last dc values */
2942  /* note: quant matrix value (8) is implied here */
2943  s->last_dc[i] = 128 << s->intra_dc_precision;
2944 
2945  s->current_picture.encoding_error[i] = 0;
2946  }
2947  if(s->codec_id==AV_CODEC_ID_AMV){
2948  s->last_dc[0] = 128*8/13;
2949  s->last_dc[1] = 128*8/14;
2950  s->last_dc[2] = 128*8/14;
2951  }
2952  s->mb_skip_run = 0;
2953  memset(s->last_mv, 0, sizeof(s->last_mv));
2954 
2955  s->last_mv_dir = 0;
2956 
2957  switch(s->codec_id){
2958  case AV_CODEC_ID_H263:
2959  case AV_CODEC_ID_H263P:
2960  case AV_CODEC_ID_FLV1:
2961  if (CONFIG_H263_ENCODER)
2962  s->gob_index = H263_GOB_HEIGHT(s->height);
2963  break;
2964  case AV_CODEC_ID_MPEG4:
2965  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2967  break;
2968  }
2969 
2970  s->resync_mb_x=0;
2971  s->resync_mb_y=0;
2972  s->first_slice_line = 1;
2973  s->ptr_lastgob = s->pb.buf;
2974  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2975  s->mb_x=0;
2976  s->mb_y= mb_y;
2977 
2978  ff_set_qscale(s, s->qscale);
2980 
2981  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2982  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2983  int mb_type= s->mb_type[xy];
2984 // int d;
2985  int dmin= INT_MAX;
2986  int dir;
2987  int size_increase = s->avctx->internal->byte_buffer_size/4
2988  + s->mb_width*MAX_MB_BYTES;
2989 
2991  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2992  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2993  return -1;
2994  }
2995  if(s->data_partitioning){
2996  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2997  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2998  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2999  return -1;
3000  }
3001  }
3002 
3003  s->mb_x = mb_x;
3004  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3006 
3007  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3009  xy= s->mb_y*s->mb_stride + s->mb_x;
3010  mb_type= s->mb_type[xy];
3011  }
3012 
3013  /* write gob / video packet header */
3014  if(s->rtp_mode){
3015  int current_packet_size, is_gob_start;
3016 
3017  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3018 
3019  is_gob_start = s->rtp_payload_size &&
3020  current_packet_size >= s->rtp_payload_size &&
3021  mb_y + mb_x > 0;
3022 
3023  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3024 
3025  switch(s->codec_id){
3026  case AV_CODEC_ID_H263:
3027  case AV_CODEC_ID_H263P:
3028  if(!s->h263_slice_structured)
3029  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3030  break;
3032  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3034  if(s->mb_skip_run) is_gob_start=0;
3035  break;
3036  case AV_CODEC_ID_MJPEG:
3037  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3038  break;
3039  }
3040 
3041  if(is_gob_start){
3042  if(s->start_mb_y != mb_y || mb_x!=0){
3043  write_slice_end(s);
3044 
3045  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3047  }
3048  }
3049 
3050  av_assert2((put_bits_count(&s->pb)&7) == 0);
3051  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3052 
3053  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3054  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3055  int d = 100 / s->error_rate;
3056  if(r % d == 0){
3057  current_packet_size=0;
3058  s->pb.buf_ptr= s->ptr_lastgob;
3059  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3060  }
3061  }
3062 
3063 #if FF_API_RTP_CALLBACK
3065  if (s->avctx->rtp_callback){
3066  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3067  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3068  }
3070 #endif
3071  update_mb_info(s, 1);
3072 
3073  switch(s->codec_id){
3074  case AV_CODEC_ID_MPEG4:
3075  if (CONFIG_MPEG4_ENCODER) {
3078  }
3079  break;
3085  }
3086  break;
3087  case AV_CODEC_ID_H263:
3088  case AV_CODEC_ID_H263P:
3089  if (CONFIG_H263_ENCODER)
3091  break;
3092  }
3093 
3094  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3095  int bits= put_bits_count(&s->pb);
3096  s->misc_bits+= bits - s->last_bits;
3097  s->last_bits= bits;
3098  }
3099 
3100  s->ptr_lastgob += current_packet_size;
3101  s->first_slice_line=1;
3102  s->resync_mb_x=mb_x;
3103  s->resync_mb_y=mb_y;
3104  }
3105  }
3106 
3107  if( (s->resync_mb_x == s->mb_x)
3108  && s->resync_mb_y+1 == s->mb_y){
3109  s->first_slice_line=0;
3110  }
3111 
3112  s->mb_skipped=0;
3113  s->dquant=0; //only for QP_RD
3114 
3115  update_mb_info(s, 0);
3116 
3117  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3118  int next_block=0;
3119  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3120 
3121  copy_context_before_encode(&backup_s, s, -1);
3122  backup_s.pb= s->pb;
3123  best_s.data_partitioning= s->data_partitioning;
3124  best_s.partitioned_frame= s->partitioned_frame;
3125  if(s->data_partitioning){
3126  backup_s.pb2= s->pb2;
3127  backup_s.tex_pb= s->tex_pb;
3128  }
3129 
3131  s->mv_dir = MV_DIR_FORWARD;
3132  s->mv_type = MV_TYPE_16X16;
3133  s->mb_intra= 0;
3134  s->mv[0][0][0] = s->p_mv_table[xy][0];
3135  s->mv[0][0][1] = s->p_mv_table[xy][1];
3136  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3137  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3138  }
3140  s->mv_dir = MV_DIR_FORWARD;
3141  s->mv_type = MV_TYPE_FIELD;
3142  s->mb_intra= 0;
3143  for(i=0; i<2; i++){
3144  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3145  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3146  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3147  }
3148  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3149  &dmin, &next_block, 0, 0);
3150  }
3152  s->mv_dir = MV_DIR_FORWARD;
3153  s->mv_type = MV_TYPE_16X16;
3154  s->mb_intra= 0;
3155  s->mv[0][0][0] = 0;
3156  s->mv[0][0][1] = 0;
3157  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3158  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3159  }
3161  s->mv_dir = MV_DIR_FORWARD;
3162  s->mv_type = MV_TYPE_8X8;
3163  s->mb_intra= 0;
3164  for(i=0; i<4; i++){
3165  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3166  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3167  }
3168  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3169  &dmin, &next_block, 0, 0);
3170  }
3172  s->mv_dir = MV_DIR_FORWARD;
3173  s->mv_type = MV_TYPE_16X16;
3174  s->mb_intra= 0;
3175  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3176  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3177  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3178  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3179  }
3181  s->mv_dir = MV_DIR_BACKWARD;
3182  s->mv_type = MV_TYPE_16X16;
3183  s->mb_intra= 0;
3184  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3185  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3186  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3187  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3188  }
3190  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3191  s->mv_type = MV_TYPE_16X16;
3192  s->mb_intra= 0;
3193  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3194  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3195  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3196  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3197  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3198  &dmin, &next_block, 0, 0);
3199  }
3201  s->mv_dir = MV_DIR_FORWARD;
3202  s->mv_type = MV_TYPE_FIELD;
3203  s->mb_intra= 0;
3204  for(i=0; i<2; i++){
3205  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3206  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3207  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3208  }
3209  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3210  &dmin, &next_block, 0, 0);
3211  }
3213  s->mv_dir = MV_DIR_BACKWARD;
3214  s->mv_type = MV_TYPE_FIELD;
3215  s->mb_intra= 0;
3216  for(i=0; i<2; i++){
3217  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3218  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3219  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3220  }
3221  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3222  &dmin, &next_block, 0, 0);
3223  }
3225  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3226  s->mv_type = MV_TYPE_FIELD;
3227  s->mb_intra= 0;
3228  for(dir=0; dir<2; dir++){
3229  for(i=0; i<2; i++){
3230  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3231  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3232  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3233  }
3234  }
3235  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3236  &dmin, &next_block, 0, 0);
3237  }
3239  s->mv_dir = 0;
3240  s->mv_type = MV_TYPE_16X16;
3241  s->mb_intra= 1;
3242  s->mv[0][0][0] = 0;
3243  s->mv[0][0][1] = 0;
3244  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3245  &dmin, &next_block, 0, 0);
3246  if(s->h263_pred || s->h263_aic){
3247  if(best_s.mb_intra)
3248  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3249  else
3250  ff_clean_intra_table_entries(s); //old mode?
3251  }
3252  }
3253 
3254  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3255  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3256  const int last_qp= backup_s.qscale;
3257  int qpi, qp, dc[6];
3258  int16_t ac[6][16];
3259  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3260  static const int dquant_tab[4]={-1,1,-2,2};
3261  int storecoefs = s->mb_intra && s->dc_val[0];
3262 
3263  av_assert2(backup_s.dquant == 0);
3264 
3265  //FIXME intra
3266  s->mv_dir= best_s.mv_dir;
3267  s->mv_type = MV_TYPE_16X16;
3268  s->mb_intra= best_s.mb_intra;
3269  s->mv[0][0][0] = best_s.mv[0][0][0];
3270  s->mv[0][0][1] = best_s.mv[0][0][1];
3271  s->mv[1][0][0] = best_s.mv[1][0][0];
3272  s->mv[1][0][1] = best_s.mv[1][0][1];
3273 
3274  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3275  for(; qpi<4; qpi++){
3276  int dquant= dquant_tab[qpi];
3277  qp= last_qp + dquant;
3278  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3279  continue;
3280  backup_s.dquant= dquant;
3281  if(storecoefs){
3282  for(i=0; i<6; i++){
3283  dc[i]= s->dc_val[0][ s->block_index[i] ];
3284  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3285  }
3286  }
3287 
3288  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3289  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3290  if(best_s.qscale != qp){
3291  if(storecoefs){
3292  for(i=0; i<6; i++){
3293  s->dc_val[0][ s->block_index[i] ]= dc[i];
3294  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3295  }
3296  }
3297  }
3298  }
3299  }
3300  }
3302  int mx= s->b_direct_mv_table[xy][0];
3303  int my= s->b_direct_mv_table[xy][1];
3304 
3305  backup_s.dquant = 0;
3306  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3307  s->mb_intra= 0;
3308  ff_mpeg4_set_direct_mv(s, mx, my);
3309  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3310  &dmin, &next_block, mx, my);
3311  }
3313  backup_s.dquant = 0;
3314  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3315  s->mb_intra= 0;
3316  ff_mpeg4_set_direct_mv(s, 0, 0);
3317  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3318  &dmin, &next_block, 0, 0);
3319  }
3320  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3321  int coded=0;
3322  for(i=0; i<6; i++)
3323  coded |= s->block_last_index[i];
3324  if(coded){
3325  int mx,my;
3326  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3327  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3328  mx=my=0; //FIXME find the one we actually used
3329  ff_mpeg4_set_direct_mv(s, mx, my);
3330  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3331  mx= s->mv[1][0][0];
3332  my= s->mv[1][0][1];
3333  }else{
3334  mx= s->mv[0][0][0];
3335  my= s->mv[0][0][1];
3336  }
3337 
3338  s->mv_dir= best_s.mv_dir;
3339  s->mv_type = best_s.mv_type;
3340  s->mb_intra= 0;
3341 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3342  s->mv[0][0][1] = best_s.mv[0][0][1];
3343  s->mv[1][0][0] = best_s.mv[1][0][0];
3344  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3345  backup_s.dquant= 0;
3346  s->skipdct=1;
3347  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3348  &dmin, &next_block, mx, my);
3349  s->skipdct=0;
3350  }
3351  }
3352 
3353  s->current_picture.qscale_table[xy] = best_s.qscale;
3354 
3355  copy_context_after_encode(s, &best_s, -1);
3356 
3357  pb_bits_count= put_bits_count(&s->pb);
3358  flush_put_bits(&s->pb);
3359  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3360  s->pb= backup_s.pb;
3361 
3362  if(s->data_partitioning){
3363  pb2_bits_count= put_bits_count(&s->pb2);
3364  flush_put_bits(&s->pb2);
3365  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3366  s->pb2= backup_s.pb2;
3367 
3368  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3369  flush_put_bits(&s->tex_pb);
3370  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3371  s->tex_pb= backup_s.tex_pb;
3372  }
3373  s->last_bits= put_bits_count(&s->pb);
3374 
3375  if (CONFIG_H263_ENCODER &&
3376  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3378 
3379  if(next_block==0){ //FIXME 16 vs linesize16
3380  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3381  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3382  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3383  }
3384 
3385  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3386  ff_mpv_reconstruct_mb(s, s->block);
3387  } else {
3388  int motion_x = 0, motion_y = 0;
3389  s->mv_type=MV_TYPE_16X16;
3390  // only one MB-Type possible
3391 
3392  switch(mb_type){
3394  s->mv_dir = 0;
3395  s->mb_intra= 1;
3396  motion_x= s->mv[0][0][0] = 0;
3397  motion_y= s->mv[0][0][1] = 0;
3398  break;
3400  s->mv_dir = MV_DIR_FORWARD;
3401  s->mb_intra= 0;
3402  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3403  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3404  break;
3406  s->mv_dir = MV_DIR_FORWARD;
3407  s->mv_type = MV_TYPE_FIELD;
3408  s->mb_intra= 0;
3409  for(i=0; i<2; i++){
3410  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3411  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3412  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3413  }
3414  break;
3416  s->mv_dir = MV_DIR_FORWARD;
3417  s->mv_type = MV_TYPE_8X8;
3418  s->mb_intra= 0;
3419  for(i=0; i<4; i++){
3420  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3421  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3422  }
3423  break;
3425  if (CONFIG_MPEG4_ENCODER) {
3427  s->mb_intra= 0;
3428  motion_x=s->b_direct_mv_table[xy][0];
3429  motion_y=s->b_direct_mv_table[xy][1];
3430  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3431  }
3432  break;
3434  if (CONFIG_MPEG4_ENCODER) {
3436  s->mb_intra= 0;
3437  ff_mpeg4_set_direct_mv(s, 0, 0);
3438  }
3439  break;
3441  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3442  s->mb_intra= 0;
3443  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3444  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3445  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3446  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3447  break;
3449  s->mv_dir = MV_DIR_BACKWARD;
3450  s->mb_intra= 0;
3451  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3452  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3453  break;
3455  s->mv_dir = MV_DIR_FORWARD;
3456  s->mb_intra= 0;
3457  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3458  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3459  break;
3461  s->mv_dir = MV_DIR_FORWARD;
3462  s->mv_type = MV_TYPE_FIELD;
3463  s->mb_intra= 0;
3464  for(i=0; i<2; i++){
3465  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3466  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3467  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3468  }
3469  break;
3471  s->mv_dir = MV_DIR_BACKWARD;
3472  s->mv_type = MV_TYPE_FIELD;
3473  s->mb_intra= 0;
3474  for(i=0; i<2; i++){
3475  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3476  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3477  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3478  }
3479  break;
3481  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3482  s->mv_type = MV_TYPE_FIELD;
3483  s->mb_intra= 0;
3484  for(dir=0; dir<2; dir++){
3485  for(i=0; i<2; i++){
3486  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3487  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3488  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3489  }
3490  }
3491  break;
3492  default:
3493  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3494  }
3495 
3496  encode_mb(s, motion_x, motion_y);
3497 
3498  // RAL: Update last macroblock type
3499  s->last_mv_dir = s->mv_dir;
3500 
3501  if (CONFIG_H263_ENCODER &&
3502  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3504 
3505  ff_mpv_reconstruct_mb(s, s->block);
3506  }
3507 
3508  /* clean the MV table in IPS frames for direct mode in B-frames */
3509  if(s->mb_intra /* && I,P,S_TYPE */){
3510  s->p_mv_table[xy][0]=0;
3511  s->p_mv_table[xy][1]=0;
3512  }
3513 
3514  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3515  int w= 16;
3516  int h= 16;
3517 
3518  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3519  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3520 
3521  s->current_picture.encoding_error[0] += sse(
3522  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3523  s->dest[0], w, h, s->linesize);
3524  s->current_picture.encoding_error[1] += sse(
3525  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3526  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3527  s->current_picture.encoding_error[2] += sse(
3528  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3529  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3530  }
3531  if(s->loop_filter){
3532  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3534  }
3535  ff_dlog(s->avctx, "MB %d %d bits\n",
3536  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3537  }
3538  }
3539 
3540  //not beautiful here but we must write it before flushing so it has to be here
3541  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3543 
3544  write_slice_end(s);
3545 
3546 #if FF_API_RTP_CALLBACK
3548  /* Send the last GOB if RTP */
3549  if (s->avctx->rtp_callback) {
3550  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3551  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3552  /* Call the RTP callback to send the last GOB */
3553  emms_c();
3554  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3555  }
3557 #endif
3558 
3559  return 0;
3560 }
3561 
3562 #define MERGE(field) dst->field += src->field; src->field=0
3564  MERGE(me.scene_change_score);
3565  MERGE(me.mc_mb_var_sum_temp);
3566  MERGE(me.mb_var_sum_temp);
3567 }
3568 
3570  int i;
3571 
3572  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3573  MERGE(dct_count[1]);
3574  MERGE(mv_bits);
3575  MERGE(i_tex_bits);
3576  MERGE(p_tex_bits);
3577  MERGE(i_count);
3578  MERGE(f_count);
3579  MERGE(b_count);
3580  MERGE(skip_count);
3581  MERGE(misc_bits);
3582  MERGE(er.error_count);
3587 
3588  if (dst->noise_reduction){
3589  for(i=0; i<64; i++){
3590  MERGE(dct_error_sum[0][i]);
3591  MERGE(dct_error_sum[1][i]);
3592  }
3593  }
3594 
3595  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3596  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3597  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3598  flush_put_bits(&dst->pb);
3599 }
3600 
3601 static int estimate_qp(MpegEncContext *s, int dry_run){
3602  if (s->next_lambda){
3603  s->current_picture_ptr->f->quality =
3604  s->current_picture.f->quality = s->next_lambda;
3605  if(!dry_run) s->next_lambda= 0;
3606  } else if (!s->fixed_qscale) {
3607  int quality = ff_rate_estimate_qscale(s, dry_run);
3608  s->current_picture_ptr->f->quality =
3609  s->current_picture.f->quality = quality;
3610  if (s->current_picture.f->quality < 0)
3611  return -1;
3612  }
3613 
3614  if(s->adaptive_quant){
3615  switch(s->codec_id){
3616  case AV_CODEC_ID_MPEG4:
3619  break;
3620  case AV_CODEC_ID_H263:
3621  case AV_CODEC_ID_H263P:
3622  case AV_CODEC_ID_FLV1:
3623  if (CONFIG_H263_ENCODER)
3625  break;
3626  default:
3628  }
3629 
3630  s->lambda= s->lambda_table[0];
3631  //FIXME broken
3632  }else
3633  s->lambda = s->current_picture.f->quality;
3634  update_qscale(s);
3635  return 0;
3636 }
3637 
3638 /* must be called before writing the header */
3640  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3641  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3642 
3643  if(s->pict_type==AV_PICTURE_TYPE_B){
3644  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3645  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3646  }else{
3647  s->pp_time= s->time - s->last_non_b_time;
3648  s->last_non_b_time= s->time;
3649  av_assert1(s->picture_number==0 || s->pp_time > 0);
3650  }
3651 }
3652 
3654 {
3655  int i, ret;
3656  int bits;
3657  int context_count = s->slice_context_count;
3658 
3659  s->picture_number = picture_number;
3660 
3661  /* Reset the average MB variance */
3662  s->me.mb_var_sum_temp =
3663  s->me.mc_mb_var_sum_temp = 0;
3664 
3665  /* we need to initialize some time vars before we can encode B-frames */
3666  // RAL: Condition added for MPEG1VIDEO
3667  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3669  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3671 
3672  s->me.scene_change_score=0;
3673 
3674 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3675 
3676  if(s->pict_type==AV_PICTURE_TYPE_I){
3677  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3678  else s->no_rounding=0;
3679  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3680  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3681  s->no_rounding ^= 1;
3682  }
3683 
3684  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3685  if (estimate_qp(s,1) < 0)
3686  return -1;
3688  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3689  if(s->pict_type==AV_PICTURE_TYPE_B)
3690  s->lambda= s->last_lambda_for[s->pict_type];
3691  else
3692  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3693  update_qscale(s);
3694  }
3695 
3696  if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3697  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3698  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3699  s->q_chroma_intra_matrix = s->q_intra_matrix;
3700  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3701  }
3702 
3703  s->mb_intra=0; //for the rate distortion & bit compare functions
3704  for(i=1; i<context_count; i++){
3705  ret = ff_update_duplicate_context(s->thread_context[i], s);
3706  if (ret < 0)
3707  return ret;
3708  }
3709 
3710  if(ff_init_me(s)<0)
3711  return -1;
3712 
3713  /* Estimate motion for every MB */
3714  if(s->pict_type != AV_PICTURE_TYPE_I){
3715  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3716  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3717  if (s->pict_type != AV_PICTURE_TYPE_B) {
3718  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3719  s->me_pre == 2) {
3720  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3721  }
3722  }
3723 
3724  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3725  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3726  /* I-Frame */
3727  for(i=0; i<s->mb_stride*s->mb_height; i++)
3728  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3729 
3730  if(!s->fixed_qscale){
3731  /* finding spatial complexity for I-frame rate control */
3732  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3733  }
3734  }
3735  for(i=1; i<context_count; i++){
3736  merge_context_after_me(s, s->thread_context[i]);
3737  }
3738  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3739  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3740  emms_c();
3741 
3742  if (s->me.scene_change_score > s->scenechange_threshold &&
3743  s->pict_type == AV_PICTURE_TYPE_P) {
3744  s->pict_type= AV_PICTURE_TYPE_I;
3745  for(i=0; i<s->mb_stride*s->mb_height; i++)
3746  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3747  if(s->msmpeg4_version >= 3)
3748  s->no_rounding=1;
3749  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3750  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3751  }
3752 
3753  if(!s->umvplus){
3754  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3755  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3756 
3757  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3758  int a,b;
3759  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3760  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3761  s->f_code= FFMAX3(s->f_code, a, b);
3762  }
3763 
3765  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3766  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3767  int j;
3768  for(i=0; i<2; i++){
3769  for(j=0; j<2; j++)
3770  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3771  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3772  }
3773  }
3774  }
3775 
3776  if(s->pict_type==AV_PICTURE_TYPE_B){
3777  int a, b;
3778 
3779  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3780  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3781  s->f_code = FFMAX(a, b);
3782 
3783  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3784  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3785  s->b_code = FFMAX(a, b);
3786 
3787  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3788  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3789  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3790  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3791  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3792  int dir, j;
3793  for(dir=0; dir<2; dir++){
3794  for(i=0; i<2; i++){
3795  for(j=0; j<2; j++){
3798  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3799  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3800  }
3801  }
3802  }
3803  }
3804  }
3805  }
3806 
3807  if (estimate_qp(s, 0) < 0)
3808  return -1;
3809 
3810  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3811  s->pict_type == AV_PICTURE_TYPE_I &&
3812  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3813  s->qscale= 3; //reduce clipping problems
3814 
3815  if (s->out_format == FMT_MJPEG) {
3816  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3817  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3818 
3819  if (s->avctx->intra_matrix) {
3820  chroma_matrix =
3821  luma_matrix = s->avctx->intra_matrix;
3822  }
3823  if (s->avctx->chroma_intra_matrix)
3824  chroma_matrix = s->avctx->chroma_intra_matrix;
3825 
3826  /* for mjpeg, we do include qscale in the matrix */
3827  for(i=1;i<64;i++){
3828  int j = s->idsp.idct_permutation[i];
3829 
3830  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3831  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3832  }
3833  s->y_dc_scale_table=
3834  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3835  s->chroma_intra_matrix[0] =
3836  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3837  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3838  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3839  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3840  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3841  s->qscale= 8;
3842  }
3843  if(s->codec_id == AV_CODEC_ID_AMV){
3844  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3845  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3846  for(i=1;i<64;i++){
3847  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3848 
3849  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3850  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3851  }
3852  s->y_dc_scale_table= y;
3853  s->c_dc_scale_table= c;
3854  s->intra_matrix[0] = 13;
3855  s->chroma_intra_matrix[0] = 14;
3856  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3857  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3858  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3859  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3860  s->qscale= 8;
3861  }
3862 
3863  //FIXME var duplication
3864  s->current_picture_ptr->f->key_frame =
3865  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3866  s->current_picture_ptr->f->pict_type =
3867  s->current_picture.f->pict_type = s->pict_type;
3868 
3869  if (s->current_picture.f->key_frame)
3870  s->picture_in_gop_number=0;
3871 
3872  s->mb_x = s->mb_y = 0;
3873  s->last_bits= put_bits_count(&s->pb);
3874  switch(s->out_format) {
3875  case FMT_MJPEG:
3876  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3877  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3878  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3879  break;
3880  case FMT_H261:
3881  if (CONFIG_H261_ENCODER)
3883  break;
3884  case FMT_H263:
3885  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3887  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3889  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3891  if (ret < 0)
3892  return ret;
3893  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3895  if (ret < 0)
3896  return ret;
3897  }
3898  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3900  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3902  else if (CONFIG_H263_ENCODER)
3904  break;
3905  case FMT_MPEG1:
3908  break;
3909  default:
3910  av_assert0(0);
3911  }
3912  bits= put_bits_count(&s->pb);
3913  s->header_bits= bits - s->last_bits;
3914 
3915  for(i=1; i<context_count; i++){
3916  update_duplicate_context_after_me(s->thread_context[i], s);
3917  }
3918  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3919  for(i=1; i<context_count; i++){
3920  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3921  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3922  merge_context_after_encode(s, s->thread_context[i]);
3923  }
3924  emms_c();
3925  return 0;
3926 }
3927 
3928 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3929  const int intra= s->mb_intra;
3930  int i;
3931 
3932  s->dct_count[intra]++;
3933 
3934  for(i=0; i<64; i++){
3935  int level= block[i];
3936 
3937  if(level){
3938  if(level>0){
3939  s->dct_error_sum[intra][i] += level;
3940  level -= s->dct_offset[intra][i];
3941  if(level<0) level=0;
3942  }else{
3943  s->dct_error_sum[intra][i] -= level;
3944  level += s->dct_offset[intra][i];
3945  if(level>0) level=0;
3946  }
3947  block[i]= level;
3948  }
3949  }
3950 }
3951 
3953  int16_t *block, int n,
3954  int qscale, int *overflow){
3955  const int *qmat;
3956  const uint16_t *matrix;
3957  const uint8_t *scantable;
3958  const uint8_t *perm_scantable;
3959  int max=0;
3960  unsigned int threshold1, threshold2;
3961  int bias=0;
3962  int run_tab[65];
3963  int level_tab[65];
3964  int score_tab[65];
3965  int survivor[65];
3966  int survivor_count;
3967  int last_run=0;
3968  int last_level=0;
3969  int last_score= 0;
3970  int last_i;
3971  int coeff[2][64];
3972  int coeff_count[64];
3973  int qmul, qadd, start_i, last_non_zero, i, dc;
3974  const int esc_length= s->ac_esc_length;
3975  uint8_t * length;
3976  uint8_t * last_length;
3977  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3978  int mpeg2_qscale;
3979 
3980  s->fdsp.fdct(block);
3981 
3982  if(s->dct_error_sum)
3983  s->denoise_dct(s, block);
3984  qmul= qscale*16;
3985  qadd= ((qscale-1)|1)*8;
3986 
3987  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3988  else mpeg2_qscale = qscale << 1;
3989 
3990  if (s->mb_intra) {
3991  int q;
3992  scantable= s->intra_scantable.scantable;
3993  perm_scantable= s->intra_scantable.permutated;
3994  if (!s->h263_aic) {
3995  if (n < 4)
3996  q = s->y_dc_scale;
3997  else
3998  q = s->c_dc_scale;
3999  q = q << 3;
4000  } else{
4001  /* For AIC we skip quant/dequant of INTRADC */
4002  q = 1 << 3;
4003  qadd=0;
4004  }
4005 
4006  /* note: block[0] is assumed to be positive */
4007  block[0] = (block[0] + (q >> 1)) / q;
4008  start_i = 1;
4009  last_non_zero = 0;
4010  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4011  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4012  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4013  bias= 1<<(QMAT_SHIFT-1);
4014 
4015  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4016  length = s->intra_chroma_ac_vlc_length;
4017  last_length= s->intra_chroma_ac_vlc_last_length;
4018  } else {
4019  length = s->intra_ac_vlc_length;
4020  last_length= s->intra_ac_vlc_last_length;
4021  }
4022  } else {
4023  scantable= s->inter_scantable.scantable;
4024  perm_scantable= s->inter_scantable.permutated;
4025  start_i = 0;
4026  last_non_zero = -1;
4027  qmat = s->q_inter_matrix[qscale];
4028  matrix = s->inter_matrix;
4029  length = s->inter_ac_vlc_length;
4030  last_length= s->inter_ac_vlc_last_length;
4031  }
4032  last_i= start_i;
4033 
4034  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4035  threshold2= (threshold1<<1);
4036 
4037  for(i=63; i>=start_i; i--) {
4038  const int j = scantable[i];
4039  int level = block[j] * qmat[j];
4040 
4041  if(((unsigned)(level+threshold1))>threshold2){
4042  last_non_zero = i;
4043  break;
4044  }
4045  }
4046 
4047  for(i=start_i; i<=last_non_zero; i++) {
4048  const int j = scantable[i];
4049  int level = block[j] * qmat[j];
4050 
4051 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4052 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4053  if(((unsigned)(level+threshold1))>threshold2){
4054  if(level>0){
4055  level= (bias + level)>>QMAT_SHIFT;
4056  coeff[0][i]= level;
4057  coeff[1][i]= level-1;
4058 // coeff[2][k]= level-2;
4059  }else{
4060  level= (bias - level)>>QMAT_SHIFT;
4061  coeff[0][i]= -level;
4062  coeff[1][i]= -level+1;
4063 // coeff[2][k]= -level+2;
4064  }
4065  coeff_count[i]= FFMIN(level, 2);
4066  av_assert2(coeff_count[i]);
4067  max |=level;
4068  }else{
4069  coeff[0][i]= (level>>31)|1;
4070  coeff_count[i]= 1;
4071  }
4072  }
4073 
4074  *overflow= s->max_qcoeff < max; //overflow might have happened
4075 
4076  if(last_non_zero < start_i){
4077  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4078  return last_non_zero;
4079  }
4080 
4081  score_tab[start_i]= 0;
4082  survivor[0]= start_i;
4083  survivor_count= 1;
4084 
4085  for(i=start_i; i<=last_non_zero; i++){
4086  int level_index, j, zero_distortion;
4087  int dct_coeff= FFABS(block[ scantable[i] ]);
4088  int best_score=256*256*256*120;
4089 
4090  if (s->fdsp.fdct == ff_fdct_ifast)
4091  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4092  zero_distortion= dct_coeff*dct_coeff;
4093 
4094  for(level_index=0; level_index < coeff_count[i]; level_index++){
4095  int distortion;
4096  int level= coeff[level_index][i];
4097  const int alevel= FFABS(level);
4098  int unquant_coeff;
4099 
4100  av_assert2(level);
4101 
4102  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4103  unquant_coeff= alevel*qmul + qadd;
4104  } else if(s->out_format == FMT_MJPEG) {
4105  j = s->idsp.idct_permutation[scantable[i]];
4106  unquant_coeff = alevel * matrix[j] * 8;
4107  }else{ // MPEG-1
4108  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4109  if(s->mb_intra){
4110  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4111  unquant_coeff = (unquant_coeff - 1) | 1;
4112  }else{
4113  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4114  unquant_coeff = (unquant_coeff - 1) | 1;
4115  }
4116  unquant_coeff<<= 3;
4117  }
4118 
4119  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4120  level+=64;
4121  if((level&(~127)) == 0){
4122  for(j=survivor_count-1; j>=0; j--){
4123  int run= i - survivor[j];
4124  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4125  score += score_tab[i-run];
4126 
4127  if(score < best_score){
4128  best_score= score;
4129  run_tab[i+1]= run;
4130  level_tab[i+1]= level-64;
4131  }
4132  }
4133 
4134  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4135  for(j=survivor_count-1; j>=0; j--){
4136  int run= i - survivor[j];
4137  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4138  score += score_tab[i-run];
4139  if(score < last_score){
4140  last_score= score;
4141  last_run= run;
4142  last_level= level-64;
4143  last_i= i+1;
4144  }
4145  }
4146  }
4147  }else{
4148  distortion += esc_length*lambda;
4149  for(j=survivor_count-1; j>=0; j--){
4150  int run= i - survivor[j];
4151  int score= distortion + score_tab[i-run];
4152 
4153  if(score < best_score){
4154  best_score= score;
4155  run_tab[i+1]= run;
4156  level_tab[i+1]= level-64;
4157  }
4158  }
4159 
4160  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4161  for(j=survivor_count-1; j>=0; j--){
4162  int run= i - survivor[j];
4163  int score= distortion + score_tab[i-run];
4164  if(score < last_score){
4165  last_score= score;
4166  last_run= run;
4167  last_level= level-64;
4168  last_i= i+1;
4169  }
4170  }
4171  }
4172  }
4173  }
4174 
4175  score_tab[i+1]= best_score;
4176 
4177  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4178  if(last_non_zero <= 27){
4179  for(; survivor_count; survivor_count--){
4180  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4181  break;
4182  }
4183  }else{
4184  for(; survivor_count; survivor_count--){
4185  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4186  break;
4187  }
4188  }
4189 
4190  survivor[ survivor_count++ ]= i+1;
4191  }
4192 
4193  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4194  last_score= 256*256*256*120;
4195  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4196  int score= score_tab[i];
4197  if (i)
4198  score += lambda * 2; // FIXME more exact?
4199 
4200  if(score < last_score){
4201  last_score= score;
4202  last_i= i;
4203  last_level= level_tab[i];
4204  last_run= run_tab[i];
4205  }
4206  }
4207  }
4208 
4209  s->coded_score[n] = last_score;
4210 
4211  dc= FFABS(block[0]);
4212  last_non_zero= last_i - 1;
4213  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4214 
4215  if(last_non_zero < start_i)
4216  return last_non_zero;
4217 
4218  if(last_non_zero == 0 && start_i == 0){
4219  int best_level= 0;
4220  int best_score= dc * dc;
4221 
4222  for(i=0; i<coeff_count[0]; i++){
4223  int level= coeff[i][0];
4224  int alevel= FFABS(level);
4225  int unquant_coeff, score, distortion;
4226 
4227  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4228  unquant_coeff= (alevel*qmul + qadd)>>3;
4229  } else{ // MPEG-1
4230  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4231  unquant_coeff = (unquant_coeff - 1) | 1;
4232  }
4233  unquant_coeff = (unquant_coeff + 4) >> 3;
4234  unquant_coeff<<= 3 + 3;
4235 
4236  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4237  level+=64;
4238  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4239  else score= distortion + esc_length*lambda;
4240 
4241  if(score < best_score){
4242  best_score= score;
4243  best_level= level - 64;
4244  }
4245  }
4246  block[0]= best_level;
4247  s->coded_score[n] = best_score - dc*dc;
4248  if(best_level == 0) return -1;
4249  else return last_non_zero;
4250  }
4251 
4252  i= last_i;
4253  av_assert2(last_level);
4254 
4255  block[ perm_scantable[last_non_zero] ]= last_level;
4256  i -= last_run + 1;
4257 
4258  for(; i>start_i; i -= run_tab[i] + 1){
4259  block[ perm_scantable[i-1] ]= level_tab[i];
4260  }
4261 
4262  return last_non_zero;
4263 }
4264 
/* 8x8 DCT basis functions in BASIS_SHIFT fixed point, indexed by (permuted)
 * coefficient index; filled lazily by build_basis() below. */
static int16_t basis[64][64];
4266 
4267 static void build_basis(uint8_t *perm){
4268  int i, j, x, y;
4269  emms_c();
4270  for(i=0; i<8; i++){
4271  for(j=0; j<8; j++){
4272  for(y=0; y<8; y++){
4273  for(x=0; x<8; x++){
4274  double s= 0.25*(1<<BASIS_SHIFT);
4275  int index= 8*i + j;
4276  int perm_index= perm[index];
4277  if(i==0) s*= sqrt(0.5);
4278  if(j==0) s*= sqrt(0.5);
4279  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4280  }
4281  }
4282  }
4283  }
4284 }
4285 
/**
 * Iteratively refine an already-quantized 8x8 block so that the combined
 * rate-distortion cost decreases: distortion is evaluated with
 * s->mpvencdsp.try_8x8basis() against the weighted basis[] table, rate with
 * the (last_)length VLC-length tables.  NOTE(review): high-level intent
 * inferred from the try/add_8x8basis calls and length-table usage below;
 * confirm against the quantizer_noise_shaping callers.
 *
 * @param s      encoder context (quant tables, DSP fns, VLC length tables)
 * @param block  quantized coefficients, modified in place
 * @param weight per-coefficient perceptual weights; rescaled in place to 16..63
 * @param orig   original (unquantized) spatial-domain block
 * @param n      block index (n < 4 selects luma DC scale, else chroma)
 * @param qscale quantizer scale
 * @return index of the last non-zero coefficient after refinement
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* Lazily build the DCT basis table on first use. */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;   // DC coefficient is not refined for intra blocks
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* rem[] starts as -(original) in RECON_SHIFT fixed point (plus rounding
     * and the intra DC contribution); the dequantized basis contributions
     * added below turn it into the reconstruction error. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME: use orig directly instead of copying to rem[]
    }

    /* Rescale the perceptual weights into the 16..63 range and accumulate
     * their energy for the lambda normalization below. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Record the run lengths of the current quantized block and fold each
     * dequantized coefficient's basis function into rem[]. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement: each pass tries +/-1 on every coefficient and
     * applies the single change with the best score; stop when no change
     * improves on the current cost. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted error: used below to reject creating a new
             * coefficient whose sign would increase the error. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* Intra: also try nudging the DC coefficient. */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Changing the magnitude of an existing coefficient:
                         * the rate delta is the VLC length difference. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         -length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         -last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Creating a coefficient (0 -> +/-1): splits the run. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue; // wrong sign w.r.t. the error gradient
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0; // out of table range, treat as escape

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* New coefficient becomes the new "last": the
                             * previous last loses its last-coeff coding. */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* Removing a coefficient (+/-1 -> 0): merges two runs. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Commit the best single-coefficient change and update the
             * derived state (last_non_zero, run table, error rem[]). */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break; // no improving change found: converged
        }
    }

    return last_non_zero;
}
4601 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    /* Nothing to permute for an empty / DC-only block. */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Pass 1: lift every touched coefficient out of the block and clear
     * its old slot, so pass 2 can scatter without clobbering sources. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* Pass 2: drop each saved coefficient into its permuted slot. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = saved[pos];
    }
}
4637 
4639  int16_t *block, int n,
4640  int qscale, int *overflow)
4641 {
4642  int i, j, level, last_non_zero, q, start_i;
4643  const int *qmat;
4644  const uint8_t *scantable;
4645  int bias;
4646  int max=0;
4647  unsigned int threshold1, threshold2;
4648 
4649  s->fdsp.fdct(block);
4650 
4651  if(s->dct_error_sum)
4652  s->denoise_dct(s, block);
4653 
4654  if (s->mb_intra) {
4655  scantable= s->intra_scantable.scantable;
4656  if (!s->h263_aic) {
4657  if (n < 4)
4658  q = s->y_dc_scale;
4659  else
4660  q = s->c_dc_scale;
4661  q = q << 3;
4662  } else
4663  /* For AIC we skip quant/dequant of INTRADC */
4664  q = 1 << 3;
4665 
4666  /* note: block[0] is assumed to be positive */
4667  block[0] = (block[0] + (q >> 1)) / q;
4668  start_i = 1;
4669  last_non_zero = 0;
4670  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4671  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4672  } else {
4673  scantable= s->inter_scantable.scantable;
4674  start_i = 0;
4675  last_non_zero = -1;
4676  qmat = s->q_inter_matrix[qscale];
4677  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4678  }
4679  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4680  threshold2= (threshold1<<1);
4681  for(i=63;i>=start_i;i--) {
4682  j = scantable[i];
4683  level = block[j] * qmat[j];
4684 
4685  if(((unsigned)(level+threshold1))>threshold2){
4686  last_non_zero = i;
4687  break;
4688  }else{
4689  block[j]=0;
4690  }
4691  }
4692  for(i=start_i; i<=last_non_zero; i++) {
4693  j = scantable[i];
4694  level = block[j] * qmat[j];
4695 
4696 // if( bias+level >= (1<<QMAT_SHIFT)
4697 // || bias-level >= (1<<QMAT_SHIFT)){
4698  if(((unsigned)(level+threshold1))>threshold2){
4699  if(level>0){
4700  level= (bias + level)>>QMAT_SHIFT;
4701  block[j]= level;
4702  }else{
4703  level= (bias - level)>>QMAT_SHIFT;
4704  block[j]= -level;
4705  }
4706  max |=level;
4707  }else{
4708  block[j]=0;
4709  }
4710  }
4711  *overflow= s->max_qcoeff < max; //overflow might have happened
4712 
4713  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4714  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4715  ff_block_permute(block, s->idsp.idct_permutation,
4716  scantable, last_non_zero);
4717 
4718  return last_non_zero;
4719 }
4720 
/* Shorthands for the AVOption tables below. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Private options of the H.263 encoder.
 * NOTE(review): the listing's embedded numbering jumps here (4725 -> 4727),
 * so one original line (likely FF_MPV_COMMON_OPTS) was dropped by the
 * extraction — confirm against the repository. */
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    { NULL },
};
4729 
/* AVClass binding the private option table to the H.263 encoder context. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4736 
4738  .name = "h263",
4739  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4740  .type = AVMEDIA_TYPE_VIDEO,
4741  .id = AV_CODEC_ID_H263,
4742  .priv_data_size = sizeof(MpegEncContext),
4744  .encode2 = ff_mpv_encode_picture,
4745  .close = ff_mpv_encode_end,
4746  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4748  .priv_class = &h263_class,
4749 };
4750 
/* Private options of the H.263+ (H.263v2) encoder.
 * NOTE(review): the listing's embedded numbering jumps here (4755 -> 4757),
 * so one original line (likely FF_MPV_COMMON_OPTS) was dropped by the
 * extraction — confirm against the repository. */
static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
    { NULL },
};
/* AVClass binding the private option table to the H.263+ encoder context. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4765 
4767  .name = "h263p",
4768  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4769  .type = AVMEDIA_TYPE_VIDEO,
4770  .id = AV_CODEC_ID_H263P,
4771  .priv_data_size = sizeof(MpegEncContext),
4773  .encode2 = ff_mpv_encode_picture,
4774  .close = ff_mpv_encode_end,
4775  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4776  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4778  .priv_class = &h263p_class,
4779 };
4780 
/* AVClass for the MS-MPEG4 v2 encoder; uses only the generic mpegvideo options. */
static const AVClass msmpeg4v2_class = {
    .class_name = "msmpeg4v2 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4787 
4789  .name = "msmpeg4v2",
4790  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4791  .type = AVMEDIA_TYPE_VIDEO,
4792  .id = AV_CODEC_ID_MSMPEG4V2,
4793  .priv_data_size = sizeof(MpegEncContext),
4795  .encode2 = ff_mpv_encode_picture,
4796  .close = ff_mpv_encode_end,
4797  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4799  .priv_class = &msmpeg4v2_class,
4800 };
4801 
/* AVClass for the MS-MPEG4 v3 encoder; uses only the generic mpegvideo options. */
static const AVClass msmpeg4v3_class = {
    .class_name = "msmpeg4v3 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4808 
4810  .name = "msmpeg4",
4811  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4812  .type = AVMEDIA_TYPE_VIDEO,
4813  .id = AV_CODEC_ID_MSMPEG4V3,
4814  .priv_data_size = sizeof(MpegEncContext),
4816  .encode2 = ff_mpv_encode_picture,
4817  .close = ff_mpv_encode_end,
4818  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4820  .priv_class = &msmpeg4v3_class,
4821 };
4822 
/* AVClass for the WMV1 (Windows Media Video 7) encoder; generic options only. */
static const AVClass wmv1_class = {
    .class_name = "wmv1 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4829 
4831  .name = "wmv1",
4832  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4833  .type = AVMEDIA_TYPE_VIDEO,
4834  .id = AV_CODEC_ID_WMV1,
4835  .priv_data_size = sizeof(MpegEncContext),
4837  .encode2 = ff_mpv_encode_picture,
4838  .close = ff_mpv_encode_end,
4839  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4841  .priv_class = &wmv1_class,
4842 };
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:348
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:890
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:73
MpegEncContext::mb_skipped
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
AVCodec
AVCodec.
Definition: codec.h:190
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
stride
int stride
Definition: mace.c:144
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
direct
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
h263data.h
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
level
uint8_t level
Definition: svq3.c:209
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:404
ARCH_X86
#define ARCH_X86
Definition: config.h:38
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3639
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:422
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:206
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:42
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:124
acc
int acc
Definition: yuv2rgb.c:555
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_wmv1_encoder
AVCodec ff_wmv1_encoder
Definition: mpegvideo_enc.c:4830
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1362
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:239
AVCodecContext::mpeg_quant
attribute_deprecated int mpeg_quant
Definition: avcodec.h:821
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1336
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:644
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:325
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1594
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1411
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1651
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:728
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2574
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:275
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2720
MAX_RUN
#define MAX_RUN
Definition: rl.h:35
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4613
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4265
ff_mjpeg_encode_picture_header
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
Definition: mjpegenc_common.c:248
CONFIG_H263_ENCODER
#define CONFIG_H263_ENCODER
Definition: config.h:1293
encode_frame
static int encode_frame(AVCodecContext *c, AVFrame *frame)
Definition: mpegvideo_enc.c:1342
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2762
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1726
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:264
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:405
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
pixdesc.h
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:376
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:114
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
ff_free_picture_tables
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:465
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Definition: mpegvideo_enc.c:1142
MpegEncContext::f_count
int f_count
Definition: mpegvideo.h:349
AVOption
AVOption.
Definition: opt.h:246
ff_mpv_generic_options
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:85
b
#define b
Definition: input.c:41
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:116
data
const char data[16]
Definition: mxf.c:91
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:213
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
AVCodecContext::p_tex_bits
attribute_deprecated int p_tex_bits
Definition: avcodec.h:1525
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:227
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:71
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:121
AVCodecContext::skip_count
attribute_deprecated int skip_count
Definition: avcodec.h:1531
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1909
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
CONFIG_MPEG2VIDEO_ENCODER
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1303
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
ff_h261_encode_init
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2272
max
#define max(a, b)
Definition: cuda_runtime.h:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
mathematics.h
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
Picture
Picture.
Definition: mpegpicture.h:45
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:107
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2743
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:151
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:105
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1992
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1375
AVCodecContext::frame_skip_threshold
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:1455
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:491
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:279
ff_set_cmp
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:474
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:682
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
thread.h
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1593
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:588
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
CONFIG_H263P_ENCODER
#define CONFIG_H263P_ENCODER
Definition: config.h:1294
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:52
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:115
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:426
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:48
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:940
AVCodecContext::frame_bits
attribute_deprecated int frame_bits
Definition: avcodec.h:1537
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1819
sp5x.h
AVCodecContext::pre_me
attribute_deprecated int pre_me
Definition: avcodec.h:966
OFFSET
#define OFFSET(x)
Definition: mpegvideo_enc.c:4721
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3601
AVCodecContext::prediction_method
attribute_deprecated int prediction_method
Definition: avcodec.h:885
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
FDCTDSPContext
Definition: fdctdsp.h:26
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:874
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
av_packet_add_side_data
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:298
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:1796
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:535
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3563
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:307
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:108
fail
#define fail()
Definition: checkasm.h:123
h261.h
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:112
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc_common.c:539
MpegEncContext::padding_bug_score
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:411
x
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
Definition: fate.txt:150
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:11135
get_intra_count
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1119
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:555
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1035
avcodec_find_encoder
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:914
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
perm
perm
Definition: f_perms.c:74
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:584
MpegEncContext::umvplus
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
pts
static int64_t pts
Definition: transcode_aac.c:647
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:64
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:304
ff_sqrt
#define ff_sqrt
Definition: mathops.h:206
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:106
h263_options
static const AVOption h263_options[]
Definition: mpegvideo_enc.c:4723
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:465
sp5x_quant_table
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
flv.h
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:269
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:476
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2875
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:283
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg4videoenc.c:1062
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:346
AVCodecContext::p_count
attribute_deprecated int p_count
Definition: avcodec.h:1529
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1131
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1747
RateControlContext
rate control context.
Definition: ratecontrol.h:63
mpeg12.h
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:218
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2852
av_cold
#define av_cold
Definition: attributes.h:90
dct.h
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
ff_h261_get_picture_format
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:83
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4267
CONFIG_RV10_ENCODER
#define CONFIG_RV10_ENCODER
Definition: config.h:1323
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
width
#define width
emms_c
#define emms_c()
Definition: internal.h:55
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:47
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
s
#define s(width, name)
Definition: cbs_vp9.c:257
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:79
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:484
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:299
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
ff_mpeg2_dc_scale_table
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:261
g
const char * g
Definition: vf_curves.c:115
MpegEncContext::mb_skip_run
int mb_skip_run
Definition: mpegvideo.h:289
msmpeg4v3_class
static const AVClass msmpeg4v3_class
Definition: mpegvideo_enc.c:4802
sse
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2699
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
HUFFMAN_TABLE_OPTIMAL
@ HUFFMAN_TABLE_OPTIMAL
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
AVCodecContext::mv_bits
attribute_deprecated int mv_bits
Definition: avcodec.h:1519
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:67
bits
uint8_t bits
Definition: vp3data.h:202
FMT_H261
@ FMT_H261
Definition: mpegutils.h:125
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVCodecContext::brd_scale
attribute_deprecated int brd_scale
Definition: avcodec.h:1099
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:202
limits.h
ff_check_alignment
int ff_check_alignment(void)
Definition: me_cmp.c:1014
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:63
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
MpegEncContext::b_count
int b_count
Definition: mpegvideo.h:350
CONFIG_WMV2_ENCODER
#define CONFIG_WMV2_ENCODER
Definition: config.h:1340
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:864
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1404
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1709
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:448
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
PutBitContext
Definition: put_bits.h:35
Picture::encoding_error
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2789
arg
const char * arg
Definition: jacosubdec.c:66
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:87
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1389
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
AVCPBProperties::avg_bitrate
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:472
CONFIG_MPEG4_ENCODER
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1304
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:451
h263_class
static const AVClass h263_class
Definition: mpegvideo_enc.c:4730
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:409
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:38
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:261
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2832
run
uint8_t run
Definition: svq3.c:208
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:288
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:238
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:329
me
#define me
Definition: vf_colormatrix.c:104
AVCodecContext::i_tex_bits
attribute_deprecated int i_tex_bits
Definition: avcodec.h:1523
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:33
AVCodecContext::misc_bits
attribute_deprecated int misc_bits
Definition: avcodec.h:1533
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:172
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:576
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:56
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:114
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
src
#define src
Definition: vp8dsp.c:254
ff_msmpeg4v3_encoder
AVCodec ff_msmpeg4v3_encoder
Definition: mpegvideo_enc.c:4809
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2336
mathops.h
MpegEncContext::mv_bits
int mv_bits
Definition: mpegvideo.h:344
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:338
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3562
AVCodecContext::b_frame_strategy
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:800
AVCodecContext::noise_reduction
attribute_deprecated int noise_reduction
Definition: avcodec.h:1044
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1067
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1015
qpeldsp.h
avcodec_open2
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:548
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1475
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:66
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:236
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:1978
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:127
wmv2.h
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:266
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1560
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:50
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
avpriv_copy_bits
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:254
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3928
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:358
CONFIG_RV20_ENCODER
#define CONFIG_RV20_ENCODER
Definition: config.h:1324
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
MAX_LEVEL
#define MAX_LEVEL
Definition: rl.h:36
avpriv_align_put_bits
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:649
get_sae
static int get_sae(uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1105
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:287
desc
const char * desc
Definition: nvenc.c:79
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
AVCodecContext::vbv_delay
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2027
CONFIG_MPEG1VIDEO_ENCODER
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1302
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3569
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:290
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:71
VE
#define VE
Definition: mpegvideo_enc.c:4722
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
AVPacket::size
int size
Definition: packet.h:356
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:366
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:186
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:721
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:204
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1150
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
CONFIG_H261_ENCODER
#define CONFIG_H261_ENCODER
Definition: config.h:1292
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2080
CONFIG_MSMPEG4_ENCODER
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:412
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:115
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:38
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:207
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:333
AVCodecContext::frame_skip_exp
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:1463
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:53
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
size
int size
Definition: twinvq_data.h:11134
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVCodecContext::rtp_payload_size
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:1508
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:149
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:119
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:1028
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
MpegEncContext::interlaced_dct
int interlaced_dct
Definition: mpegvideo.h:491
CONFIG_FAANDCT
#define CONFIG_FAANDCT
Definition: config.h:626
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:93
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:354
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
Definition: mpegvideo_enc.c:2194
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
ff_msmpeg4_encode_init
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:265
AVCPBProperties::max_bitrate
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:454
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
MpegEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:128
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:150
rv10.h
AVCodecContext::i_count
attribute_deprecated int i_count
Definition: avcodec.h:1527
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1592
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:214
av_log2
#define av_log2
Definition: intmath.h:83
M_PI
#define M_PI
Definition: mathematics.h:52
r
#define r
Definition: input.c:40
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:38
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:174
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:485
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:740
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:490
ff_msmpeg4v2_encoder
AVCodec ff_msmpeg4v2_encoder
Definition: mpegvideo_enc.c:4788
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2648
src1
#define src1
Definition: h264pred.c:139
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::last_mv
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:213
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:489
FMT_H263
@ FMT_H263
Definition: mpegutils.h:126
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:54
AVCodecContext::b_sensitivity
attribute_deprecated int b_sensitivity
Definition: avcodec.h:1132
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:483
lrintf
#define lrintf(x)
Definition: libm_mips.h:70
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:67
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3952
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2911
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:33
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1366
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1052
MpegEncContext::esc3_level_length
int esc3_level_length
Definition: mpegvideo.h:440
MpegEncContext::obmc
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:408
AVCodecContext::frame_skip_cmp
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:1467
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:288
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:55
av_always_inline
#define av_always_inline
Definition: attributes.h:49
AVCodecContext::header_bits
attribute_deprecated int header_bits
Definition: avcodec.h:1521
get_visual_weight
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2170
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:941
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
h263p_class
static const AVClass h263p_class
Definition: mpegvideo_enc.c:4759
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1323
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:566
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
AVCPBProperties::min_bitrate
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:463
AVCodecContext::height
int height
Definition: avcodec.h:699
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:392
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:109
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:127
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
MpegEncContext::h263_slice_structured
int h263_slice_structured
Definition: mpegvideo.h:377
avcodec.h
msmpeg4.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:755
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:343
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
CONFIG_MJPEG_ENCODER
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1301
AVCPBProperties::buffer_size
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:481
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1589
w
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo ug o o w
Definition: fate.txt:150
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:117
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_mjpeg_encode_close
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:126
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:591
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:231
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
AVCodecContext::scenechange_threshold
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:1040
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1700
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:508
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:210
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:82
Picture::shared
int shared
Definition: mpegpicture.h:88
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:68
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:466
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:324
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:351
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:104
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
mpeg4video.h
MpegEncContext::last_bits
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1368
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:46
MpegEncContext::gop_picture_number
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:451
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1500
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1676
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2253
temp
else temp
Definition: vf_mcdeint.c:256
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:74
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:110
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1017
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
shift
static int shift(int a, int b)
Definition: sonic.c:82
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:54
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:786
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:582
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:90
packet_internal.h
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1037
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
skip_check
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
Definition: mpegvideo_enc.c:1301
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:288
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:576
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1418
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4286
ff_h263p_encoder
AVCodec ff_h263p_encoder
Definition: mpegvideo_enc.c:4766
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:589
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
FF_CMP_DCTMAX
#define FF_CMP_DCTMAX
Definition: avcodec.h:944
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:131
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1177
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1016
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2136
libxvid.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:64
encode_picture
static int encode_picture(MpegEncContext *s, int picture_number)
Definition: mpegvideo_enc.c:3653
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
AVCodecContext::me_penalty_compensation
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:1087
bytestream.h
wmv1_class
static const AVClass wmv1_class
Definition: mpegvideo_enc.c:4823
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:590
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:587
FF_ALLOCZ_OR_GOTO
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:149
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2609
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:65
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
h
h
Definition: vp9dsp_template.c:2038
CONFIG_FLV_ENCODER
#define CONFIG_FLV_ENCODER
Definition: config.h:1290
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:154
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2581
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:70
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4638
ff_alloc_packet2
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
int
int
Definition: ffmpeg_filter.c:192
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:418
msmpeg4v2_class
static const AVClass msmpeg4v2_class
Definition: mpegvideo_enc.c:4781
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:616
AVCodecContext::frame_skip_factor
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:1459
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:81
mb_info
Definition: cinepakenc.c:87
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:905
MpegEncContext::alt_inter_vlc
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:347
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
pixblockdsp.h
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:111
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
av_init_packet
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
h263p_options
static const AVOption h263p_options[]
Definition: mpegvideo_enc.c:4751
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2814
ff_h263_encoder
AVCodec ff_h263_encoder
Definition: mpegvideo_enc.c:4737
intmath.h