FFmpeg  2.6.9
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include <stdint.h>
31 
32 #include "libavutil/internal.h"
33 #include "libavutil/intmath.h"
34 #include "libavutil/mathematics.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/timer.h"
38 #include "avcodec.h"
39 #include "dct.h"
40 #include "idctdsp.h"
41 #include "mpeg12.h"
42 #include "mpegvideo.h"
43 #include "h261.h"
44 #include "h263.h"
45 #include "mjpegenc_common.h"
46 #include "mathops.h"
47 #include "mpegutils.h"
48 #include "mjpegenc.h"
49 #include "msmpeg4.h"
50 #include "pixblockdsp.h"
51 #include "qpeldsp.h"
52 #include "faandct.h"
53 #include "thread.h"
54 #include "aandcttab.h"
55 #include "flv.h"
56 #include "mpeg4video.h"
57 #include "internal.h"
58 #include "bytestream.h"
59 #include <limits.h>
60 #include "sp5x.h"
61 
62 #define QUANT_BIAS_SHIFT 8
63 
64 #define QMAT_SHIFT_MMX 16
65 #define QMAT_SHIFT 21
66 
68 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
69 static int sse_mb(MpegEncContext *s);
70 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
71 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
72 
75 
78  { NULL },
79 };
80 
81 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
82  uint16_t (*qmat16)[2][64],
83  const uint16_t *quant_matrix,
84  int bias, int qmin, int qmax, int intra)
85 {
86  FDCTDSPContext *fdsp = &s->fdsp;
87  int qscale;
88  int shift = 0;
89 
90  for (qscale = qmin; qscale <= qmax; qscale++) {
91  int i;
92  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
94  fdsp->fdct == ff_faandct ||
95 #endif /* CONFIG_FAANDCT */
96  fdsp->fdct == ff_jpeg_fdct_islow_10) {
97  for (i = 0; i < 64; i++) {
98  const int j = s->idsp.idct_permutation[i];
99  int64_t den = (int64_t) qscale * quant_matrix[j];
100  /* 16 <= qscale * quant_matrix[i] <= 7905
101  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
102  * 19952 <= x <= 249205026
103  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
104  * 3444240 >= (1 << 36) / (x) >= 275 */
105 
106  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
107  }
108  } else if (fdsp->fdct == ff_fdct_ifast) {
109  for (i = 0; i < 64; i++) {
110  const int j = s->idsp.idct_permutation[i];
111  int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
112  /* 16 <= qscale * quant_matrix[i] <= 7905
113  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
114  * 19952 <= x <= 249205026
115  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
116  * 3444240 >= (1 << 36) / (x) >= 275 */
117 
118  qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) / den);
119  }
120  } else {
121  for (i = 0; i < 64; i++) {
122  const int j = s->idsp.idct_permutation[i];
123  int64_t den = (int64_t) qscale * quant_matrix[j];
124  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
125  * Assume x = qscale * quant_matrix[i]
126  * So 16 <= x <= 7905
127  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
128  * so 32768 >= (1 << 19) / (x) >= 67 */
129  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) / den);
130  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
131  // (qscale * quant_matrix[i]);
132  qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / den;
133 
134  if (qmat16[qscale][0][i] == 0 ||
135  qmat16[qscale][0][i] == 128 * 256)
136  qmat16[qscale][0][i] = 128 * 256 - 1;
137  qmat16[qscale][1][i] =
138  ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
139  qmat16[qscale][0][i]);
140  }
141  }
142 
143  for (i = intra; i < 64; i++) {
144  int64_t max = 8191;
145  if (fdsp->fdct == ff_fdct_ifast) {
146  max = (8191LL * ff_aanscales[i]) >> 14;
147  }
148  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
149  shift++;
150  }
151  }
152  }
153  if (shift) {
155  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
156  QMAT_SHIFT - shift);
157  }
158 }
159 
160 static inline void update_qscale(MpegEncContext *s)
161 {
162  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
163  (FF_LAMBDA_SHIFT + 7);
164  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
165 
166  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
168 }
169 
170 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
171 {
172  int i;
173 
174  if (matrix) {
175  put_bits(pb, 1, 1);
176  for (i = 0; i < 64; i++) {
177  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
178  }
179  } else
180  put_bits(pb, 1, 0);
181 }
182 
183 /**
184  * init s->current_picture.qscale_table from s->lambda_table
185  */
187 {
188  int8_t * const qscale_table = s->current_picture.qscale_table;
189  int i;
190 
191  for (i = 0; i < s->mb_num; i++) {
192  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
193  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
194  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
195  s->avctx->qmax);
196  }
197 }
198 
201 {
202 #define COPY(a) dst->a= src->a
203  COPY(pict_type);
205  COPY(f_code);
206  COPY(b_code);
207  COPY(qscale);
208  COPY(lambda);
209  COPY(lambda2);
212  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
213  COPY(progressive_frame); // FIXME don't set in encode_header
214  COPY(partitioned_frame); // FIXME don't set in encode_header
215 #undef COPY
216 }
217 
218 /**
219  * Set the given MpegEncContext to defaults for encoding.
220  * the changed fields will not depend upon the prior state of the MpegEncContext.
221  */
223 {
224  int i;
226 
227  for (i = -16; i < 16; i++) {
228  default_fcode_tab[i + MAX_MV] = 1;
229  }
232 
233  s->input_picture_number = 0;
234  s->picture_in_gop_number = 0;
235 }
236 
238  if (ARCH_X86)
240 
243  if (!s->dct_quantize)
245  if (!s->denoise_dct)
248  if (s->avctx->trellis)
250 
251  return 0;
252 }
253 
254 /* init video encoder */
256 {
257  MpegEncContext *s = avctx->priv_data;
258  int i, ret, format_supported;
259 
261 
262  switch (avctx->codec_id) {
264  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
265  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
266  av_log(avctx, AV_LOG_ERROR,
267  "only YUV420 and YUV422 are supported\n");
268  return -1;
269  }
270  break;
271  case AV_CODEC_ID_MJPEG:
272  case AV_CODEC_ID_AMV:
273  format_supported = 0;
274  /* JPEG color space */
275  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
276  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
277  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
278  (avctx->color_range == AVCOL_RANGE_JPEG &&
279  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
280  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
281  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
282  format_supported = 1;
283  /* MPEG color space */
284  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
285  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
286  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
287  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
288  format_supported = 1;
289 
290  if (!format_supported) {
291  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
292  return -1;
293  }
294  break;
295  default:
296  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
297  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
298  return -1;
299  }
300  }
301 
302  switch (avctx->pix_fmt) {
303  case AV_PIX_FMT_YUVJ444P:
304  case AV_PIX_FMT_YUV444P:
306  break;
307  case AV_PIX_FMT_YUVJ422P:
308  case AV_PIX_FMT_YUV422P:
310  break;
311  case AV_PIX_FMT_YUVJ420P:
312  case AV_PIX_FMT_YUV420P:
313  default:
315  break;
316  }
317 
318  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
319  s->bit_rate = avctx->bit_rate;
320  s->width = avctx->width;
321  s->height = avctx->height;
322  if (avctx->gop_size > 600 &&
324  av_log(avctx, AV_LOG_WARNING,
325  "keyframe interval too large!, reducing it from %d to %d\n",
326  avctx->gop_size, 600);
327  avctx->gop_size = 600;
328  }
329  s->gop_size = avctx->gop_size;
330  s->avctx = avctx;
331  s->flags = avctx->flags;
332  s->flags2 = avctx->flags2;
333  if (avctx->max_b_frames > MAX_B_FRAMES) {
334  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
335  "is %d.\n", MAX_B_FRAMES);
336  avctx->max_b_frames = MAX_B_FRAMES;
337  }
338  s->max_b_frames = avctx->max_b_frames;
339  s->codec_id = avctx->codec->id;
341  s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
342  s->mpeg_quant = avctx->mpeg_quant;
343  s->rtp_mode = !!avctx->rtp_payload_size;
345 
346  // workaround some differences between how applications specify dc precision
347  if (s->intra_dc_precision < 0) {
348  s->intra_dc_precision += 8;
349  } else if (s->intra_dc_precision >= 8)
350  s->intra_dc_precision -= 8;
351 
352  if (s->intra_dc_precision < 0) {
353  av_log(avctx, AV_LOG_ERROR,
354  "intra dc precision must be positive, note some applications use"
355  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
356  return AVERROR(EINVAL);
357  }
358 
359  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
360  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
361  return AVERROR(EINVAL);
362  }
364 
365  if (s->gop_size <= 1) {
366  s->intra_only = 1;
367  s->gop_size = 12;
368  } else {
369  s->intra_only = 0;
370  }
371 
372  s->me_method = avctx->me_method;
373 
374  /* Fixed QSCALE */
375  s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
376 
377 #if FF_API_MPV_OPT
379  if (avctx->border_masking != 0.0)
380  s->border_masking = avctx->border_masking;
382 #endif
383 
384  s->adaptive_quant = (s->avctx->lumi_masking ||
385  s->avctx->dark_masking ||
388  s->avctx->p_masking ||
389  s->border_masking ||
390  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
391  !s->fixed_qscale;
392 
394 
395  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
396  switch(avctx->codec_id) {
399  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
400  break;
401  case AV_CODEC_ID_MPEG4:
405  if (avctx->rc_max_rate >= 15000000) {
406  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
407  } else if(avctx->rc_max_rate >= 2000000) {
408  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
409  } else if(avctx->rc_max_rate >= 384000) {
410  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
411  } else
412  avctx->rc_buffer_size = 40;
413  avctx->rc_buffer_size *= 16384;
414  break;
415  }
416  if (avctx->rc_buffer_size) {
417  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
418  }
419  }
420 
421  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
422  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
423  return -1;
424  }
425 
426  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
427  av_log(avctx, AV_LOG_INFO,
428  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
429  }
430 
431  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
432  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
433  return -1;
434  }
435 
436  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
437  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
438  return -1;
439  }
440 
441  if (avctx->rc_max_rate &&
442  avctx->rc_max_rate == avctx->bit_rate &&
443  avctx->rc_max_rate != avctx->rc_min_rate) {
444  av_log(avctx, AV_LOG_INFO,
445  "impossible bitrate constraints, this will fail\n");
446  }
447 
448  if (avctx->rc_buffer_size &&
449  avctx->bit_rate * (int64_t)avctx->time_base.num >
450  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
451  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
452  return -1;
453  }
454 
455  if (!s->fixed_qscale &&
456  avctx->bit_rate * av_q2d(avctx->time_base) >
457  avctx->bit_rate_tolerance) {
458  av_log(avctx, AV_LOG_WARNING,
459  "bitrate tolerance %d too small for bitrate %d, overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
460  avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
461  }
462 
463  if (s->avctx->rc_max_rate &&
464  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
467  90000LL * (avctx->rc_buffer_size - 1) >
468  s->avctx->rc_max_rate * 0xFFFFLL) {
469  av_log(avctx, AV_LOG_INFO,
470  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
471  "specified vbv buffer is too large for the given bitrate!\n");
472  }
473 
474  if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
476  s->codec_id != AV_CODEC_ID_FLV1) {
477  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
478  return -1;
479  }
480 
481  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
482  av_log(avctx, AV_LOG_ERROR,
483  "OBMC is only supported with simple mb decision\n");
484  return -1;
485  }
486 
487  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
488  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
489  return -1;
490  }
491 
492  if (s->max_b_frames &&
493  s->codec_id != AV_CODEC_ID_MPEG4 &&
496  av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
497  return -1;
498  }
499  if (s->max_b_frames < 0) {
500  av_log(avctx, AV_LOG_ERROR,
501  "max b frames must be 0 or positive for mpegvideo based encoders\n");
502  return -1;
503  }
504 
505  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
506  s->codec_id == AV_CODEC_ID_H263 ||
507  s->codec_id == AV_CODEC_ID_H263P) &&
508  (avctx->sample_aspect_ratio.num > 255 ||
509  avctx->sample_aspect_ratio.den > 255)) {
510  av_log(avctx, AV_LOG_WARNING,
511  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
514  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
515  }
516 
517  if ((s->codec_id == AV_CODEC_ID_H263 ||
518  s->codec_id == AV_CODEC_ID_H263P) &&
519  (avctx->width > 2048 ||
520  avctx->height > 1152 )) {
521  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
522  return -1;
523  }
524  if ((s->codec_id == AV_CODEC_ID_H263 ||
525  s->codec_id == AV_CODEC_ID_H263P) &&
526  ((avctx->width &3) ||
527  (avctx->height&3) )) {
528  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
529  return -1;
530  }
531 
532  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
533  (avctx->width > 4095 ||
534  avctx->height > 4095 )) {
535  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
536  return -1;
537  }
538 
539  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
540  (avctx->width > 16383 ||
541  avctx->height > 16383 )) {
542  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
543  return -1;
544  }
545 
546  if (s->codec_id == AV_CODEC_ID_RV10 &&
547  (avctx->width &15 ||
548  avctx->height&15 )) {
549  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
550  return AVERROR(EINVAL);
551  }
552 
553  if (s->codec_id == AV_CODEC_ID_RV20 &&
554  (avctx->width &3 ||
555  avctx->height&3 )) {
556  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
557  return AVERROR(EINVAL);
558  }
559 
560  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
561  s->codec_id == AV_CODEC_ID_WMV2) &&
562  avctx->width & 1) {
563  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
564  return -1;
565  }
566 
569  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
570  return -1;
571  }
572 
573  // FIXME mpeg2 uses that too
574  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
575  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
576  av_log(avctx, AV_LOG_ERROR,
577  "mpeg2 style quantization not supported by codec\n");
578  return -1;
579  }
580 
581  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
582  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
583  return -1;
584  }
585 
586  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
588  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
589  return -1;
590  }
591 
592  if (s->avctx->scenechange_threshold < 1000000000 &&
593  (s->flags & CODEC_FLAG_CLOSED_GOP)) {
594  av_log(avctx, AV_LOG_ERROR,
595  "closed gop with scene change detection are not supported yet, "
596  "set threshold to 1000000000\n");
597  return -1;
598  }
599 
600  if (s->flags & CODEC_FLAG_LOW_DELAY) {
601  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
602  av_log(avctx, AV_LOG_ERROR,
603  "low delay forcing is only available for mpeg2\n");
604  return -1;
605  }
606  if (s->max_b_frames != 0) {
607  av_log(avctx, AV_LOG_ERROR,
608  "b frames cannot be used with low delay\n");
609  return -1;
610  }
611  }
612 
613  if (s->q_scale_type == 1) {
614  if (avctx->qmax > 12) {
615  av_log(avctx, AV_LOG_ERROR,
616  "non linear quant only supports qmax <= 12 currently\n");
617  return -1;
618  }
619  }
620 
621  if (s->avctx->thread_count > 1 &&
622  s->codec_id != AV_CODEC_ID_MPEG4 &&
625  s->codec_id != AV_CODEC_ID_MJPEG &&
626  (s->codec_id != AV_CODEC_ID_H263P)) {
627  av_log(avctx, AV_LOG_ERROR,
628  "multi threaded encoding not supported by codec\n");
629  return -1;
630  }
631 
632  if (s->avctx->thread_count < 1) {
633  av_log(avctx, AV_LOG_ERROR,
634  "automatic thread number detection not supported by codec, "
635  "patch welcome\n");
636  return -1;
637  }
638 
639  if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
640  s->rtp_mode = 1;
641 
642  if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
643  s->h263_slice_structured = 1;
644 
645  if (!avctx->time_base.den || !avctx->time_base.num) {
646  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
647  return -1;
648  }
649 
650  if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
651  av_log(avctx, AV_LOG_INFO,
652  "notice: b_frame_strategy only affects the first pass\n");
653  avctx->b_frame_strategy = 0;
654  }
655 
656  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
657  if (i > 1) {
658  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
659  avctx->time_base.den /= i;
660  avctx->time_base.num /= i;
661  //return -1;
662  }
663 
665  // (a + x * 3 / 8) / x
666  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
667  s->inter_quant_bias = 0;
668  } else {
669  s->intra_quant_bias = 0;
670  // (a - x / 4) / x
671  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
672  }
673 
674  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
675  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
676  return AVERROR(EINVAL);
677  }
678 
683 
684  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
685 
686  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
687  s->avctx->time_base.den > (1 << 16) - 1) {
688  av_log(avctx, AV_LOG_ERROR,
689  "timebase %d/%d not supported by MPEG 4 standard, "
690  "the maximum admitted value for the timebase denominator "
691  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
692  (1 << 16) - 1);
693  return -1;
694  }
695  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
696 
697  switch (avctx->codec->id) {
699  s->out_format = FMT_MPEG1;
700  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
701  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
702  break;
704  s->out_format = FMT_MPEG1;
705  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
706  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
707  s->rtp_mode = 1;
708  break;
709  case AV_CODEC_ID_MJPEG:
710  case AV_CODEC_ID_AMV:
711  s->out_format = FMT_MJPEG;
712  s->intra_only = 1; /* force intra only for jpeg */
713  if (!CONFIG_MJPEG_ENCODER ||
714  ff_mjpeg_encode_init(s) < 0)
715  return -1;
716  avctx->delay = 0;
717  s->low_delay = 1;
718  break;
719  case AV_CODEC_ID_H261:
720  if (!CONFIG_H261_ENCODER)
721  return -1;
722  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
723  av_log(avctx, AV_LOG_ERROR,
724  "The specified picture size of %dx%d is not valid for the "
725  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
726  s->width, s->height);
727  return -1;
728  }
729  s->out_format = FMT_H261;
730  avctx->delay = 0;
731  s->low_delay = 1;
732  s->rtp_mode = 0; /* Sliced encoding not supported */
733  break;
734  case AV_CODEC_ID_H263:
735  if (!CONFIG_H263_ENCODER)
736  return -1;
738  s->width, s->height) == 8) {
739  av_log(avctx, AV_LOG_ERROR,
740  "The specified picture size of %dx%d is not valid for "
741  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
742  "352x288, 704x576, and 1408x1152. "
743  "Try H.263+.\n", s->width, s->height);
744  return -1;
745  }
746  s->out_format = FMT_H263;
747  avctx->delay = 0;
748  s->low_delay = 1;
749  break;
750  case AV_CODEC_ID_H263P:
751  s->out_format = FMT_H263;
752  s->h263_plus = 1;
753  /* Fx */
754  s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
755  s->modified_quant = s->h263_aic;
756  s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
757  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
758 
759  /* /Fx */
760  /* These are just to be sure */
761  avctx->delay = 0;
762  s->low_delay = 1;
763  break;
764  case AV_CODEC_ID_FLV1:
765  s->out_format = FMT_H263;
766  s->h263_flv = 2; /* format = 1; 11-bit codes */
767  s->unrestricted_mv = 1;
768  s->rtp_mode = 0; /* don't allow GOB */
769  avctx->delay = 0;
770  s->low_delay = 1;
771  break;
772  case AV_CODEC_ID_RV10:
773  s->out_format = FMT_H263;
774  avctx->delay = 0;
775  s->low_delay = 1;
776  break;
777  case AV_CODEC_ID_RV20:
778  s->out_format = FMT_H263;
779  avctx->delay = 0;
780  s->low_delay = 1;
781  s->modified_quant = 1;
782  s->h263_aic = 1;
783  s->h263_plus = 1;
784  s->loop_filter = 1;
785  s->unrestricted_mv = 0;
786  break;
787  case AV_CODEC_ID_MPEG4:
788  s->out_format = FMT_H263;
789  s->h263_pred = 1;
790  s->unrestricted_mv = 1;
791  s->low_delay = s->max_b_frames ? 0 : 1;
792  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
793  break;
795  s->out_format = FMT_H263;
796  s->h263_pred = 1;
797  s->unrestricted_mv = 1;
798  s->msmpeg4_version = 2;
799  avctx->delay = 0;
800  s->low_delay = 1;
801  break;
803  s->out_format = FMT_H263;
804  s->h263_pred = 1;
805  s->unrestricted_mv = 1;
806  s->msmpeg4_version = 3;
807  s->flipflop_rounding = 1;
808  avctx->delay = 0;
809  s->low_delay = 1;
810  break;
811  case AV_CODEC_ID_WMV1:
812  s->out_format = FMT_H263;
813  s->h263_pred = 1;
814  s->unrestricted_mv = 1;
815  s->msmpeg4_version = 4;
816  s->flipflop_rounding = 1;
817  avctx->delay = 0;
818  s->low_delay = 1;
819  break;
820  case AV_CODEC_ID_WMV2:
821  s->out_format = FMT_H263;
822  s->h263_pred = 1;
823  s->unrestricted_mv = 1;
824  s->msmpeg4_version = 5;
825  s->flipflop_rounding = 1;
826  avctx->delay = 0;
827  s->low_delay = 1;
828  break;
829  default:
830  return -1;
831  }
832 
833  avctx->has_b_frames = !s->low_delay;
834 
835  s->encoding = 1;
836 
837  s->progressive_frame =
840  s->alternate_scan);
841 
842  /* init */
843  ff_mpv_idct_init(s);
844  if (ff_mpv_common_init(s) < 0)
845  return -1;
846 
847  ff_fdctdsp_init(&s->fdsp, avctx);
848  ff_me_cmp_init(&s->mecc, avctx);
850  ff_pixblockdsp_init(&s->pdsp, avctx);
851  ff_qpeldsp_init(&s->qdsp);
852 
854 
855  if (s->msmpeg4_version) {
857  2 * 2 * (MAX_LEVEL + 1) *
858  (MAX_RUN + 1) * 2 * sizeof(int), fail);
859  }
860  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
861 
862  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
863  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
864  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
865  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
866  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
867  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
869  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
871  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
872 
873  if (s->avctx->noise_reduction) {
875  2 * 64 * sizeof(uint16_t), fail);
876  }
877 
879 
882 
883  s->quant_precision = 5;
884 
887 
893  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
894  return ret;
896  && s->out_format == FMT_MPEG1)
898 
899  /* init q matrix */
900  for (i = 0; i < 64; i++) {
901  int j = s->idsp.idct_permutation[i];
903  s->mpeg_quant) {
906  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
907  s->intra_matrix[j] =
909  } else {
910  /* mpeg1/2 */
911  s->chroma_intra_matrix[j] =
914  }
915  if (s->avctx->intra_matrix)
916  s->intra_matrix[j] = s->avctx->intra_matrix[i];
917  if (s->avctx->inter_matrix)
918  s->inter_matrix[j] = s->avctx->inter_matrix[i];
919  }
920 
921  /* precompute matrix */
922  /* for mjpeg, we do include qscale in the matrix */
923  if (s->out_format != FMT_MJPEG) {
925  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
926  31, 1);
928  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
929  31, 0);
930  }
931 
932  if (ff_rate_control_init(s) < 0)
933  return -1;
934 
935 #if FF_API_ERROR_RATE
937  if (avctx->error_rate)
938  s->error_rate = avctx->error_rate;
940 #endif
941 
942 #if FF_API_NORMALIZE_AQP
944  if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
947 #endif
948 
949 #if FF_API_MV0
951  if (avctx->flags & CODEC_FLAG_MV0)
954 #endif
955 
956 #if FF_API_MPV_OPT
958  if (avctx->rc_qsquish != 0.0)
959  s->rc_qsquish = avctx->rc_qsquish;
960  if (avctx->rc_qmod_amp != 0.0)
961  s->rc_qmod_amp = avctx->rc_qmod_amp;
962  if (avctx->rc_qmod_freq)
963  s->rc_qmod_freq = avctx->rc_qmod_freq;
964  if (avctx->rc_buffer_aggressivity != 1.0)
966  if (avctx->rc_initial_cplx != 0.0)
967  s->rc_initial_cplx = avctx->rc_initial_cplx;
968  if (avctx->lmin)
969  s->lmin = avctx->lmin;
970  if (avctx->lmax)
971  s->lmax = avctx->lmax;
972 
973  if (avctx->rc_eq) {
974  av_freep(&s->rc_eq);
975  s->rc_eq = av_strdup(avctx->rc_eq);
976  if (!s->rc_eq)
977  return AVERROR(ENOMEM);
978  }
980 #endif
981 
982  if (avctx->b_frame_strategy == 2) {
983  for (i = 0; i < s->max_b_frames + 2; i++) {
984  s->tmp_frames[i] = av_frame_alloc();
985  if (!s->tmp_frames[i])
986  return AVERROR(ENOMEM);
987 
989  s->tmp_frames[i]->width = s->width >> avctx->brd_scale;
990  s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
991 
992  ret = av_frame_get_buffer(s->tmp_frames[i], 32);
993  if (ret < 0)
994  return ret;
995  }
996  }
997 
998  return 0;
999 fail:
1000  ff_mpv_encode_end(avctx);
1001  return AVERROR_UNKNOWN;
1002 }
1003 
1005 {
1006  MpegEncContext *s = avctx->priv_data;
1007  int i;
1008 
1010 
1011  ff_mpv_common_end(s);
1012  if (CONFIG_MJPEG_ENCODER &&
1013  s->out_format == FMT_MJPEG)
1015 
1016  av_freep(&avctx->extradata);
1017 
1018  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1019  av_frame_free(&s->tmp_frames[i]);
1020 
1023 
1024  av_freep(&s->avctx->stats_out);
1025  av_freep(&s->ac_stats);
1026 
1031  av_freep(&s->q_intra_matrix);
1032  av_freep(&s->q_inter_matrix);
1035  av_freep(&s->input_picture);
1037  av_freep(&s->dct_offset);
1038 
1039  return 0;
1040 }
1041 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 *
 * @param src    top-left corner of the 16x16 pixel block
 * @param ref    reference value subtracted from every pixel (typically the
 *               block mean)
 * @param stride distance in bytes between vertically adjacent pixels
 * @return sum over all 256 pixels of |src[x + y*stride] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int sum = 0;

    for (row = 0; row < 16; row++) {
        uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            sum += diff < 0 ? -diff : diff;
        }
    }

    return sum;
}
1055 
1057  uint8_t *ref, int stride)
1058 {
1059  int x, y, w, h;
1060  int acc = 0;
1061 
1062  w = s->width & ~15;
1063  h = s->height & ~15;
1064 
1065  for (y = 0; y < h; y += 16) {
1066  for (x = 0; x < w; x += 16) {
1067  int offset = x + y * stride;
1068  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1069  stride, 16);
1070  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1071  int sae = get_sae(src + offset, mean, stride);
1072 
1073  acc += sae + 500 < sad;
1074  }
1075  }
1076  return acc;
1077 }
1078 
1079 
1080 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1081 {
1082  Picture *pic = NULL;
1083  int64_t pts;
1084  int i, display_picture_number = 0, ret;
1085  const int encoding_delay = s->max_b_frames ? s->max_b_frames :
1086  (s->low_delay ? 0 : 1);
1087  int direct = 1;
1088 
1089  if (pic_arg) {
1090  pts = pic_arg->pts;
1091  display_picture_number = s->input_picture_number++;
1092 
1093  if (pts != AV_NOPTS_VALUE) {
1094  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1095  int64_t last = s->user_specified_pts;
1096 
1097  if (pts <= last) {
1099  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1100  pts, last);
1101  return AVERROR(EINVAL);
1102  }
1103 
1104  if (!s->low_delay && display_picture_number == 1)
1105  s->dts_delta = pts - last;
1106  }
1107  s->user_specified_pts = pts;
1108  } else {
1109  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1110  s->user_specified_pts =
1111  pts = s->user_specified_pts + 1;
1112  av_log(s->avctx, AV_LOG_INFO,
1113  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1114  pts);
1115  } else {
1116  pts = display_picture_number;
1117  }
1118  }
1119  }
1120 
1121  if (pic_arg) {
1122  if (!pic_arg->buf[0] ||
1123  pic_arg->linesize[0] != s->linesize ||
1124  pic_arg->linesize[1] != s->uvlinesize ||
1125  pic_arg->linesize[2] != s->uvlinesize)
1126  direct = 0;
1127  if ((s->width & 15) || (s->height & 15))
1128  direct = 0;
1129  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1130  direct = 0;
1131  if (s->linesize & (STRIDE_ALIGN-1))
1132  direct = 0;
1133 
1134  av_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1135  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1136 
1137  i = ff_find_unused_picture(s, direct);
1138  if (i < 0)
1139  return i;
1140 
1141  pic = &s->picture[i];
1142  pic->reference = 3;
1143 
1144  if (direct) {
1145  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1146  return ret;
1147  if (ff_alloc_picture(s, pic, 1) < 0) {
1148  return -1;
1149  }
1150  } else {
1151  if (ff_alloc_picture(s, pic, 0) < 0) {
1152  return -1;
1153  }
1154 
1155  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1156  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1157  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1158  // empty
1159  } else {
1160  int h_chroma_shift, v_chroma_shift;
1162  &h_chroma_shift,
1163  &v_chroma_shift);
1164 
1165  for (i = 0; i < 3; i++) {
1166  int src_stride = pic_arg->linesize[i];
1167  int dst_stride = i ? s->uvlinesize : s->linesize;
1168  int h_shift = i ? h_chroma_shift : 0;
1169  int v_shift = i ? v_chroma_shift : 0;
1170  int w = s->width >> h_shift;
1171  int h = s->height >> v_shift;
1172  uint8_t *src = pic_arg->data[i];
1173  uint8_t *dst = pic->f->data[i];
1174  int vpad = 16;
1175 
1176  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1177  && !s->progressive_sequence
1178  && FFALIGN(s->height, 32) - s->height > 16)
1179  vpad = 32;
1180 
1181  if (!s->avctx->rc_buffer_size)
1182  dst += INPLACE_OFFSET;
1183 
1184  if (src_stride == dst_stride)
1185  memcpy(dst, src, src_stride * h);
1186  else {
1187  int h2 = h;
1188  uint8_t *dst2 = dst;
1189  while (h2--) {
1190  memcpy(dst2, src, w);
1191  dst2 += dst_stride;
1192  src += src_stride;
1193  }
1194  }
1195  if ((s->width & 15) || (s->height & (vpad-1))) {
1196  s->mpvencdsp.draw_edges(dst, dst_stride,
1197  w, h,
1198  16 >> h_shift,
1199  vpad >> v_shift,
1200  EDGE_BOTTOM);
1201  }
1202  }
1203  }
1204  }
1205  ret = av_frame_copy_props(pic->f, pic_arg);
1206  if (ret < 0)
1207  return ret;
1208 
1209  pic->f->display_picture_number = display_picture_number;
1210  pic->f->pts = pts; // we set this here to avoid modifiying pic_arg
1211  }
1212 
1213  /* shift buffer entries */
1214  for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1215  s->input_picture[i - 1] = s->input_picture[i];
1216 
1217  s->input_picture[encoding_delay] = (Picture*) pic;
1218 
1219  return 0;
1220 }
1221 
1222 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1223 {
1224  int x, y, plane;
1225  int score = 0;
1226  int64_t score64 = 0;
1227 
1228  for (plane = 0; plane < 3; plane++) {
1229  const int stride = p->f->linesize[plane];
1230  const int bw = plane ? 1 : 2;
1231  for (y = 0; y < s->mb_height * bw; y++) {
1232  for (x = 0; x < s->mb_width * bw; x++) {
1233  int off = p->shared ? 0 : 16;
1234  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1235  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1236  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1237 
1238  switch (FFABS(s->avctx->frame_skip_exp)) {
1239  case 0: score = FFMAX(score, v); break;
1240  case 1: score += FFABS(v); break;
1241  case 2: score64 += v * (int64_t)v; break;
1242  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1243  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1244  }
1245  }
1246  }
1247  }
1248  emms_c();
1249 
1250  if (score)
1251  score64 = score;
1252  if (s->avctx->frame_skip_exp < 0)
1253  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1254  -1.0/s->avctx->frame_skip_exp);
1255 
1256  if (score64 < s->avctx->frame_skip_threshold)
1257  return 1;
1258  if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1259  return 1;
1260  return 0;
1261 }
1262 
1264 {
1265  AVPacket pkt = { 0 };
1266  int ret, got_output;
1267 
1268  av_init_packet(&pkt);
1269  ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1270  if (ret < 0)
1271  return ret;
1272 
1273  ret = pkt.size;
1274  av_free_packet(&pkt);
1275  return ret;
1276 }
1277 
/* estimate_best_b_count(): brute-force search for the best number of
 * consecutive B-frames by encoding downscaled copies of the queued input
 * pictures with a throwaway encoder and comparing rate-distortion cost.
 * NOTE(review): the HTML extraction dropped several lines of this function
 * (orig. 1278 signature, 1280-1281 codec/context allocation, 1297, 1301,
 * 1307, 1355, 1366) — restore them from the pristine source before
 * compiling. */
1279 {
1282  const int scale = s->avctx->brd_scale;
1283  int i, j, out_size, p_lambda, b_lambda, lambda2;
1284  int64_t best_rd = INT64_MAX;
1285  int best_b_count = -1;
1286 
1287  av_assert0(scale >= 0 && scale <= 3);
1288 
1289  //emms_c();
1290  //s->next_picture_ptr->quality;
1291  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1292  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1293  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1294  if (!b_lambda) // FIXME we should do this somewhere else
1295  b_lambda = p_lambda;
/* lambda2 continuation (orig. 1297, ">> FF_LAMBDA_SHIFT;") was dropped here */
1296  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1298 
/* configure the throwaway encoder context from the real one, downscaled */
1299  c->width = s->width >> scale;
1300  c->height = s->height >> scale;
1302  c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1303  c->mb_decision = s->avctx->mb_decision;
1304  c->me_cmp = s->avctx->me_cmp;
1305  c->mb_cmp = s->avctx->mb_cmp;
1306  c->me_sub_cmp = s->avctx->me_sub_cmp;
1308  c->time_base = s->avctx->time_base;
1309  c->max_b_frames = s->max_b_frames;
1310 
1311  if (avcodec_open2(c, codec, NULL) < 0)
1312  return -1;
1313 
/* downscale the reference plus all queued input pictures into tmp_frames */
1314  for (i = 0; i < s->max_b_frames + 2; i++) {
1315  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1316  s->next_picture_ptr;
1317  uint8_t *data[4];
1318 
1319  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1320  pre_input = *pre_input_ptr;
1321  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1322 
1323  if (!pre_input.shared && i) {
1324  data[0] += INPLACE_OFFSET;
1325  data[1] += INPLACE_OFFSET;
1326  data[2] += INPLACE_OFFSET;
1327  }
1328 
1329  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1330  s->tmp_frames[i]->linesize[0],
1331  data[0],
1332  pre_input.f->linesize[0],
1333  c->width, c->height);
1334  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1335  s->tmp_frames[i]->linesize[1],
1336  data[1],
1337  pre_input.f->linesize[1],
1338  c->width >> 1, c->height >> 1);
1339  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1340  s->tmp_frames[i]->linesize[2],
1341  data[2],
1342  pre_input.f->linesize[2],
1343  c->width >> 1, c->height >> 1);
1344  }
1345  }
1346 
/* try each candidate B-run length j and keep the cheapest in RD terms */
1347  for (j = 0; j < s->max_b_frames + 1; j++) {
1348  int64_t rd = 0;
1349 
1350  if (!s->input_picture[j])
1351  break;
1352 
1353  c->error[0] = c->error[1] = c->error[2] = 0;
1354 
/* tmp_frames[0] pict_type assignment (orig. 1355) was dropped here */
1356  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1357 
1358  out_size = encode_frame(c, s->tmp_frames[0]);
1359 
1360  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1361 
1362  for (i = 0; i < s->max_b_frames + 1; i++) {
1363  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1364 
/* ternary continuation (orig. 1366) was dropped here */
1365  s->tmp_frames[i + 1]->pict_type = is_p ?
1367  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1368 
1369  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1370 
1371  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1372  }
1373 
1374  /* get the delayed frames */
1375  while (out_size) {
1376  out_size = encode_frame(c, NULL);
1377  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1378  }
1379 
1380  rd += c->error[0] + c->error[1] + c->error[2];
1381 
1382  if (rd < best_rd) {
1383  best_rd = rd;
1384  best_b_count = j;
1385  }
1386  }
1387 
1388  avcodec_close(c);
1389  av_freep(&c);
1390 
1391  return best_b_count;
1392 }
1393 
/* select_input_picture(): pick the next picture to code, decide its type
 * (I/P/B) and the number of B-frames to insert, then reorder the input
 * queue into coding order and reference the result as new/current picture.
 * NOTE(review): the extraction dropped the signature (orig. 1394) and
 * several interior lines (1399, 1404, 1420-1421, 1486, 1506-1508,
 * 1513-1514, 1521-1522, 1525, 1539, 1549, 1555, 1560, 1565, 1567) —
 * restore from the pristine source before compiling. */
1395 {
1396  int i, ret;
1397 
/* shift-loop body (orig. 1399) was dropped after this line */
1398  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1400  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1401 
1402  /* set next picture type & ordering */
1403  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
/* frame-skip precondition line (orig. 1404) was dropped here */
1405  if (s->picture_in_gop_number < s->gop_size &&
1406  s->next_picture_ptr &&
1407  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1408  // FIXME check that te gop check above is +-1 correct
1409  av_frame_unref(s->input_picture[0]->f);
1410 
1411  ff_vbv_update(s, 0);
1412 
1413  goto no_output_pic;
1414  }
1415  }
1416 
1417  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1418  !s->next_picture_ptr || s->intra_only) {
1419  s->reordered_input_picture[0] = s->input_picture[0];
/* pict_type/coded_picture_number assignments (orig. 1420-1421) were dropped */
1422  s->coded_picture_number++;
1423  } else {
1424  int b_frames;
1425 
/* two-pass mode: take picture types from the first-pass rate-control log */
1426  if (s->flags & CODEC_FLAG_PASS2) {
1427  for (i = 0; i < s->max_b_frames + 1; i++) {
1428  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1429 
1430  if (pict_num >= s->rc_context.num_entries)
1431  break;
1432  if (!s->input_picture[i]) {
1433  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1434  break;
1435  }
1436 
1437  s->input_picture[i]->f->pict_type =
1438  s->rc_context.entry[pict_num].new_pict_type;
1439  }
1440  }
1441 
/* B-frame count selection: 0 = fixed max, 1 = intra-count heuristic,
 * 2 = brute-force RD search via estimate_best_b_count() */
1442  if (s->avctx->b_frame_strategy == 0) {
1443  b_frames = s->max_b_frames;
1444  while (b_frames && !s->input_picture[b_frames])
1445  b_frames--;
1446  } else if (s->avctx->b_frame_strategy == 1) {
1447  for (i = 1; i < s->max_b_frames + 1; i++) {
1448  if (s->input_picture[i] &&
1449  s->input_picture[i]->b_frame_score == 0) {
1450  s->input_picture[i]->b_frame_score =
1451  get_intra_count(s,
1452  s->input_picture[i ]->f->data[0],
1453  s->input_picture[i - 1]->f->data[0],
1454  s->linesize) + 1;
1455  }
1456  }
1457  for (i = 0; i < s->max_b_frames + 1; i++) {
1458  if (!s->input_picture[i] ||
1459  s->input_picture[i]->b_frame_score - 1 >
1460  s->mb_num / s->avctx->b_sensitivity)
1461  break;
1462  }
1463 
1464  b_frames = FFMAX(0, i - 1);
1465 
1466  /* reset scores */
1467  for (i = 0; i < b_frames + 1; i++) {
1468  s->input_picture[i]->b_frame_score = 0;
1469  }
1470  } else if (s->avctx->b_frame_strategy == 2) {
1471  b_frames = estimate_best_b_count(s);
1472  } else {
1473  av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1474  b_frames = 0;
1475  }
1476 
1477  emms_c();
1478 
/* honor user-forced picture types inside the candidate B-run */
1479  for (i = b_frames - 1; i >= 0; i--) {
1480  int type = s->input_picture[i]->f->pict_type;
1481  if (type && type != AV_PICTURE_TYPE_B)
1482  b_frames = i;
1483  }
/* av_log() call line (orig. 1486) was dropped before this message */
1484  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1485  b_frames == s->max_b_frames) {
1487  "warning, too many b frames in a row\n");
1488  }
1489 
/* GOP boundary handling: clamp the B-run or force an I-frame */
1490  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1491  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1492  s->gop_size > s->picture_in_gop_number) {
1493  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1494  } else {
1495  if (s->flags & CODEC_FLAG_CLOSED_GOP)
1496  b_frames = 0;
1497  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1498  }
1499  }
1500 
1501  if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1502  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1503  b_frames--;
1504 
/* reorder: anchor first, then the B-frames (orig. 1506-1508, 1513-1514
 * carrying pict_type/coded_picture_number assignments were dropped) */
1505  s->reordered_input_picture[0] = s->input_picture[b_frames];
1509  s->coded_picture_number++;
1510  for (i = 0; i < b_frames; i++) {
1511  s->reordered_input_picture[i + 1] = s->input_picture[i];
1512  s->reordered_input_picture[i + 1]->f->pict_type =
1515  s->coded_picture_number++;
1516  }
1517  }
1518  }
1519 no_output_pic:
1520  if (s->reordered_input_picture[0]) {
/* reference-flag computation lines (orig. 1521-1522, 1525) were dropped */
1523  AV_PICTURE_TYPE_B ? 3 : 0;
1524 
1526  if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1527  return ret;
1528 
1529  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1530  // input is a shared pix, so we can't modifiy it -> alloc a new
1531  // one & ensure that the shared one is reuseable
1532 
1533  Picture *pic;
1534  int i = ff_find_unused_picture(s, 0);
1535  if (i < 0)
1536  return i;
1537  pic = &s->picture[i];
1538 
/* pic->reference assignment (orig. 1539) was dropped here */
1540  if (ff_alloc_picture(s, pic, 0) < 0) {
1541  return -1;
1542  }
1543 
1544  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1545  if (ret < 0)
1546  return ret;
1547 
1548  /* mark us unused / free shared pic */
1550  s->reordered_input_picture[0]->shared = 0;
1551 
1552  s->current_picture_ptr = pic;
1553  } else {
1554  // input is not a shared pix -> reuse buffer for current_pix
/* current_picture_ptr assignment (orig. 1555) was dropped here */
1556  for (i = 0; i < 4; i++) {
1557  s->new_picture.f->data[i] += INPLACE_OFFSET;
1558  }
1559  }
1561  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1562  s->current_picture_ptr)) < 0)
1563  return ret;
1564 
/* picture_number bookkeeping lines (orig. 1565, 1567) were dropped */
1566  } else {
1568  }
1569  return 0;
1570 }
1571 
/* frame_end(): finish the current frame — pad the reconstructed picture's
 * borders (needed for unrestricted motion vectors) and roll the per-frame
 * state (last_pict_type etc.) forward.
 * NOTE(review): the extraction dropped interior lines (orig. 1575, 1577,
 * 1580, 1583, 1585, 1592, 1604, 1606, 1608-1609), including the
 * draw_edges() function-name lines for all three plane calls and the
 * pixel-format descriptor lookup — restore from the pristine source. */
1572 static void frame_end(MpegEncContext *s)
1573 {
1574  if (s->unrestricted_mv &&
1576  !s->intra_only) {
/* the av_pix_fmt_desc_get() line (orig. 1577) was dropped before these */
1578  int hshift = desc->log2_chroma_w;
1579  int vshift = desc->log2_chroma_h;
/* luma edge padding (draw_edges call line orig. 1580 dropped) */
1581  s->current_picture.f->linesize[0],
1582  s->h_edge_pos, s->v_edge_pos,
1584  EDGE_TOP | EDGE_BOTTOM);
/* chroma U edge padding (call line orig. 1585 dropped) */
1586  s->current_picture.f->linesize[1],
1587  s->h_edge_pos >> hshift,
1588  s->v_edge_pos >> vshift,
1589  EDGE_WIDTH >> hshift,
1590  EDGE_WIDTH >> vshift,
1591  EDGE_TOP | EDGE_BOTTOM);
/* chroma V edge padding (call line orig. 1592 dropped) */
1593  s->current_picture.f->linesize[2],
1594  s->h_edge_pos >> hshift,
1595  s->v_edge_pos >> vshift,
1596  EDGE_WIDTH >> hshift,
1597  EDGE_WIDTH >> vshift,
1598  EDGE_TOP | EDGE_BOTTOM);
1599  }
1600 
1601  emms_c();
1602 
1603  s->last_pict_type = s->pict_type;
/* body of the non-B branch (orig. 1606) was dropped after this line */
1605  if (s->pict_type!= AV_PICTURE_TYPE_B)
1607 
1609 
1610 }
1611 
1613 {
1614  int intra, i;
1615 
1616  for (intra = 0; intra < 2; intra++) {
1617  if (s->dct_count[intra] > (1 << 16)) {
1618  for (i = 0; i < 64; i++) {
1619  s->dct_error_sum[intra][i] >>= 1;
1620  }
1621  s->dct_count[intra] >>= 1;
1622  }
1623 
1624  for (i = 0; i < 64; i++) {
1625  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1626  s->dct_count[intra] +
1627  s->dct_error_sum[intra][i] / 2) /
1628  (s->dct_error_sum[intra][i] + 1);
1629  }
1630  }
1631 }
1632 
/* frame_start(): prepare encoder state before coding one frame — release
 * and re-reference last/next/current pictures, handle field pictures, and
 * select the quantization/dequantization function pointers.
 * NOTE(review): the extraction dropped the signature (orig. 1633) and many
 * interior lines (1639, 1641, 1644-1645, 1647, 1653, 1655, 1659, 1666,
 * 1676, 1687-1688, 1690-1691, 1693-1694, 1698-1699) — restore from the
 * pristine source before compiling. */
1634 {
1635  int ret;
1636 
1637  /* mark & release old frames */
1638  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
/* condition continuation (orig. 1639) and unref call (1641) were dropped */
1640  s->last_picture_ptr->f->buf[0]) {
1642  }
1643 
/* current_picture setup lines (orig. 1644-1645, 1647) were dropped */
1646 
1648  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1649  s->current_picture_ptr)) < 0)
1650  return ret;
1651 
/* last/next pointer rotation lines (orig. 1653, 1655) were dropped */
1652  if (s->pict_type != AV_PICTURE_TYPE_B) {
1654  if (!s->droppable)
1656  }
1657 
1658  if (s->last_picture_ptr) {
/* ff_mpeg_unref_picture call (orig. 1659) was dropped here */
1660  if (s->last_picture_ptr->f->buf[0] &&
1661  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1662  s->last_picture_ptr)) < 0)
1663  return ret;
1664  }
1665  if (s->next_picture_ptr) {
/* ff_mpeg_unref_picture call (orig. 1666) was dropped here */
1667  if (s->next_picture_ptr->f->buf[0] &&
1668  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1669  s->next_picture_ptr)) < 0)
1670  return ret;
1671  }
1672 
/* field pictures: point data at the selected field and double the strides */
1673  if (s->picture_structure!= PICT_FRAME) {
1674  int i;
1675  for (i = 0; i < 4; i++) {
/* bottom-field condition (orig. 1676) was dropped here */
1677  s->current_picture.f->data[i] +=
1678  s->current_picture.f->linesize[i];
1679  }
1680  s->current_picture.f->linesize[i] *= 2;
1681  s->last_picture.f->linesize[i] *= 2;
1682  s->next_picture.f->linesize[i] *= 2;
1683  }
1684  }
1685 
/* select (de)quantizer implementations per output format; the assignment
 * lines (orig. 1687-1688, 1690-1691, 1693-1694) were dropped */
1686  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1689  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1692  } else {
1695  }
1696 
/* noise-reduction update call (orig. 1698-1699) was dropped */
1697  if (s->dct_error_sum) {
1700  }
1701 
1702  return 0;
1703 }
1704 
/* ff_mpv_encode_picture(): public entry point — queue the input frame,
 * pick/reorder the next picture, encode it (retrying at a higher lambda on
 * VBV overflow), apply byte stuffing, update vbv_delay for CBR MPEG-1/2,
 * and fill the output packet (pts/dts/keyframe flag/size).
 * NOTE(review): the extraction dropped the first signature line
 * (orig. 1705, "int ff_mpv_encode_picture(AVCodecContext *avctx,
 * AVPacket *pkt,") and interior lines (1730-1731, 1773-1774, 1797, 1812,
 * 1816, 1842-1843, 1877, 1902, 1912) — restore from the pristine source. */
1706  const AVFrame *pic_arg, int *got_packet)
1707 {
1708  MpegEncContext *s = avctx->priv_data;
1709  int i, stuffing_count, ret;
1710  int context_count = s->slice_context_count;
1711 
1712  s->picture_in_gop_number++;
1713 
1714  if (load_input_picture(s, pic_arg) < 0)
1715  return -1;
1716 
1717  if (select_input_picture(s) < 0) {
1718  return -1;
1719  }
1720 
1721  /* output? */
1722  if (s->new_picture.f->data[0]) {
1723  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1724  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - FF_INPUT_BUFFER_PADDING_SIZE
1725  :
1726  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1727  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
1728  return ret;
/* mb_info side-data allocation lines (orig. 1730-1731) were dropped */
1729  if (s->mb_info) {
1732  s->mb_width*s->mb_height*12);
1733  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1734  }
1735 
/* give each slice thread a proportional slice of the output buffer */
1736  for (i = 0; i < context_count; i++) {
1737  int start_y = s->thread_context[i]->start_mb_y;
1738  int end_y = s->thread_context[i]-> end_mb_y;
1739  int h = s->mb_height;
1740  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1741  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1742 
1743  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1744  }
1745 
1746  s->pict_type = s->new_picture.f->pict_type;
1747  //emms_c();
1748  ret = frame_start(s);
1749  if (ret < 0)
1750  return ret;
1751 vbv_retry:
1752  ret = encode_picture(s, s->picture_number);
1753  if (growing_buffer) {
1754  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1755  pkt->data = s->pb.buf;
1756  pkt->size = avctx->internal->byte_buffer_size;
1757  }
1758  if (ret < 0)
1759  return -1;
1760 
/* export per-frame statistics to the AVCodecContext */
1761  avctx->header_bits = s->header_bits;
1762  avctx->mv_bits = s->mv_bits;
1763  avctx->misc_bits = s->misc_bits;
1764  avctx->i_tex_bits = s->i_tex_bits;
1765  avctx->p_tex_bits = s->p_tex_bits;
1766  avctx->i_count = s->i_count;
1767  // FIXME f/b_count in avctx
1768  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1769  avctx->skip_count = s->skip_count;
1770 
1771  frame_end(s);
1772 
/* mjpeg picture trailer call (orig. 1773-1774) was dropped here */
1775 
/* VBV overflow handling: raise lambda and re-encode the frame */
1776  if (avctx->rc_buffer_size) {
1777  RateControlContext *rcc = &s->rc_context;
1778  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1779 
1780  if (put_bits_count(&s->pb) > max_size &&
1781  s->lambda < s->lmax) {
1782  s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1783  (s->qscale + 1) / s->qscale);
1784  if (s->adaptive_quant) {
1785  int i;
1786  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1787  s->lambda_table[i] =
1788  FFMAX(s->lambda_table[i] + 1,
1789  s->lambda_table[i] * (s->qscale + 1) /
1790  s->qscale);
1791  }
1792  s->mb_skipped = 0; // done in frame_start()
1793  // done in encode_picture() so we must undo it
1794  if (s->pict_type == AV_PICTURE_TYPE_P) {
/* condition continuation (orig. 1797) was dropped here */
1795  if (s->flipflop_rounding ||
1796  s->codec_id == AV_CODEC_ID_H263P ||
1798  s->no_rounding ^= 1;
1799  }
1800  if (s->pict_type != AV_PICTURE_TYPE_B) {
1801  s->time_base = s->last_time_base;
1802  s->last_non_b_time = s->time - s->pp_time;
1803  }
1804  for (i = 0; i < context_count; i++) {
1805  PutBitContext *pb = &s->thread_context[i]->pb;
1806  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1807  }
1808  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1809  goto vbv_retry;
1810  }
1811 
/* assert/bookkeeping line (orig. 1812) was dropped here */
1813  }
1814 
/* first-pass statistics write (orig. 1816) was dropped here */
1815  if (s->flags & CODEC_FLAG_PASS1)
1817 
1818  for (i = 0; i < 4; i++) {
1819  s->current_picture_ptr->f->error[i] =
1820  s->current_picture.f->error[i] =
1821  s->current_picture.error[i];
1822  avctx->error[i] += s->current_picture_ptr->f->error[i];
1823  }
1824 
1825  if (s->flags & CODEC_FLAG_PASS1)
1826  assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1827  avctx->i_tex_bits + avctx->p_tex_bits ==
1828  put_bits_count(&s->pb));
1829  flush_put_bits(&s->pb);
1830  s->frame_bits = put_bits_count(&s->pb);
1831 
1832  stuffing_count = ff_vbv_update(s, s->frame_bits);
1833  s->stuffing_bits = 8*stuffing_count;
1834  if (stuffing_count) {
1835  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1836  stuffing_count + 50) {
1837  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1838  return -1;
1839  }
1840 
/* MPEG-1/2 case labels (orig. 1842-1843) were dropped before this loop */
1841  switch (s->codec_id) {
1844  while (stuffing_count--) {
1845  put_bits(&s->pb, 8, 0);
1846  }
1847  break;
1848  case AV_CODEC_ID_MPEG4:
1849  put_bits(&s->pb, 16, 0);
1850  put_bits(&s->pb, 16, 0x1C3);
1851  stuffing_count -= 4;
1852  while (stuffing_count--) {
1853  put_bits(&s->pb, 8, 0xFF);
1854  }
1855  break;
1856  default:
1857  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1858  }
1859  flush_put_bits(&s->pb);
1860  s->frame_bits = put_bits_count(&s->pb);
1861  }
1862 
1863  /* update mpeg1/2 vbv_delay for CBR */
1864  if (s->avctx->rc_max_rate &&
1865  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1866  s->out_format == FMT_MPEG1 &&
1867  90000LL * (avctx->rc_buffer_size - 1) <=
1868  s->avctx->rc_max_rate * 0xFFFFLL) {
1869  int vbv_delay, min_delay;
1870  double inbits = s->avctx->rc_max_rate *
1871  av_q2d(s->avctx->time_base);
1872  int minbits = s->frame_bits - 8 *
1873  (s->vbv_delay_ptr - s->pb.buf - 1);
1874  double bits = s->rc_context.buffer_index + minbits - inbits;
1875 
/* av_log() call line (orig. 1877) was dropped before this message */
1876  if (bits < 0)
1878  "Internal error, negative bits\n");
1879 
1880  assert(s->repeat_first_field == 0);
1881 
1882  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1883  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1884  s->avctx->rc_max_rate;
1885 
1886  vbv_delay = FFMAX(vbv_delay, min_delay);
1887 
1888  av_assert0(vbv_delay < 0xFFFF);
1889 
/* patch the 16-bit vbv_delay field already written into the bitstream */
1890  s->vbv_delay_ptr[0] &= 0xF8;
1891  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1892  s->vbv_delay_ptr[1] = vbv_delay >> 5;
1893  s->vbv_delay_ptr[2] &= 0x07;
1894  s->vbv_delay_ptr[2] |= vbv_delay << 3;
1895  avctx->vbv_delay = vbv_delay * 300;
1896  }
1897  s->total_bits += s->frame_bits;
1898  avctx->frame_bits = s->frame_bits;
1899 
/* output timestamps: delayed dts for B-frame reordering */
1900  pkt->pts = s->current_picture.f->pts;
/* first branch condition line (orig. 1902) was dropped here */
1901  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1903  pkt->dts = pkt->pts - s->dts_delta;
1904  else
1905  pkt->dts = s->reordered_pts;
1906  s->reordered_pts = pkt->pts;
1907  } else
1908  pkt->dts = pkt->pts;
1909  if (s->current_picture.f->key_frame)
1910  pkt->flags |= AV_PKT_FLAG_KEY;
/* mb_info side-data write (orig. 1912) was dropped here */
1911  if (s->mb_info)
1913  } else {
1914  s->frame_bits = 0;
1915  }
1916 
1917  /* release non-reference frames */
1918  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1919  if (!s->picture[i].reference)
1920  ff_mpeg_unref_picture(s, &s->picture[i]);
1921  }
1922 
1923  av_assert1((s->frame_bits & 7) == 0);
1924 
1925  pkt->size = s->frame_bits / 8;
1926  *got_packet = !!pkt->size;
1927  return 0;
1928 }
1929 
1931  int n, int threshold)
1932 {
1933  static const char tab[64] = {
1934  3, 2, 2, 1, 1, 1, 1, 1,
1935  1, 1, 1, 1, 1, 1, 1, 1,
1936  1, 1, 1, 1, 1, 1, 1, 1,
1937  0, 0, 0, 0, 0, 0, 0, 0,
1938  0, 0, 0, 0, 0, 0, 0, 0,
1939  0, 0, 0, 0, 0, 0, 0, 0,
1940  0, 0, 0, 0, 0, 0, 0, 0,
1941  0, 0, 0, 0, 0, 0, 0, 0
1942  };
1943  int score = 0;
1944  int run = 0;
1945  int i;
1946  int16_t *block = s->block[n];
1947  const int last_index = s->block_last_index[n];
1948  int skip_dc;
1949 
1950  if (threshold < 0) {
1951  skip_dc = 0;
1952  threshold = -threshold;
1953  } else
1954  skip_dc = 1;
1955 
1956  /* Are all we could set to zero already zero? */
1957  if (last_index <= skip_dc - 1)
1958  return;
1959 
1960  for (i = 0; i <= last_index; i++) {
1961  const int j = s->intra_scantable.permutated[i];
1962  const int level = FFABS(block[j]);
1963  if (level == 1) {
1964  if (skip_dc && i == 0)
1965  continue;
1966  score += tab[run];
1967  run = 0;
1968  } else if (level > 1) {
1969  return;
1970  } else {
1971  run++;
1972  }
1973  }
1974  if (score >= threshold)
1975  return;
1976  for (i = skip_dc; i <= last_index; i++) {
1977  const int j = s->intra_scantable.permutated[i];
1978  block[j] = 0;
1979  }
1980  if (block[0])
1981  s->block_last_index[n] = 0;
1982  else
1983  s->block_last_index[n] = -1;
1984 }
1985 
1986 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1987  int last_index)
1988 {
1989  int i;
1990  const int maxlevel = s->max_qcoeff;
1991  const int minlevel = s->min_qcoeff;
1992  int overflow = 0;
1993 
1994  if (s->mb_intra) {
1995  i = 1; // skip clipping of intra dc
1996  } else
1997  i = 0;
1998 
1999  for (; i <= last_index; i++) {
2000  const int j = s->intra_scantable.permutated[i];
2001  int level = block[j];
2002 
2003  if (level > maxlevel) {
2004  level = maxlevel;
2005  overflow++;
2006  } else if (level < minlevel) {
2007  level = minlevel;
2008  overflow++;
2009  }
2010 
2011  block[j] = level;
2012  }
2013 
2014  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2015  av_log(s->avctx, AV_LOG_INFO,
2016  "warning, clipping %d dct coefficients to %d..%d\n",
2017  overflow, minlevel, maxlevel);
2018 }
2019 
/**
 * Compute a perceptual weight for each pixel of an 8x8 block from the
 * local standard deviation of its 3x3 neighbourhood (clipped to the block):
 * flat areas get large weights, textured areas small ones.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int nx, ny;
            int sum = 0;
            int sqr = 0;
            int count = 0;

            /* accumulate over the 3x3 neighbourhood inside the block */
            for (ny = FFMAX(y - 1, 0); ny < FFMIN(8, y + 2); ny++) {
                for (nx = FFMAX(x - 1, 0); nx < FFMIN(8, x + 2); nx++) {
                    const int v = ptr[nx + ny * stride];

                    sum   += v;
                    sqr   += v * v;
                    count += 1;
                }
            }

            /* 36 * sqrt(count * sum(v^2) - sum(v)^2) / count
             * = 36 * count * stddev / count */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2043 
2045  int motion_x, int motion_y,
2046  int mb_block_height,
2047  int mb_block_width,
2048  int mb_block_count)
2049 {
2050  int16_t weight[12][64];
2051  int16_t orig[12][64];
2052  const int mb_x = s->mb_x;
2053  const int mb_y = s->mb_y;
2054  int i;
2055  int skip_dct[12];
2056  int dct_offset = s->linesize * 8; // default for progressive frames
2057  int uv_dct_offset = s->uvlinesize * 8;
2058  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2059  ptrdiff_t wrap_y, wrap_c;
2060 
2061  for (i = 0; i < mb_block_count; i++)
2062  skip_dct[i] = s->skipdct;
2063 
2064  if (s->adaptive_quant) {
2065  const int last_qp = s->qscale;
2066  const int mb_xy = mb_x + mb_y * s->mb_stride;
2067 
2068  s->lambda = s->lambda_table[mb_xy];
2069  update_qscale(s);
2070 
2071  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2072  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2073  s->dquant = s->qscale - last_qp;
2074 
2075  if (s->out_format == FMT_H263) {
2076  s->dquant = av_clip(s->dquant, -2, 2);
2077 
2078  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2079  if (!s->mb_intra) {
2080  if (s->pict_type == AV_PICTURE_TYPE_B) {
2081  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2082  s->dquant = 0;
2083  }
2084  if (s->mv_type == MV_TYPE_8X8)
2085  s->dquant = 0;
2086  }
2087  }
2088  }
2089  }
2090  ff_set_qscale(s, last_qp + s->dquant);
2091  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2092  ff_set_qscale(s, s->qscale + s->dquant);
2093 
2094  wrap_y = s->linesize;
2095  wrap_c = s->uvlinesize;
2096  ptr_y = s->new_picture.f->data[0] +
2097  (mb_y * 16 * wrap_y) + mb_x * 16;
2098  ptr_cb = s->new_picture.f->data[1] +
2099  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2100  ptr_cr = s->new_picture.f->data[2] +
2101  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2102 
2103  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2104  uint8_t *ebuf = s->edge_emu_buffer + 36 * wrap_y;
2105  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2106  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2107  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2108  wrap_y, wrap_y,
2109  16, 16, mb_x * 16, mb_y * 16,
2110  s->width, s->height);
2111  ptr_y = ebuf;
2112  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2113  wrap_c, wrap_c,
2114  mb_block_width, mb_block_height,
2115  mb_x * mb_block_width, mb_y * mb_block_height,
2116  cw, ch);
2117  ptr_cb = ebuf + 16 * wrap_y;
2118  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2119  wrap_c, wrap_c,
2120  mb_block_width, mb_block_height,
2121  mb_x * mb_block_width, mb_y * mb_block_height,
2122  cw, ch);
2123  ptr_cr = ebuf + 16 * wrap_y + 16;
2124  }
2125 
2126  if (s->mb_intra) {
2127  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
2128  int progressive_score, interlaced_score;
2129 
2130  s->interlaced_dct = 0;
2131  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2132  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2133  NULL, wrap_y, 8) - 400;
2134 
2135  if (progressive_score > 0) {
2136  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2137  NULL, wrap_y * 2, 8) +
2138  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2139  NULL, wrap_y * 2, 8);
2140  if (progressive_score > interlaced_score) {
2141  s->interlaced_dct = 1;
2142 
2143  dct_offset = wrap_y;
2144  uv_dct_offset = wrap_c;
2145  wrap_y <<= 1;
2146  if (s->chroma_format == CHROMA_422 ||
2147  s->chroma_format == CHROMA_444)
2148  wrap_c <<= 1;
2149  }
2150  }
2151  }
2152 
2153  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2154  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2155  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2156  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2157 
2158  if (s->flags & CODEC_FLAG_GRAY) {
2159  skip_dct[4] = 1;
2160  skip_dct[5] = 1;
2161  } else {
2162  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2163  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2164  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2165  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2166  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2167  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2168  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2169  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2170  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2171  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2172  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2173  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2174  }
2175  }
2176  } else {
2177  op_pixels_func (*op_pix)[4];
2178  qpel_mc_func (*op_qpix)[16];
2179  uint8_t *dest_y, *dest_cb, *dest_cr;
2180 
2181  dest_y = s->dest[0];
2182  dest_cb = s->dest[1];
2183  dest_cr = s->dest[2];
2184 
2185  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2186  op_pix = s->hdsp.put_pixels_tab;
2187  op_qpix = s->qdsp.put_qpel_pixels_tab;
2188  } else {
2189  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2190  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2191  }
2192 
2193  if (s->mv_dir & MV_DIR_FORWARD) {
2194  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2195  s->last_picture.f->data,
2196  op_pix, op_qpix);
2197  op_pix = s->hdsp.avg_pixels_tab;
2198  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2199  }
2200  if (s->mv_dir & MV_DIR_BACKWARD) {
2201  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2202  s->next_picture.f->data,
2203  op_pix, op_qpix);
2204  }
2205 
2206  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
2207  int progressive_score, interlaced_score;
2208 
2209  s->interlaced_dct = 0;
2210  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2211  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2212  ptr_y + wrap_y * 8,
2213  wrap_y, 8) - 400;
2214 
2215  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2216  progressive_score -= 400;
2217 
2218  if (progressive_score > 0) {
2219  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2220  wrap_y * 2, 8) +
2221  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2222  ptr_y + wrap_y,
2223  wrap_y * 2, 8);
2224 
2225  if (progressive_score > interlaced_score) {
2226  s->interlaced_dct = 1;
2227 
2228  dct_offset = wrap_y;
2229  uv_dct_offset = wrap_c;
2230  wrap_y <<= 1;
2231  if (s->chroma_format == CHROMA_422)
2232  wrap_c <<= 1;
2233  }
2234  }
2235  }
2236 
2237  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2238  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2239  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2240  dest_y + dct_offset, wrap_y);
2241  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2242  dest_y + dct_offset + 8, wrap_y);
2243 
2244  if (s->flags & CODEC_FLAG_GRAY) {
2245  skip_dct[4] = 1;
2246  skip_dct[5] = 1;
2247  } else {
2248  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2249  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2250  if (!s->chroma_y_shift) { /* 422 */
2251  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2252  dest_cb + uv_dct_offset, wrap_c);
2253  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2254  dest_cr + uv_dct_offset, wrap_c);
2255  }
2256  }
2257  /* pre quantization */
2258  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2259  2 * s->qscale * s->qscale) {
2260  // FIXME optimize
2261  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2262  skip_dct[0] = 1;
2263  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2264  skip_dct[1] = 1;
2265  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2266  wrap_y, 8) < 20 * s->qscale)
2267  skip_dct[2] = 1;
2268  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2269  wrap_y, 8) < 20 * s->qscale)
2270  skip_dct[3] = 1;
2271  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2272  skip_dct[4] = 1;
2273  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2274  skip_dct[5] = 1;
2275  if (!s->chroma_y_shift) { /* 422 */
2276  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2277  dest_cb + uv_dct_offset,
2278  wrap_c, 8) < 20 * s->qscale)
2279  skip_dct[6] = 1;
2280  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2281  dest_cr + uv_dct_offset,
2282  wrap_c, 8) < 20 * s->qscale)
2283  skip_dct[7] = 1;
2284  }
2285  }
2286  }
2287 
2288  if (s->quantizer_noise_shaping) {
2289  if (!skip_dct[0])
2290  get_visual_weight(weight[0], ptr_y , wrap_y);
2291  if (!skip_dct[1])
2292  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2293  if (!skip_dct[2])
2294  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2295  if (!skip_dct[3])
2296  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2297  if (!skip_dct[4])
2298  get_visual_weight(weight[4], ptr_cb , wrap_c);
2299  if (!skip_dct[5])
2300  get_visual_weight(weight[5], ptr_cr , wrap_c);
2301  if (!s->chroma_y_shift) { /* 422 */
2302  if (!skip_dct[6])
2303  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2304  wrap_c);
2305  if (!skip_dct[7])
2306  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2307  wrap_c);
2308  }
2309  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2310  }
2311 
2312  /* DCT & quantize */
2313  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2314  {
2315  for (i = 0; i < mb_block_count; i++) {
2316  if (!skip_dct[i]) {
2317  int overflow;
2318  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2319  // FIXME we could decide to change to quantizer instead of
2320  // clipping
2321  // JS: I don't think that would be a good idea it could lower
2322  // quality instead of improve it. Just INTRADC clipping
2323  // deserves changes in quantizer
2324  if (overflow)
2325  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2326  } else
2327  s->block_last_index[i] = -1;
2328  }
2329  if (s->quantizer_noise_shaping) {
2330  for (i = 0; i < mb_block_count; i++) {
2331  if (!skip_dct[i]) {
2332  s->block_last_index[i] =
2333  dct_quantize_refine(s, s->block[i], weight[i],
2334  orig[i], i, s->qscale);
2335  }
2336  }
2337  }
2338 
2339  if (s->luma_elim_threshold && !s->mb_intra)
2340  for (i = 0; i < 4; i++)
2342  if (s->chroma_elim_threshold && !s->mb_intra)
2343  for (i = 4; i < mb_block_count; i++)
2345 
2346  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2347  for (i = 0; i < mb_block_count; i++) {
2348  if (s->block_last_index[i] == -1)
2349  s->coded_score[i] = INT_MAX / 256;
2350  }
2351  }
2352  }
2353 
2354  if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2355  s->block_last_index[4] =
2356  s->block_last_index[5] = 0;
2357  s->block[4][0] =
2358  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2359  if (!s->chroma_y_shift) { /* 422 / 444 */
2360  for (i=6; i<12; i++) {
2361  s->block_last_index[i] = 0;
2362  s->block[i][0] = s->block[4][0];
2363  }
2364  }
2365  }
2366 
2367  // non c quantize code returns incorrect block_last_index FIXME
2368  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2369  for (i = 0; i < mb_block_count; i++) {
2370  int j;
2371  if (s->block_last_index[i] > 0) {
2372  for (j = 63; j > 0; j--) {
2373  if (s->block[i][s->intra_scantable.permutated[j]])
2374  break;
2375  }
2376  s->block_last_index[i] = j;
2377  }
2378  }
2379  }
2380 
2381  /* huffman encode */
2382  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2386  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2387  break;
2388  case AV_CODEC_ID_MPEG4:
2390  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2391  break;
2392  case AV_CODEC_ID_MSMPEG4V2:
2393  case AV_CODEC_ID_MSMPEG4V3:
2394  case AV_CODEC_ID_WMV1:
2396  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2397  break;
2398  case AV_CODEC_ID_WMV2:
2399  if (CONFIG_WMV2_ENCODER)
2400  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2401  break;
2402  case AV_CODEC_ID_H261:
2403  if (CONFIG_H261_ENCODER)
2404  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2405  break;
2406  case AV_CODEC_ID_H263:
2407  case AV_CODEC_ID_H263P:
2408  case AV_CODEC_ID_FLV1:
2409  case AV_CODEC_ID_RV10:
2410  case AV_CODEC_ID_RV20:
2411  if (CONFIG_H263_ENCODER)
2412  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2413  break;
2414  case AV_CODEC_ID_MJPEG:
2415  case AV_CODEC_ID_AMV:
2417  ff_mjpeg_encode_mb(s, s->block);
2418  break;
2419  default:
2420  av_assert1(0);
2421  }
2422 }
2423 
2424 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2425 {
2426  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2427  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2428  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2429 }
2430 
    int i;

    /* Snapshot of predictor/statistics state taken before trial-encoding a
     * macroblock candidate, so encode_mb_hq() can restore and compare.
     * NOTE(review): the function signature line is not visible in this
     * extract -- verify against the complete source. */

    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* mpeg1: pending skip run and per-component DC predictors */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics: running bit/count totals used by rate control */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;

    d->mb_skipped= 0;   /* each trial encode starts as a non-skipped MB */
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    /* NOTE(review): one line appears to be elided from this extract here --
     * confirm against the complete source. */
}
2458 
    int i;

    /* Copies the state of the winning trial encode back into the main
     * context after encode_mb_hq() has picked the best candidate.
     * NOTE(review): the function signature line is not visible in this
     * extract -- verify against the complete source. */

    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* mpeg1: pending skip run and per-component DC predictors */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    d->pb= s->pb;               /* adopt the winning bitstream writer */
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    /* NOTE(review): one line appears to be elided from this extract here --
     * confirm against the complete source. */
    d->qscale= s->qscale;

    /* NOTE(review): a trailing line also appears to be elided -- verify. */
}
2497 
/**
 * Trial-encode one macroblock with the given candidate type and keep it as
 * the new best if its rate(-distortion) score beats *dmin.
 * NOTE(review): one parameter line (the PutBitContext pb/pb2/tex_pb array
 * parameters referenced below) is elided from this extract -- verify
 * against the complete source.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                           int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    copy_context_before_encode(s, backup, type);

    /* double-buffered block/bitstream storage: the candidate writes into
     * slot *next_block so the current best in the other slot survives */
    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* redirect reconstruction into the scratchpad so the best
         * candidate's reconstruction in s->dest is not overwritten */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->rd_scratchpad;
        s->dest[1] = s->rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    score= put_bits_count(&s->pb);      /* rate term, in bits */
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* full RD: reconstruct the MB and add the lambda-weighted SSE */
        ff_mpv_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;     /* keep this slot as "best"; reuse the other */

        copy_context_after_encode(best, s, type);
    }
}
2548 
2549 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2550  uint32_t *sq = ff_square_tab + 256;
2551  int acc=0;
2552  int x,y;
2553 
2554  if(w==16 && h==16)
2555  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2556  else if(w==8 && h==8)
2557  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2558 
2559  for(y=0; y<h; y++){
2560  for(x=0; x<w; x++){
2561  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2562  }
2563  }
2564 
2565  av_assert2(acc>=0);
2566 
2567  return acc;
2568 }
2569 
2570 static int sse_mb(MpegEncContext *s){
2571  int w= 16;
2572  int h= 16;
2573 
2574  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2575  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2576 
2577  if(w==16 && h==16)
2578  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2579  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2580  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2581  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2582  }else{
2583  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2584  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2585  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2586  }
2587  else
2588  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2589  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2590  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2591 }
2592 
    MpegEncContext *s= *(void**)arg;

    /* Motion-estimation pre-pass over this slice context's MB rows.
     * NOTE(review): the function signature line and the per-MB worker call
     * inside the inner loop are elided from this extract -- verify against
     * the complete source. */

    s->me.pre_pass=1;                       /* flag the ME code: pre pass */
    s->me.dia_size= s->avctx->pre_dia_size; /* use the pre-pass search size */
    s->first_slice_line=1;
    /* the pre pass walks the slice backwards, bottom-right to top-left */
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
        }
        s->first_slice_line=0;  /* only the first iterated row is flagged */
    }

    s->me.pre_pass=0;

    return 0;
}
2611 
    MpegEncContext *s= *(void**)arg;

    /* Main motion-estimation pass over this slice context's MB rows.
     * NOTE(review): the function signature line, the block-index
     * initialization call, and the actual motion-estimation calls around
     * the dangling `else` below are elided from this extract -- the code
     * as shown is incomplete; verify against the complete source. */

    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the four luma block indices by one MB (2 each) */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            else
        }
        s->first_slice_line=0;
    }
    return 0;
}
2638 
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y;

    /* NOTE(review): one line appears to be elided from this extract here --
     * verify against the complete source. */

    /* For every MB of this slice, compute the spatial variance and the mean
     * of the 16x16 luma block and store them in the current picture; the
     * per-slice variance sum is accumulated for later use. */
    for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xx = mb_x * 16;
            int yy = mb_y * 16;
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
            int varc;
            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            /* variance ~ E[x^2] - E[x]^2 in fixed point; the +500 and +128
             * constants are bias/rounding terms from the original code */
            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
            s->me.mb_var_sum_temp += varc;
        }
    }
    return 0;
}
2663 
    /* Finish the current slice: codec-specific stuffing/merging, then
     * byte-align and flush the bitstream writer.
     * NOTE(review): the function signature and several codec-specific calls
     * (MPEG-4 partition merge, MJPEG stuffing, explicit alignment) are
     * elided from this extract -- the code as shown is incomplete; verify
     * against the complete source. */
    if(s->partitioned_frame){
    }

    ff_mpeg4_stuffing(&s->pb);      /* pad the MPEG-4 slice with stuffing */
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
    }

    flush_put_bits(&s->pb);         /* byte-align and flush pending bits */

    /* in pass 1, account alignment/stuffing bits as misc (not texture/mv) */
    if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2681 
{
    /* Fill the most recently reserved 12-byte mb_info slot (see
     * update_mb_info()) with the state at the current macroblock: bit
     * offset into the stream, quantizer, GOB number, MB address, and the
     * first motion vector predictor. hmv2/vmv2 are zero since 4MV is not
     * implemented.
     * NOTE(review): the function signature line is not visible in this
     * extract -- verify against the complete source. */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);        /* bit position of this MB */
    int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2701 
/**
 * Maintain the per-macroblock side-info slots: roughly every s->mb_info
 * bytes of output a new 12-byte record is reserved, and write_mb_info()
 * fills it at the next MB boundary. Called with startcode=1 just before a
 * startcode is written and with startcode=0 at each MB boundary.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;     /* feature disabled */
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;              /* reserve a new info slot */
        s->prev_mb_info = s->last_mb_info;  /* anchor at the last MB boundary */
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* This might have incremented mb_info_size above, and we return
         * without actually writing any info into that slot yet. But in
         * that case, this will be called again after writing the start
         * code, actually writing the mb info. */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;      /* make sure at least one slot exists */
    write_mb_info(s);
}
2724 
/**
 * Grow the encoder's output bitstream buffer when fewer than `threshold`
 * bytes remain. Only reallocates when a single slice context owns the
 * buffer and the buffer is the codec-internal byte buffer; pointers into
 * the old buffer (ptr_lastgob, vbv_delay_ptr) are rebased afterwards.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(EINVAL) if the buffer is still too small.
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* remember offsets of pointers into the old buffer so they can be
         * rebased after reallocation */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;
        int vbv_pos = s->vbv_delay_ptr - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        /* guard against integer overflow of the grown size */
        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        /* NOTE(review): a line freeing the old byte_buffer appears to be
         * elided from this extract -- verify against the complete source. */
        s->avctx->internal->byte_buffer = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob = s->pb.buf + lastgob_pos;
        s->vbv_delay_ptr = s->pb.buf + vbv_pos;
    }
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2758 
2759 static int encode_thread(AVCodecContext *c, void *arg){
2760  MpegEncContext *s= *(void**)arg;
2761  int mb_x, mb_y, pdif = 0;
2762  int chr_h= 16>>s->chroma_y_shift;
2763  int i, j;
2764  MpegEncContext best_s = { 0 }, backup_s;
2765  uint8_t bit_buf[2][MAX_MB_BYTES];
2766  uint8_t bit_buf2[2][MAX_MB_BYTES];
2767  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2768  PutBitContext pb[2], pb2[2], tex_pb[2];
2769 
2771 
2772  for(i=0; i<2; i++){
2773  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2774  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2775  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2776  }
2777 
2778  s->last_bits= put_bits_count(&s->pb);
2779  s->mv_bits=0;
2780  s->misc_bits=0;
2781  s->i_tex_bits=0;
2782  s->p_tex_bits=0;
2783  s->i_count=0;
2784  s->f_count=0;
2785  s->b_count=0;
2786  s->skip_count=0;
2787 
2788  for(i=0; i<3; i++){
2789  /* init last dc values */
2790  /* note: quant matrix value (8) is implied here */
2791  s->last_dc[i] = 128 << s->intra_dc_precision;
2792 
2793  s->current_picture.error[i] = 0;
2794  }
2795  if(s->codec_id==AV_CODEC_ID_AMV){
2796  s->last_dc[0] = 128*8/13;
2797  s->last_dc[1] = 128*8/14;
2798  s->last_dc[2] = 128*8/14;
2799  }
2800  s->mb_skip_run = 0;
2801  memset(s->last_mv, 0, sizeof(s->last_mv));
2802 
2803  s->last_mv_dir = 0;
2804 
2805  switch(s->codec_id){
2806  case AV_CODEC_ID_H263:
2807  case AV_CODEC_ID_H263P:
2808  case AV_CODEC_ID_FLV1:
2809  if (CONFIG_H263_ENCODER)
2811  break;
2812  case AV_CODEC_ID_MPEG4:
2815  break;
2816  }
2817 
2818  s->resync_mb_x=0;
2819  s->resync_mb_y=0;
2820  s->first_slice_line = 1;
2821  s->ptr_lastgob = s->pb.buf;
2822  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2823  s->mb_x=0;
2824  s->mb_y= mb_y;
2825 
2826  ff_set_qscale(s, s->qscale);
2828 
2829  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2830  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2831  int mb_type= s->mb_type[xy];
2832 // int d;
2833  int dmin= INT_MAX;
2834  int dir;
2835  int size_increase = s->avctx->internal->byte_buffer_size/4
2836  + s->mb_width*MAX_MB_BYTES;
2837 
2838  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2839  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2840  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2841  return -1;
2842  }
2843  if(s->data_partitioning){
2844  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2845  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2846  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2847  return -1;
2848  }
2849  }
2850 
2851  s->mb_x = mb_x;
2852  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2854 
2857  xy= s->mb_y*s->mb_stride + s->mb_x;
2858  mb_type= s->mb_type[xy];
2859  }
2860 
2861  /* write gob / video packet header */
2862  if(s->rtp_mode){
2863  int current_packet_size, is_gob_start;
2864 
2865  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2866 
2867  is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2868 
2869  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2870 
2871  switch(s->codec_id){
2872  case AV_CODEC_ID_H263:
2873  case AV_CODEC_ID_H263P:
2874  if(!s->h263_slice_structured)
2875  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2876  break;
2878  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2880  if(s->mb_skip_run) is_gob_start=0;
2881  break;
2882  case AV_CODEC_ID_MJPEG:
2883  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2884  break;
2885  }
2886 
2887  if(is_gob_start){
2888  if(s->start_mb_y != mb_y || mb_x!=0){
2889  write_slice_end(s);
2890 
2893  }
2894  }
2895 
2896  av_assert2((put_bits_count(&s->pb)&7) == 0);
2897  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2898 
2899  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
2900  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2901  int d = 100 / s->error_rate;
2902  if(r % d == 0){
2903  current_packet_size=0;
2904  s->pb.buf_ptr= s->ptr_lastgob;
2905  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2906  }
2907  }
2908 
2909  if (s->avctx->rtp_callback){
2910  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2911  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2912  }
2913  update_mb_info(s, 1);
2914 
2915  switch(s->codec_id){
2916  case AV_CODEC_ID_MPEG4:
2917  if (CONFIG_MPEG4_ENCODER) {
2920  }
2921  break;
2927  }
2928  break;
2929  case AV_CODEC_ID_H263:
2930  case AV_CODEC_ID_H263P:
2931  if (CONFIG_H263_ENCODER)
2932  ff_h263_encode_gob_header(s, mb_y);
2933  break;
2934  }
2935 
2936  if(s->flags&CODEC_FLAG_PASS1){
2937  int bits= put_bits_count(&s->pb);
2938  s->misc_bits+= bits - s->last_bits;
2939  s->last_bits= bits;
2940  }
2941 
2942  s->ptr_lastgob += current_packet_size;
2943  s->first_slice_line=1;
2944  s->resync_mb_x=mb_x;
2945  s->resync_mb_y=mb_y;
2946  }
2947  }
2948 
2949  if( (s->resync_mb_x == s->mb_x)
2950  && s->resync_mb_y+1 == s->mb_y){
2951  s->first_slice_line=0;
2952  }
2953 
2954  s->mb_skipped=0;
2955  s->dquant=0; //only for QP_RD
2956 
2957  update_mb_info(s, 0);
2958 
2959  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2960  int next_block=0;
2961  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2962 
2963  copy_context_before_encode(&backup_s, s, -1);
2964  backup_s.pb= s->pb;
2967  if(s->data_partitioning){
2968  backup_s.pb2= s->pb2;
2969  backup_s.tex_pb= s->tex_pb;
2970  }
2971 
2972  if(mb_type&CANDIDATE_MB_TYPE_INTER){
2973  s->mv_dir = MV_DIR_FORWARD;
2974  s->mv_type = MV_TYPE_16X16;
2975  s->mb_intra= 0;
2976  s->mv[0][0][0] = s->p_mv_table[xy][0];
2977  s->mv[0][0][1] = s->p_mv_table[xy][1];
2978  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2979  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2980  }
2981  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2982  s->mv_dir = MV_DIR_FORWARD;
2983  s->mv_type = MV_TYPE_FIELD;
2984  s->mb_intra= 0;
2985  for(i=0; i<2; i++){
2986  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2987  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2988  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2989  }
2990  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2991  &dmin, &next_block, 0, 0);
2992  }
2993  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2994  s->mv_dir = MV_DIR_FORWARD;
2995  s->mv_type = MV_TYPE_16X16;
2996  s->mb_intra= 0;
2997  s->mv[0][0][0] = 0;
2998  s->mv[0][0][1] = 0;
2999  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3000  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3001  }
3002  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3003  s->mv_dir = MV_DIR_FORWARD;
3004  s->mv_type = MV_TYPE_8X8;
3005  s->mb_intra= 0;
3006  for(i=0; i<4; i++){
3007  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3008  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3009  }
3010  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3011  &dmin, &next_block, 0, 0);
3012  }
3013  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3014  s->mv_dir = MV_DIR_FORWARD;
3015  s->mv_type = MV_TYPE_16X16;
3016  s->mb_intra= 0;
3017  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3018  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3019  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3020  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3021  }
3022  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3023  s->mv_dir = MV_DIR_BACKWARD;
3024  s->mv_type = MV_TYPE_16X16;
3025  s->mb_intra= 0;
3026  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3027  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3028  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3029  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3030  }
3031  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3033  s->mv_type = MV_TYPE_16X16;
3034  s->mb_intra= 0;
3035  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3036  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3037  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3038  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3039  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3040  &dmin, &next_block, 0, 0);
3041  }
3042  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3043  s->mv_dir = MV_DIR_FORWARD;
3044  s->mv_type = MV_TYPE_FIELD;
3045  s->mb_intra= 0;
3046  for(i=0; i<2; i++){
3047  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3048  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3049  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3050  }
3051  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3052  &dmin, &next_block, 0, 0);
3053  }
3054  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3055  s->mv_dir = MV_DIR_BACKWARD;
3056  s->mv_type = MV_TYPE_FIELD;
3057  s->mb_intra= 0;
3058  for(i=0; i<2; i++){
3059  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3060  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3061  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3062  }
3063  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3064  &dmin, &next_block, 0, 0);
3065  }
3066  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3068  s->mv_type = MV_TYPE_FIELD;
3069  s->mb_intra= 0;
3070  for(dir=0; dir<2; dir++){
3071  for(i=0; i<2; i++){
3072  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3073  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3074  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3075  }
3076  }
3077  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3078  &dmin, &next_block, 0, 0);
3079  }
3080  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3081  s->mv_dir = 0;
3082  s->mv_type = MV_TYPE_16X16;
3083  s->mb_intra= 1;
3084  s->mv[0][0][0] = 0;
3085  s->mv[0][0][1] = 0;
3086  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3087  &dmin, &next_block, 0, 0);
3088  if(s->h263_pred || s->h263_aic){
3089  if(best_s.mb_intra)
3090  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3091  else
3092  ff_clean_intra_table_entries(s); //old mode?
3093  }
3094  }
3095 
3096  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3097  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3098  const int last_qp= backup_s.qscale;
3099  int qpi, qp, dc[6];
3100  int16_t ac[6][16];
3101  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3102  static const int dquant_tab[4]={-1,1,-2,2};
3103  int storecoefs = s->mb_intra && s->dc_val[0];
3104 
3105  av_assert2(backup_s.dquant == 0);
3106 
3107  //FIXME intra
3108  s->mv_dir= best_s.mv_dir;
3109  s->mv_type = MV_TYPE_16X16;
3110  s->mb_intra= best_s.mb_intra;
3111  s->mv[0][0][0] = best_s.mv[0][0][0];
3112  s->mv[0][0][1] = best_s.mv[0][0][1];
3113  s->mv[1][0][0] = best_s.mv[1][0][0];
3114  s->mv[1][0][1] = best_s.mv[1][0][1];
3115 
3116  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3117  for(; qpi<4; qpi++){
3118  int dquant= dquant_tab[qpi];
3119  qp= last_qp + dquant;
3120  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3121  continue;
3122  backup_s.dquant= dquant;
3123  if(storecoefs){
3124  for(i=0; i<6; i++){
3125  dc[i]= s->dc_val[0][ s->block_index[i] ];
3126  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3127  }
3128  }
3129 
3130  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3131  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3132  if(best_s.qscale != qp){
3133  if(storecoefs){
3134  for(i=0; i<6; i++){
3135  s->dc_val[0][ s->block_index[i] ]= dc[i];
3136  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3137  }
3138  }
3139  }
3140  }
3141  }
3142  }
3144  int mx= s->b_direct_mv_table[xy][0];
3145  int my= s->b_direct_mv_table[xy][1];
3146 
3147  backup_s.dquant = 0;
3149  s->mb_intra= 0;
3150  ff_mpeg4_set_direct_mv(s, mx, my);
3151  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3152  &dmin, &next_block, mx, my);
3153  }
3155  backup_s.dquant = 0;
3157  s->mb_intra= 0;
3158  ff_mpeg4_set_direct_mv(s, 0, 0);
3159  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3160  &dmin, &next_block, 0, 0);
3161  }
3162  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3163  int coded=0;
3164  for(i=0; i<6; i++)
3165  coded |= s->block_last_index[i];
3166  if(coded){
3167  int mx,my;
3168  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3169  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3170  mx=my=0; //FIXME find the one we actually used
3171  ff_mpeg4_set_direct_mv(s, mx, my);
3172  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3173  mx= s->mv[1][0][0];
3174  my= s->mv[1][0][1];
3175  }else{
3176  mx= s->mv[0][0][0];
3177  my= s->mv[0][0][1];
3178  }
3179 
3180  s->mv_dir= best_s.mv_dir;
3181  s->mv_type = best_s.mv_type;
3182  s->mb_intra= 0;
3183 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3184  s->mv[0][0][1] = best_s.mv[0][0][1];
3185  s->mv[1][0][0] = best_s.mv[1][0][0];
3186  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3187  backup_s.dquant= 0;
3188  s->skipdct=1;
3189  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3190  &dmin, &next_block, mx, my);
3191  s->skipdct=0;
3192  }
3193  }
3194 
3195  s->current_picture.qscale_table[xy] = best_s.qscale;
3196 
3197  copy_context_after_encode(s, &best_s, -1);
3198 
3199  pb_bits_count= put_bits_count(&s->pb);
3200  flush_put_bits(&s->pb);
3201  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3202  s->pb= backup_s.pb;
3203 
3204  if(s->data_partitioning){
3205  pb2_bits_count= put_bits_count(&s->pb2);
3206  flush_put_bits(&s->pb2);
3207  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3208  s->pb2= backup_s.pb2;
3209 
3210  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3211  flush_put_bits(&s->tex_pb);
3212  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3213  s->tex_pb= backup_s.tex_pb;
3214  }
3215  s->last_bits= put_bits_count(&s->pb);
3216 
3217  if (CONFIG_H263_ENCODER &&
3220 
3221  if(next_block==0){ //FIXME 16 vs linesize16
3222  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
3223  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3224  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3225  }
3226 
3228  ff_mpv_decode_mb(s, s->block);
3229  } else {
3230  int motion_x = 0, motion_y = 0;
3232  // only one MB-Type possible
3233 
3234  switch(mb_type){
3236  s->mv_dir = 0;
3237  s->mb_intra= 1;
3238  motion_x= s->mv[0][0][0] = 0;
3239  motion_y= s->mv[0][0][1] = 0;
3240  break;
3242  s->mv_dir = MV_DIR_FORWARD;
3243  s->mb_intra= 0;
3244  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3245  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3246  break;
3248  s->mv_dir = MV_DIR_FORWARD;
3249  s->mv_type = MV_TYPE_FIELD;
3250  s->mb_intra= 0;
3251  for(i=0; i<2; i++){
3252  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3253  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3254  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3255  }
3256  break;
3258  s->mv_dir = MV_DIR_FORWARD;
3259  s->mv_type = MV_TYPE_8X8;
3260  s->mb_intra= 0;
3261  for(i=0; i<4; i++){
3262  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3263  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3264  }
3265  break;
3267  if (CONFIG_MPEG4_ENCODER) {
3269  s->mb_intra= 0;
3270  motion_x=s->b_direct_mv_table[xy][0];
3271  motion_y=s->b_direct_mv_table[xy][1];
3272  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3273  }
3274  break;
3276  if (CONFIG_MPEG4_ENCODER) {
3278  s->mb_intra= 0;
3279  ff_mpeg4_set_direct_mv(s, 0, 0);
3280  }
3281  break;
3284  s->mb_intra= 0;
3285  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3286  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3287  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3288  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3289  break;
3291  s->mv_dir = MV_DIR_BACKWARD;
3292  s->mb_intra= 0;
3293  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3294  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3295  break;
3297  s->mv_dir = MV_DIR_FORWARD;
3298  s->mb_intra= 0;
3299  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3300  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3301  break;
3303  s->mv_dir = MV_DIR_FORWARD;
3304  s->mv_type = MV_TYPE_FIELD;
3305  s->mb_intra= 0;
3306  for(i=0; i<2; i++){
3307  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3308  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3309  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3310  }
3311  break;
3313  s->mv_dir = MV_DIR_BACKWARD;
3314  s->mv_type = MV_TYPE_FIELD;
3315  s->mb_intra= 0;
3316  for(i=0; i<2; i++){
3317  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3318  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3319  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3320  }
3321  break;
3324  s->mv_type = MV_TYPE_FIELD;
3325  s->mb_intra= 0;
3326  for(dir=0; dir<2; dir++){
3327  for(i=0; i<2; i++){
3328  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3329  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3330  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3331  }
3332  }
3333  break;
3334  default:
3335  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3336  }
3337 
3338  encode_mb(s, motion_x, motion_y);
3339 
3340  // RAL: Update last macroblock type
3341  s->last_mv_dir = s->mv_dir;
3342 
3343  if (CONFIG_H263_ENCODER &&
3346 
3347  ff_mpv_decode_mb(s, s->block);
3348  }
3349 
3350  /* clean the MV table in IPS frames for direct mode in B frames */
3351  if(s->mb_intra /* && I,P,S_TYPE */){
3352  s->p_mv_table[xy][0]=0;
3353  s->p_mv_table[xy][1]=0;
3354  }
3355 
3356  if(s->flags&CODEC_FLAG_PSNR){
3357  int w= 16;
3358  int h= 16;
3359 
3360  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3361  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3362 
3363  s->current_picture.error[0] += sse(
3364  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3365  s->dest[0], w, h, s->linesize);
3366  s->current_picture.error[1] += sse(
3367  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3368  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3369  s->current_picture.error[2] += sse(
3370  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3371  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3372  }
3373  if(s->loop_filter){
3376  }
3377  av_dlog(s->avctx, "MB %d %d bits\n",
3378  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3379  }
3380  }
3381 
3382  //not beautiful here but we must write it before flushing so it has to be here
3385 
3386  write_slice_end(s);
3387 
3388  /* Send the last GOB if RTP */
3389  if (s->avctx->rtp_callback) {
3390  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3391  pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3392  /* Call the RTP callback to send the last GOB */
3393  emms_c();
3394  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3395  }
3396 
3397  return 0;
3398 }
3399 
/* Merge a per-slice-context accumulator field from src into dst and reset
 * the src copy.  Wrapped in do { } while (0) so the two statements stay a
 * single statement under an un-braced if/else — the original expansion was
 * a classic multi-statement macro hazard. */
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
/* NOTE(review): the enclosing function header was dropped by the extraction
 * (upstream names this merge_context_after_me(dst, src) — confirm against the
 * repository).  Body: fold the per-slice motion-estimation accumulators of a
 * worker context into the main context, zeroing the worker's copies. */
    MERGE(me.scene_change_score);
    MERGE(me.mc_mb_var_sum_temp);
    MERGE(me.mb_var_sum_temp);
}
3406 
/* NOTE(review): the function header was dropped by the extraction (upstream:
 * merge_context_after_encode(dst, src) — confirm).  Body: accumulate a slice
 * context's bit-accounting / denoiser statistics into the main context, then
 * append the slice's bitstream to the main PutBitContext. */
    int i;

    MERGE(dct_count[0]); //note, the other dct vars are not part of the context
    MERGE(dct_count[1]);
    MERGE(mv_bits);
    MERGE(i_tex_bits);
    MERGE(p_tex_bits);
    MERGE(i_count);
    MERGE(f_count);
    MERGE(b_count);
    MERGE(skip_count);
    MERGE(misc_bits);
    MERGE(er.error_count);
    /* NOTE(review): original lines 3421-3424 were dropped here by the
     * extraction (further MERGE()s in upstream). */

    if(dst->avctx->noise_reduction){
        /* per-coefficient denoiser error sums, intra [0] and inter [1] */
        for(i=0; i<64; i++){
            MERGE(dct_error_sum[0][i]);
            MERGE(dct_error_sum[1][i]);
        }
    }

    /* both bitstreams must be byte-aligned before concatenation */
    assert(put_bits_count(&src->pb) % 8 ==0);
    assert(put_bits_count(&dst->pb) % 8 ==0);
    avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
    flush_put_bits(&dst->pb);
}
3438 
/**
 * Choose lambda/qscale for the upcoming picture and fill the per-MB qscale
 * table when adaptive quantization is on.
 * @param dry_run if nonzero, do not consume the pending next_lambda
 * @return 0 on success, -1 if rate control produced an invalid quality
 * NOTE(review): several lines were dropped by the extraction; gaps marked.
 */
static int estimate_qp(MpegEncContext *s, int dry_run){
    if (s->next_lambda){
        /* [dropped: original lines 3441-3442 — presumably propagate
         * next_lambda into the current picture quality] */
        if(!dry_run) s->next_lambda= 0;
    } else if (!s->fixed_qscale) {
        /* [dropped: original lines 3445-3446 — presumably the rate-control
         * quality estimate] */
        if (s->current_picture.f->quality < 0)
            return -1;
    }

    if(s->adaptive_quant){
        switch(s->codec_id){
        case AV_CODEC_ID_MPEG4:
            /* [dropped: original lines 3454-3455 — MPEG-4 qscale cleanup] */
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
                /* [dropped: original line 3461 — H.263 qscale cleanup] */
            break;
        default:
            ff_init_qscale_tab(s);
        }

        s->lambda= s->lambda_table[0];
        //FIXME broken
    }else
        s->lambda = s->current_picture.f->quality;
    update_qscale(s);
    return 0;
}
3474 
/* must be called before writing the header */
/* NOTE(review): the function signature was dropped by the extraction
 * (upstream: set_frame_distances(MpegEncContext *s) — confirm).  Updates the
 * temporal distances pp_time/pb_time used for B-frame direct-mode scaling,
 * from the current picture's pts. */
    s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;

    if(s->pict_type==AV_PICTURE_TYPE_B){
        /* distance from the previous reference to this B frame's position */
        s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
        assert(s->pb_time > 0 && s->pb_time < s->pp_time);
    }else{
        /* distance between the two most recent non-B pictures */
        s->pp_time= s->time - s->last_non_b_time;
        s->last_non_b_time= s->time;
        assert(s->picture_number==0 || s->pp_time > 0);
    }
}
3489 
/* NOTE(review): the signature line was dropped by the extraction (upstream:
 * static int encode_picture(MpegEncContext *s, int picture_number) — confirm).
 * Drives one picture: qscale estimation, motion estimation across slice
 * contexts, f_code/b_code selection, matrix setup for MJPEG/AMV, picture
 * header writing, and the threaded encode itself.  Numerous original lines
 * were dropped by the extraction; each gap is marked [dropped: NNNN]. */
{
    int i, ret;
    int bits;
    int context_count = s->slice_context_count;

    /* [dropped: 3496] */

    /* Reset the average MB variance */
    s->me.mb_var_sum_temp =
    s->me.mc_mb_var_sum_temp = 0;

    /* we need to initialize some time vars before we can encode b-frames */
    // RAL: Condition added for MPEG1VIDEO
    /* [dropped: 3504-3506 — the conditions guarding the calls below] */
    ff_set_mpeg4_time(s);

    s->me.scene_change_score=0;

//    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion

    if(s->pict_type==AV_PICTURE_TYPE_I){
        if(s->msmpeg4_version >= 3) s->no_rounding=1;
        else                        s->no_rounding=0;
    }else if(s->pict_type!=AV_PICTURE_TYPE_B){
        /* [dropped: 3517 — condition guarding the rounding flip] */
        s->no_rounding ^= 1;
    }

    if(s->flags & CODEC_FLAG_PASS2){
        if (estimate_qp(s,1) < 0)
            return -1;
        ff_get_2pass_fcode(s);
    }else if(!(s->flags & CODEC_FLAG_QSCALE)){
        /* [dropped: 3526] */
        s->lambda= s->last_lambda_for[s->pict_type];
        else
        /* [dropped: 3529] */
        update_qscale(s);
    }

    /* [dropped: 3533-3537] */
    }

    s->mb_intra=0; //for the rate distortion & bit compare functions
    for(i=1; i<context_count; i++){
        /* [dropped: 3542 — duplicate-context update whose result feeds ret] */
        if (ret < 0)
            return ret;
    }

    if(ff_init_me(s)<0)
        return -1;

    /* Estimate motion for every MB */
    if(s->pict_type != AV_PICTURE_TYPE_I){
        s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
        s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
        if (s->pict_type != AV_PICTURE_TYPE_B) {
            if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
            }
        }

        s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
        /* I-Frame */
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            /* [dropped: 3564 — loop body marking every MB intra] */

        if(!s->fixed_qscale){
            /* finding spatial complexity for I-frame rate control */
            s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
        }
    }
    for(i=1; i<context_count; i++){
        /* [dropped: 3572 — merge of per-slice ME statistics] */
    }
    /* [dropped: 3574] */
    s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
    emms_c();

    /* [dropped: 3578-3579 — scene-change test opening this block] */
    for(i=0; i<s->mb_stride*s->mb_height; i++)
        /* [dropped: 3581 — loop body] */
    if(s->msmpeg4_version >= 3)
        s->no_rounding=1;
    av_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
        /* [dropped: 3585 — remaining av_dlog arguments] */
    }

    if(!s->umvplus){
        /* [dropped: 3589-3590] */

        /* [dropped: 3592 — interlaced-ME condition] */
        int a,b;
        a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
        /* [dropped: 3595 — second field's best fcode] */
        s->f_code= FFMAX3(s->f_code, a, b);
        }

        ff_fix_long_p_mvs(s);
        /* [dropped: 3600-3601] */
        int j;
        for(i=0; i<2; i++){
            for(j=0; j<2; j++)
                /* [dropped: 3605-3606 — per-field MV clamping] */
        }
    }
}

    if(s->pict_type==AV_PICTURE_TYPE_B){
        int a, b;

        /* [dropped: 3614-3615 — forward fcode candidates] */
        s->f_code = FFMAX(a, b);

        /* [dropped: 3618-3619 — backward fcode candidates] */
        s->b_code = FFMAX(a, b);

        /* [dropped: 3622-3626] */
        int dir, j;
        for(dir=0; dir<2; dir++){
            for(i=0; i<2; i++){
                for(j=0; j<2; j++){
                    /* [dropped: 3631-3632 — candidate type for this dir] */
                    ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                    s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
                }
            }
        }
    }
}

    if (estimate_qp(s, 0) < 0)
        return -1;

    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
        s->qscale= 3; //reduce clipping problems

    if (s->out_format == FMT_MJPEG) {
        const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
        const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;

        if (s->avctx->intra_matrix) {
            chroma_matrix =
            luma_matrix = s->avctx->intra_matrix;
        }
        if (s->avctx->chroma_intra_matrix)
            chroma_matrix = s->avctx->chroma_intra_matrix;

        /* for mjpeg, we do include qscale in the matrix */
        for(i=1;i<64;i++){
            int j = s->idsp.idct_permutation[i];

            s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
            s->       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->qscale) >> 3);
        }
        s->y_dc_scale_table=
        /* [dropped: 3667] */
        s->chroma_intra_matrix[0] =
        /* [dropped: 3669-3670 — ff_convert_matrix call head] */
            s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        /* [dropped: 3672 — second ff_convert_matrix call head] */
            s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }
    if(s->codec_id == AV_CODEC_ID_AMV){
        /* AMV uses fixed DC scale tables */
        static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
        static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
        for(i=1;i<64;i++){
            int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];

            s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
            s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
        }
        s->y_dc_scale_table= y;
        s->c_dc_scale_table= c;
        s->intra_matrix[0] = 13;
        s->chroma_intra_matrix[0] = 14;
        /* [dropped: 3689 — ff_convert_matrix call head] */
            s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        /* [dropped: 3691 — second ff_convert_matrix call head] */
            s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }

    //FIXME var duplication
    /* [dropped: 3697] */
    s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
    /* [dropped: 3699-3700] */

    if (s->current_picture.f->key_frame)
        s->picture_in_gop_number=0;

    s->mb_x = s->mb_y = 0;
    s->last_bits= put_bits_count(&s->pb);
    switch(s->out_format) {
    case FMT_MJPEG:
        /* [dropped: 3709-3711 — MJPEG picture header call] */
        break;
    case FMT_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_picture_header(s, picture_number);
        break;
    case FMT_H263:
        /* [dropped: 3718 — WMV2 condition] */
            ff_wmv2_encode_picture_header(s, picture_number);
        else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
            ff_msmpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
            ret = ff_mpeg4_encode_picture_header(s, picture_number);
            if (ret < 0)
                return ret;
        } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
            ret = ff_rv10_encode_picture_header(s, picture_number);
            if (ret < 0)
                return ret;
        }
        else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
            ff_rv20_encode_picture_header(s, picture_number);
        else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
            ff_flv_encode_picture_header(s, picture_number);
        else if (CONFIG_H263_ENCODER)
            ff_h263_encode_picture_header(s, picture_number);
        break;
    case FMT_MPEG1:
        /* [dropped: 3739 — MPEG-1/2 encoder condition] */
        ff_mpeg1_encode_picture_header(s, picture_number);
        break;
    default:
        av_assert0(0);
    }
    bits= put_bits_count(&s->pb);
    s->header_bits= bits - s->last_bits;

    for(i=1; i<context_count; i++){
        /* [dropped: 3749 — per-slice PutBitContext init] */
    }
    s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        if (s->pb.buf_end == s->thread_context[i]->pb.buf)
            set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
        /* [dropped: 3755 — merge of per-slice encode results] */
    }
    emms_c();
    return 0;
}
3760 
3761 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3762  const int intra= s->mb_intra;
3763  int i;
3764 
3765  s->dct_count[intra]++;
3766 
3767  for(i=0; i<64; i++){
3768  int level= block[i];
3769 
3770  if(level){
3771  if(level>0){
3772  s->dct_error_sum[intra][i] += level;
3773  level -= s->dct_offset[intra][i];
3774  if(level<0) level=0;
3775  }else{
3776  s->dct_error_sum[intra][i] -= level;
3777  level += s->dct_offset[intra][i];
3778  if(level>0) level=0;
3779  }
3780  block[i]= level;
3781  }
3782  }
3783 }
3784 
/* NOTE(review): the first line of the signature was dropped by the extraction
 * (upstream: static int dct_quantize_trellis_c(MpegEncContext *s, ... —
 * confirm).  Trellis quantizer: after the forward DCT it builds up to two
 * candidate quantized levels per coefficient, then runs a dynamic program
 * over scan positions (survivor paths) minimizing distortion + lambda*bits,
 * and finally writes the winning run/level chain back into block[].
 * Returns the index of the last nonzero coefficient (or -1 / 0 per intra
 * convention); *overflow is set if any level exceeded max_qcoeff. */
                        int16_t *block, int n,
                        int qscale, int *overflow){
    const int *qmat;
    const uint16_t *matrix;
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];
    int level_tab[65];
    int score_tab[65];     /* best path cost ending just before position i */
    int survivor[65];      /* candidate predecessor positions still viable */
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];      /* up to two candidate levels per position */
    int coeff_count[64];
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->mb_intra) {
        int q;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;
            qadd=0;
        }

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
        if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
            bias= 1<<(QMAT_SHIFT-1);

        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        matrix = s->inter_matrix;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* find the last coefficient that survives quantization */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* build the candidate level(s) for every position up to last_non_zero */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
//                coeff[2][k]= level-2;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
//                coeff[2][k]= -level+2;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            /* below threshold: the only candidate is +-1 with the input sign */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max; //overflow might have happened

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* dynamic program over scan positions */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->fdsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* reconstruct the dequantized value for this codec family */
            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= alevel*qmul + qadd;
            } else if(s->out_format == FMT_MJPEG) {
                j = s->idsp.idct_permutation[scantable[i]];
                unquant_coeff = alevel * matrix[j] * 8;
            }else{ //MPEG1
                j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
                if(s->mb_intra){
                    unquant_coeff = (int)(  alevel  * qscale * matrix[j]) >> 3;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
                }else{
                    unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) matrix[j])) >> 4;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits the VLC table: cost = distortion + bits*lambda */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* escape-coded level */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if(i) score += lambda*2; //FIXME exacter?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;

    if(last_non_zero == 0 && start_i == 0){
        /* special case: only the first (inter DC-position) coefficient left */
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= (alevel*qmul + qadd)>>3;
            }else{ //MPEG1
                unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) matrix[0])) >> 4;
                unquant_coeff =   (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else                    score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;
    }

    /* backtrack the winning path into block[] */
    i= last_i;
    av_assert2(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
4088 
//#define REFINE_STATS 1
/* IDCT-permuted DCT basis vectors used by dct_quantize_refine(); filled
 * lazily by build_basis() (all-zero until then). */
static int16_t basis[64][64];
4091 
4092 static void build_basis(uint8_t *perm){
4093  int i, j, x, y;
4094  emms_c();
4095  for(i=0; i<8; i++){
4096  for(j=0; j<8; j++){
4097  for(y=0; y<8; y++){
4098  for(x=0; x<8; x++){
4099  double s= 0.25*(1<<BASIS_SHIFT);
4100  int index= 8*i + j;
4101  int perm_index= perm[index];
4102  if(i==0) s*= sqrt(0.5);
4103  if(j==0) s*= sqrt(0.5);
4104  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4105  }
4106  }
4107  }
4108  }
4109 }
4110 
/**
 * Iteratively refine an already-quantized block: repeatedly try +-1 changes
 * to each coefficient, scoring reconstruction error (via the basis tables)
 * plus lambda-weighted VLC bit-cost deltas, and keep the best change until
 * no single change improves the score.
 * @return index of the last nonzero coefficient after refinement
 * NOTE(review): one line was dropped by the extraction (marked below).
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
#ifdef REFINE_STATS
static int count=0;
static int after_last=0;
static int to_zero=0;
static int from_zero=0;
static int raise=0;
static int lower=0;
static int messed_sign=0;
#endif

    if(basis[0][0] == 0)
        /* NOTE(review): original line 4139 was dropped here — presumably the
         * lazy build_basis(...) call guarded by this if; as listed, the next
         * statement becomes the if body.  Confirm against upstream. */

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

#ifdef REFINE_STATS
{START_TIMER
#endif
    /* rem[] holds the (shifted) residual between reconstruction and source */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME  use orig dirrectly instead of copying to rem[]
    }
#ifdef REFINE_STATS
STOP_TIMER("memset rem[]")}
#endif
    /* map the perceptual weights into the 16..63 range and derive lambda */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
#ifdef REFINE_STATS
{START_TIMER
#endif
    /* build the run-length table and add the current reconstruction to rem */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("init rem[]")
}
}

{START_TIMER
#endif
    /* greedy hill-climb: keep applying the single best +-1 change */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
#ifdef REFINE_STATS
{START_TIMER
#endif
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
#ifdef REFINE_STATS
{START_TIMER
#endif
            /* gradient of the weighted residual, used to prune sign-hurting
             * zero->nonzero changes below */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
#ifdef REFINE_STATS
STOP_TIMER("rem*w*w")}
{START_TIMER
#endif
            s->fdsp.fdct(d1);
#ifdef REFINE_STATS
STOP_TIMER("dct")}
#endif
        }

        if(start_i){
            /* intra: also consider nudging the DC coefficient */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                    continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* nonzero -> nonzero: only the level VLC changes */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* zero -> +-1: a run is split in two */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +-1 -> zero: two runs merge into one */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }
#ifdef REFINE_STATS
STOP_TIMER("iterative step")}
#endif

        if(best_change){
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
#ifdef REFINE_STATS
after_last++;
#endif
            }else{
#ifdef REFINE_STATS
if(block[j]){
    if(block[j] - best_change){
        if(FFABS(block[j]) > FFABS(block[j] - best_change)){
            raise++;
        }else{
            lower++;
        }
    }else{
        from_zero++;
    }
}else{
    to_zero++;
}
#endif
                /* the change may have zeroed the old last coefficient */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }
#ifdef REFINE_STATS
count++;
if(256*256*256*64 % count == 0){
    av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
}
#endif
            /* rebuild the run table and fold the change into rem[] */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("iterative search")
}
}
#endif

    return last_non_zero;
}
4490 
/* NOTE(review): the first line of the signature was dropped by the extraction
 * (upstream: int ff_dct_quantize_c(MpegEncContext *s, ... — confirm).
 * Plain (non-trellis) quantizer: forward DCT, optional denoise, threshold
 * and quantize each coefficient in scan order; returns the last nonzero
 * index and flags *overflow when a level exceeds max_qcoeff. */
                        int16_t *block, int n,
                        int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        /* [dropped: original line 4523 — intra bias assignment] */
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        /* [dropped: original line 4528 — inter bias assignment] */
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* scan backwards for the last coefficient surviving quantization,
     * zeroing everything after it */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        /* [dropped: original line 4566 — head of the block-permute call] */
                          scantable, last_non_zero);

    return last_non_zero;
}
4571 
/* Helpers for the AVOption tables below: OFFSET() locates an option's
 * storage inside MpegEncContext, VE marks video encoding options. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4574 static const AVOption h263_options[] = {
4575  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4576  { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4577  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4579  { NULL },
4580 };
4581 
4582 static const AVClass h263_class = {
4583  .class_name = "H.263 encoder",
4584  .item_name = av_default_item_name,
4585  .option = h263_options,
4586  .version = LIBAVUTIL_VERSION_INT,
4587 };
4588 
4590  .name = "h263",
4591  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4592  .type = AVMEDIA_TYPE_VIDEO,
4593  .id = AV_CODEC_ID_H263,
4594  .priv_data_size = sizeof(MpegEncContext),
4596  .encode2 = ff_mpv_encode_picture,
4597  .close = ff_mpv_encode_end,
4598  .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4599  .priv_class = &h263_class,
4600 };
4601 
4602 static const AVOption h263p_options[] = {
4603  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4604  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4605  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4606  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4608  { NULL },
4609 };
4610 static const AVClass h263p_class = {
4611  .class_name = "H.263p encoder",
4612  .item_name = av_default_item_name,
4613  .option = h263p_options,
4614  .version = LIBAVUTIL_VERSION_INT,
4615 };
4616 
4618  .name = "h263p",
4619  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4620  .type = AVMEDIA_TYPE_VIDEO,
4621  .id = AV_CODEC_ID_H263P,
4622  .priv_data_size = sizeof(MpegEncContext),
4624  .encode2 = ff_mpv_encode_picture,
4625  .close = ff_mpv_encode_end,
4626  .capabilities = CODEC_CAP_SLICE_THREADS,
4627  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4628  .priv_class = &h263p_class,
4629 };
4630 
4631 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4632 
4634  .name = "msmpeg4v2",
4635  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4636  .type = AVMEDIA_TYPE_VIDEO,
4637  .id = AV_CODEC_ID_MSMPEG4V2,
4638  .priv_data_size = sizeof(MpegEncContext),
4640  .encode2 = ff_mpv_encode_picture,
4641  .close = ff_mpv_encode_end,
4642  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4643  .priv_class = &msmpeg4v2_class,
4644 };
4645 
4646 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4647 
4649  .name = "msmpeg4",
4650  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4651  .type = AVMEDIA_TYPE_VIDEO,
4652  .id = AV_CODEC_ID_MSMPEG4V3,
4653  .priv_data_size = sizeof(MpegEncContext),
4655  .encode2 = ff_mpv_encode_picture,
4656  .close = ff_mpv_encode_end,
4657  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4658  .priv_class = &msmpeg4v3_class,
4659 };
4660 
4662 
4664  .name = "wmv1",
4665  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4666  .type = AVMEDIA_TYPE_VIDEO,
4667  .id = AV_CODEC_ID_WMV1,
4668  .priv_data_size = sizeof(MpegEncContext),
4670  .encode2 = ff_mpv_encode_picture,
4671  .close = ff_mpv_encode_end,
4672  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4673  .priv_class = &wmv1_class,
4674 };
int last_time_base
Definition: mpegvideo.h:520
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:936
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:107
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1004
int chroma_elim_threshold
Definition: mpegvideo.h:251
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:472
IDCTDSPContext idsp
Definition: mpegvideo.h:367
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
Definition: mpegvideo.c:776
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:475
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and/or allocate data.
Definition: utils.c:1724
const struct AVCodec * codec
Definition: avcodec.h:1248
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:300
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:689
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:3378
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1511
float v
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:262
const char * s
Definition: avisynth_c.h:669
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:72
#define RECON_SHIFT
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:103
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG4.
Definition: avutil.h:270
#define CONFIG_WMV2_ENCODER
Definition: config.h:1284
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:649
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:281
int esc3_level_length
Definition: mpegvideo.h:570
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2029
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
< number of bits to represent the fractional part of time (encoder only)
Definition: mpegvideo.h:519
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:102
This structure describes decoded (raw) audio or video data.
Definition: frame.h:163
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:2919
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Allocate a Picture.
Definition: mpegvideo.c:652
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
Definition: mpegvideo.h:385
int mpeg_quant
0-> h263 quant 1-> mpeg quant
Definition: avcodec.h:1538
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegvideo.h:337
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
AVOption.
Definition: opt.h:255
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:414
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:288
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:404
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:887
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:323
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideo.h:132
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2661
#define CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:735
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG1 & B-frame MPEG4
Definition: mpegvideo.h:413
int pre_pass
= 1 for the pre pass
Definition: mpegvideo.h:181
#define CONFIG_RV10_ENCODER
Definition: config.h:1269
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:688
#define FF_MPV_GENERIC_CLASS(name)
Definition: mpegvideo.h:731
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:684
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:73
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:198
#define FF_CMP_NSSE
Definition: avcodec.h:1657
attribute_deprecated int rc_qmod_freq
Definition: avcodec.h:2294
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:181
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:110
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:433
const char * g
Definition: vf_curves.c:108
#define CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:734
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:760
AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:2743
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:289
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideo.h:123
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:462
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block.
Definition: mpegvideo.c:3418
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:436
static int estimate_qp(MpegEncContext *s, int dry_run)
int acc
Definition: yuv2rgb.c:532
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1501
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:329
MJPEG encoder.
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:267
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:699
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1958
#define me
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:568
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:108
int num
numerator
Definition: rational.h:44
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:2461
int size
Definition: avcodec.h:1161
attribute_deprecated int lmax
Definition: avcodec.h:2398
enum AVCodecID codec_id
Definition: mpegvideo.h:244
const char * b
Definition: vf_curves.c:109
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:730
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1246
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:63
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:501
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:47
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:122
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1621
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:380
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:39
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1442
#define FF_MPV_FLAG_NAQ
Definition: mpegvideo.h:692
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
Definition: mpegvideo.h:391
static int select_input_picture(MpegEncContext *s)
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:442
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional FF_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:139
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:1646
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int coded_score[12]
Definition: mpegvideo.h:454
mpegvideo header.
#define FF_ARRAY_ELEMS(a)
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
int rtp_payload_size
Definition: avcodec.h:2463
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:70
int scene_change_score
Definition: mpegvideo.h:196
int mpv_flags
flags set by private options
Definition: mpegvideo.h:655
uint8_t permutated[64]
Definition: idctdsp.h:31
int intra_quant_bias
intra quantizer bias
Definition: avcodec.h:1740
static const AVClass h263_class
uint8_t run
Definition: svq3.c:149
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2725
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:445
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
Definition: mpegvideo.h:543
const uint16_t ff_h263_format[8][2]
Definition: h263data.h:239
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:452
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:268
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:120
int frame_skip_cmp
frame skip comparison function
Definition: avcodec.h:2427
#define FF_LAMBDA_SHIFT
Definition: avutil.h:218
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:372
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: avcodec.h:1015
AVCodec.
Definition: avcodec.h:3173
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:521
uint8_t(* mv_penalty)[MAX_DMV *2+1]
amount of bits needed to encode a MV
Definition: mpegvideo.h:202
int qscale
QP.
Definition: mpegvideo.h:341
int h263_aic
Advanded INTRA Coding (AIC)
Definition: mpegvideo.h:219
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
Definition: mpegvideo.h:387
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:313
int chroma_x_shift
Definition: mpegvideo.h:606
#define INPLACE_OFFSET
Definition: mpegvideo.h:80
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:246
uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2975
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:643
int field_select[2][2]
Definition: mpegvideo.h:412
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:647
#define CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:713
int scenechange_threshold
scene change detection threshold 0 is default, larger means fewer detected scene changes.
Definition: avcodec.h:1800
uint32_t ff_square_tab[512]
Definition: me_cmp.c:32
#define CONFIG_RV20_ENCODER
Definition: config.h:1270
#define FFALIGN(x, a)
Definition: common.h:86
int quant_precision
Definition: mpegvideo.h:532
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:3072
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:645
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1367
int modified_quant
Definition: mpegvideo.h:513
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:691
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:357
float rc_buffer_aggressivity
Definition: mpegvideo.h:666
int b_frame_score
Definition: mpegvideo.h:145
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:744
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:101
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:1582
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:263
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:512
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:622
int64_t time
time of current frame
Definition: mpegvideo.h:522
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1311
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (mpeg4) ...
Definition: mpegvideo.h:399
if()
Definition: avfilter.c:975
uint8_t bits
Definition: crc.c:295
attribute_deprecated const char * rc_eq
Definition: avcodec.h:2317
attribute_deprecated float rc_buffer_aggressivity
Definition: avcodec.h:2339
uint8_t
#define av_cold
Definition: attributes.h:74
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:272
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:102
void(* get_pixels)(int16_t *block, const uint8_t *pixels, ptrdiff_t line_size)
Definition: pixblockdsp.h:27
AVOptions.
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:653
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:541
enum OutputFormat out_format
output format
Definition: mpegvideo.h:236
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:111
#define CONFIG_FAANDCT
Definition: config.h:514
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:468
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:200
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
uint16_t * chroma_intra_matrix
custom intra quantization matrix Code outside libavcodec should access this field using av_codec_g/se...
Definition: avcodec.h:3122
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Multithreading support functions.
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:1697
AVCodec ff_h263_encoder
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
#define emms_c()
Definition: internal.h:50
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:249
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:486
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1353
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
Definition: mpegvideo.h:419
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:112
int interlaced_dct
Definition: mpegvideo.h:611
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:458
int me_cmp
motion estimation comparison function
Definition: avcodec.h:1628
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:3359
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:62
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:315
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:2089
#define CHROMA_420
Definition: mpegvideo.h:603
int intra_dc_precision
Definition: mpegvideo.h:592
int repeat_first_field
Definition: mpegvideo.h:600
static AVFrame * frame
quarterpel DSP functions
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1247
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:388
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: avcodec.h:1160
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_...
Definition: pixfmt.h:81
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:524
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:191
#define lrintf(x)
Definition: libm_mips.h:70
#define CODEC_FLAG_MV0
Definition: avcodec.h:725
const uint8_t * scantable
Definition: idctdsp.h:30
int flags2
AVCodecContext.flags2.
Definition: mpegvideo.h:248
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:427
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:264
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:71
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:1561
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2492
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:443
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:690
#define CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:759
#define MAX_LEVEL
Definition: rl.h:35
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:52
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:347
int flipflop_rounding
Definition: mpegvideo.h:567
#define CHROMA_444
Definition: mpegvideo.h:605
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & mpeg1 specific
Definition: mpegvideo.h:581
uint8_t * mb_info_ptr
Definition: mpegvideo.h:504
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:821
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:3470
#define ROUNDED_DIV(a, b)
Definition: common.h:55
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:459
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1206
#define FF_CMP_VSSE
Definition: avcodec.h:1656
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:835
#define CODEC_FLAG_LOOP_FILTER
loop filter
Definition: avcodec.h:760
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:457
#define FF_MPV_FLAG_MV0
Definition: mpegvideo.h:693
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:234
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2830
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:322
enum AVCodecID id
Definition: avcodec.h:3187
int h263_plus
h263 plus headers
Definition: mpegvideo.h:241
H263DSPContext h263dsp
Definition: mpegvideo.h:374
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:291
int last_non_b_pict_type
used for mpeg4 gmc b-frames & ratecontrol
Definition: mpegvideo.h:352
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:73
int width
width and height of the video frame
Definition: frame.h:212
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:175
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:227
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1531
int last_dc[3]
last DC values for MPEG1
Definition: mpegvideo.h:320
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
attribute_deprecated float rc_initial_cplx
Definition: avcodec.h:2342
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:450
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1777
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:761
int64_t total_bits
Definition: mpegvideo.h:471
#define PTRDIFF_SPECIFIER
Definition: internal.h:248
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:330
#define ARCH_X86
Definition: config.h:38
int chroma_y_shift
Definition: mpegvideo.h:607
int strict_std_compliance
strictly follow the std (MPEG4, ...)
Definition: mpegvideo.h:252
av_default_item_name
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:537
#define AVERROR(e)
Definition: error.h:43
int frame_skip_threshold
frame skip threshold
Definition: avcodec.h:2406
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:85
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:1634
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
int qmax
maximum quantizer
Definition: avcodec.h:2275
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2057
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:43
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:360
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:180
ERContext er
Definition: mpegvideo.h:679
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:356
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegvideo.h:148
const char * r
Definition: vf_curves.c:107
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:196
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:336
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:641
PixblockDSPContext pdsp
Definition: mpegvideo.h:371
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:447
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:635
int h263_slice_structured
Definition: mpegvideo.h:511
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1333
uint8_t * buf
Definition: put_bits.h:38
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:223
#define CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:710
uint64_t error[AV_NUM_DATA_POINTERS]
Definition: mpegvideo.h:151
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2325
int64_t av_gcd(int64_t a, int64_t b)
Return the greatest common divisor of a and b.
Definition: mathematics.c:55
GLsizei GLsizei * length
Definition: opengl_enc.c:115
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:370
const char * name
Name of the codec implementation.
Definition: avcodec.h:3180
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:533
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:426
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:486
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:538
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
Deallocate a picture.
Definition: mpegvideo.c:709
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:394
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
GLsizei count
Definition: opengl_enc.c:109
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1609
#define FFMAX(a, b)
Definition: common.h:79
Libavcodec external API header.
#define CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:755
int64_t mb_var_sum_temp
Definition: mpegvideo.h:195
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1166
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:72
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:490
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2302
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in h263 (limit difference to -2..2)
Definition: ituh263enc.c:265
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:261
int * lambda_table
Definition: mpegvideo.h:345
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1828
int me_penalty_compensation
Definition: avcodec.h:1871
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
common internal API header
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:628
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:446
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2546
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:139
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
#define CHROMA_422
Definition: mpegvideo.h:604
int bit_rate
the average bitrate
Definition: avcodec.h:1303
float border_masking
Definition: mpegvideo.h:667
int progressive_frame
Definition: mpegvideo.h:609
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:234
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int ff_h263_get_gob_height(MpegEncContext *s)
Get the GOB height based on picture height.
Definition: h263.c:375
#define FFMIN(a, b)
Definition: common.h:81
int display_picture_number
picture number in display order
Definition: frame.h:270
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:463
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:583
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:245
float y
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in mpeg4
#define MAX_MB_BYTES
Definition: mpegvideo.h:78
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:80
int me_method
ME algorithm.
Definition: mpegvideo.h:395
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:147
int umvplus
== H263+ && unrestricted_mv
Definition: mpegvideo.h:509
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:309
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:440
ret
Definition: avfilter.c:974
int width
picture width / height.
Definition: avcodec.h:1412
int(* pix_sum)(uint8_t *pix, int line_size)
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:111
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:319
Picture.
Definition: mpegvideo.h:103
int alternate_scan
Definition: mpegvideo.h:598
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:2350
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:741
int b_frame_strategy
Definition: avcodec.h:1516
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:461
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:68
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:275
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:573
#define CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:754
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:146
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:218
MotionEstContext me
Definition: mpegvideo.h:417
int n
Definition: avisynth_c.h:589
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:94
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:1775
#define CONFIG_FLV_ENCODER
Definition: config.h:1237
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
attribute_deprecated float rc_qsquish
Definition: avcodec.h:2289
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:333
#define MAX_B_FRAMES
Definition: mpegvideo.h:74
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:444
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:261
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2751
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:428
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:515
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:432
int inter_quant_bias
inter quantizer bias
Definition: avcodec.h:1748
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:74
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:224
AVCodec ff_h263p_encoder
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:401
int frame_skip_factor
frame skip factor
Definition: avcodec.h:2413
int first_slice_line
used in mpeg4 too to handle resync markers
Definition: mpegvideo.h:566
int frame_pred_frame_dct
Definition: mpegvideo.h:593
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideo.h:126
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:25
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:398
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2545
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideo.c:104
int coded_picture_number
picture number in bitstream order
Definition: frame.h:266
#define AV_LOG_INFO
Standard information.
Definition: log.h:186
uint16_t inter_matrix[64]
Definition: mpegvideo.h:437
#define FF_LAMBDA_SCALE
Definition: avutil.h:219
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: frame.h:343
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:523
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:236
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:65
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:290
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:65
AVS_Value src
Definition: avisynth_c.h:524
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:344
#define CODEC_FLAG_NORMALIZE_AQP
Definition: avcodec.h:752
void ff_faandct(int16_t *data)
Definition: faandct.c:123
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
void ff_free_picture_tables(Picture *pic)
Definition: mpegvideo.c:561
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:46
int h263_flv
use flv h263 header
Definition: mpegvideo.h:242
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:269
enum AVCodecID codec_id
Definition: avcodec.h:1256
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:64
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:265
static av_const unsigned int ff_sqrt(unsigned int a)
Definition: mathops.h:214
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:191
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:81
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:370
#define START_TIMER
Definition: timer.h:86
int frame_bits
number of bits used for the previously encoded frame
Definition: avcodec.h:2485
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:448
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
main external API structure.
Definition: avcodec.h:1239
ScanTable intra_scantable
Definition: mpegvideo.h:223
int pre_me
prepass for motion estimation
Definition: avcodec.h:1683
int qmin
minimum quantizer
Definition: avcodec.h:2268
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:232
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:279
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:275
FDCTDSPContext fdsp
Definition: mpegvideo.h:364
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:1575
float rc_qmod_amp
Definition: mpegvideo.h:663
int luma_elim_threshold
Definition: mpegvideo.h:250
GLint GLenum type
Definition: opengl_enc.c:105
void ff_fix_long_p_mvs(MpegEncContext *s)
Definition: motion_est.c:1672
Picture * picture
main picture buffer
Definition: mpegvideo.h:271
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:536
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:449
int progressive_sequence
Definition: mpegvideo.h:586
uint16_t * intra_matrix
custom intra quantization matrix
Definition: avcodec.h:1785
h261codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:239
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:66
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:473
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced b-frame encoding.
Definition: mpegvideo.h:392
int(* pix_norm1)(uint8_t *pix, int line_size)
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideo.h:143
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:105
#define FF_DEFAULT_QUANT_BIAS
Definition: avcodec.h:1741
struct AVFrame * f
Definition: mpegvideo.h:104
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:260
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:117
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:54
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:270
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:502
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
#define STRIDE_ALIGN
Definition: internal.h:45
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:115
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1329
int frame_skip_exp
frame skip exponent
Definition: avcodec.h:2420
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:113
#define MAX_MV
Definition: mpegvideo.h:68
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:423
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int f_code
forward MV resolution
Definition: mpegvideo.h:375
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1072
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:115
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:127
#define MAX_FCODE
Definition: mpegvideo.h:67
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
#define MAX_DMV
Definition: mpegvideo.h:69
#define CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:762
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1299
#define MV_DIR_FORWARD
Definition: mpegvideo.h:397
uint16_t * inter_matrix
custom inter quantization matrix
Definition: avcodec.h:1792
int max_b_frames
max number of b-frames for encoding
Definition: mpegvideo.h:249
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:349
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
int bit_rate
wanted bit rate
Definition: mpegvideo.h:235
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int last_mv_dir
last mv_dir, used for b frame encoding
Definition: mpegvideo.h:582
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:265
int h263_pred
use mpeg4/h263 ac/dc predictions
Definition: mpegvideo.h:237
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:389
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:377
#define EDGE_WIDTH
Definition: mpegvideo.h:82
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:1589
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:995
static int64_t pts
Global timestamp for the audio frames.
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:1568
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:304
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:393
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
Definition: mpegvideo.h:390
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:174
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:324
uint8_t level
Definition: svq3.c:150
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1776
me_cmp_func sad[6]
Definition: me_cmp.h:56
int64_t mc_mb_var_sum_temp
Definition: mpegvideo.h:194
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:411
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
Definition: mpegvideo.h:386
me_cmp_func sse[6]
Definition: me_cmp.h:57
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_dlog(ac->avr,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
int noise_reduction
noise reduction strength
Definition: avcodec.h:1807
static int estimate_motion_thread(AVCodecContext *c, void *arg)
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:213
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:318
char * rc_eq
Definition: mpegvideo.h:670
int8_t * qscale_table
Definition: mpegvideo.h:108
#define MAX_RUN
Definition: rl.h:34
struct AVCodecContext * avctx
Definition: mpegvideo.h:230
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1433
PutBitContext pb
bit output
Definition: mpegvideo.h:286
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1249
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1248
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:1640
int quantizer_noise_shaping
Definition: mpegvideo.h:656
int(* dct_error_sum)[64]
Definition: mpegvideo.h:466
MECmpContext mecc
Definition: mpegvideo.h:368
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
float rc_initial_cplx
Definition: mpegvideo.h:665
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:79
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1778
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:868
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:265
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:106
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:736
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.h:262
uint8_t * dest[3]
Definition: mpegvideo.h:430
int shared
Definition: mpegvideo.h:149
static double c[64]
int last_pict_type
Definition: mpegvideo.h:351
#define CONFIG_H261_ENCODER
Definition: config.h:1239
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:446
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:346
static int16_t basis[64][64]
attribute_deprecated float border_masking
Definition: avcodec.h:1849
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:283
static int score_tab[256]
Definition: zmbvenc.c:59
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:297
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:317
Bi-dir predicted.
Definition: avutil.h:269
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
Definition: pixfmt.h:82
float rc_qsquish
ratecontrol qmin qmax limiting method 0-> clipping, 1-> use a nice continuous function to limit qscal...
Definition: mpegvideo.h:662
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideo.h:283
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:314
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
denominator
Definition: rational.h:45
#define CONFIG_H263_ENCODER
Definition: config.h:1240
attribute_deprecated float rc_qmod_amp
Definition: avcodec.h:2292
#define CONFIG_H263P_ENCODER
Definition: config.h:1241
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (h263)
Definition: mpegvideo.h:325
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:920
int trellis
trellis RD quantization
Definition: avcodec.h:2434
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:637
void ff_mpeg4_stuffing(PutBitContext *pbc)
add mpeg4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:100
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:628
#define STOP_TIMER(id)
Definition: timer.h:87
int slices
Number of slices.
Definition: avcodec.h:1974
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:1281
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:76
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:409
#define PICT_FRAME
Definition: mpegutils.h:35
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:487
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:1372
void(* diff_pixels)(int16_t *block, const uint8_t *s1, const uint8_t *s2, int stride)
Definition: pixblockdsp.h:30
int picture_structure
Definition: mpegvideo.h:590
int dia_size
ME diamond size & shape.
Definition: avcodec.h:1669
#define av_free(p)
int b_sensitivity
Adjust sensitivity of b_frame_strategy 1.
Definition: avcodec.h:1930
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:2791
VideoDSPContext vdsp
Definition: mpegvideo.h:373
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:80
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
#define av_log2
Definition: intmath.h:105
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1619
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:491
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1289
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:627
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:178
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:32
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:540
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:303
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:229
attribute_deprecated int error_rate
Definition: avcodec.h:2958
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:71
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:220
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1722
Picture ** reordered_input_picture
pointer to the next pictures in codedorder for encoding
Definition: mpegvideo.h:273
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1340
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:121
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:31
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1159
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:639
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:1090
int height
Definition: frame.h:212
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:247
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:652
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:435
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:120
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void INT64 start
Definition: avisynth_c.h:595
#define av_always_inline
Definition: attributes.h:37
#define M_PI
Definition: mathematics.h:46
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:73
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:441
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
attribute_deprecated int lmin
Definition: avcodec.h:2392
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:107
int me_method
Motion estimation algorithm used for video coding.
Definition: avcodec.h:1451
#define stride
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:300
int ff_find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1810
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:651
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
Definition: mpegvideo.h:402
int rc_min_rate
minimum bitrate
Definition: avcodec.h:2332
int b_code
backward MV resolution for B Frames (mpeg4)
Definition: mpegvideo.h:376
#define CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:711
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:364
int dct_count[2]
Definition: mpegvideo.h:467
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideo.h:142
static int encode_frame(AVCodecContext *c, AVFrame *frame)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
This structure stores compressed data.
Definition: avcodec.h:1137
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:120
int delay
Codec delay.
Definition: avcodec.h:1400
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2541
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1153
int ff_check_alignment(void)
Definition: me_cmp.c:915
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:463
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:138
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:3516
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:80
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:241
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:268
unsigned int lambda
lagrange multipler used in rate distortion
Definition: mpegvideo.h:343
AVCodec ff_msmpeg4v2_encoder
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:525
enum idct_permutation_type perm_type
Definition: idctdsp.h:95
HpelDSPContext hdsp
Definition: mpegvideo.h:366
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:474
static int16_t block[64]
Definition: dct-test.c:110