FFmpeg  1.2.12
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
30 #include "libavutil/imgutils.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33 #include "h264chroma.h"
34 #include "internal.h"
35 #include "mathops.h"
36 #include "mpegvideo.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "xvmc_internal.h"
40 #include "thread.h"
41 #include <limits.h>
42 
43 //#undef NDEBUG
44 //#include <assert.h>
45 
47  int16_t *block, int n, int qscale);
49  int16_t *block, int n, int qscale);
51  int16_t *block, int n, int qscale);
53  int16_t *block, int n, int qscale);
55  int16_t *block, int n, int qscale);
57  int16_t *block, int n, int qscale);
59  int16_t *block, int n, int qscale);
60 
61 
62 //#define DEBUG
63 
64 
66 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
67  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
68  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
69 };
70 
72 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
73  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 };
82 
/* Constant DC-coefficient scale table: all 128 entries are 4.
 * Presumably selected via ff_mpeg2_dc_scale_table[] by the MPEG-2
 * intra_dc_precision setting (sibling tables hold 8, 2 and 1) —
 * TODO confirm against the table's users, which are outside this chunk. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
94 
/* Constant DC-coefficient scale table: all 128 entries are 2.
 * Presumably selected via ff_mpeg2_dc_scale_table[] by the MPEG-2
 * intra_dc_precision setting (sibling tables hold 8, 4 and 1) —
 * TODO confirm against the table's users, which are outside this chunk. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
106 
/* Constant DC-coefficient scale table: all 128 entries are 1 (no scaling).
 * Presumably selected via ff_mpeg2_dc_scale_table[] by the MPEG-2
 * intra_dc_precision setting (sibling tables hold 8, 4 and 2) —
 * TODO confirm against the table's users, which are outside this chunk. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
118 
119 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 };
125 
129 };
130 
/* Error-resilience callback: re-decode a single macroblock from saved
 * motion information. Copies the MB parameters into the context, points
 * s->dest[] at the MB's position in the current picture, and re-runs
 * ff_MPV_decode_mb().
 * NOTE(review): original lines 144-146 are missing from this extraction;
 * the gap between the memcpy and clear_blocks presumably updated block
 * indices — verify against the upstream source. */
131 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
132  int (*mv)[2][4][2],
133  int mb_x, int mb_y, int mb_intra, int mb_skipped)
134 {
135  MpegEncContext *s = opaque;
136 
    /* stash the macroblock parameters that ff_MPV_decode_mb() reads */
137  s->mv_dir = mv_dir;
138  s->mv_type = mv_type;
139  s->mb_intra = mb_intra;
140  s->mb_skipped = mb_skipped;
141  s->mb_x = mb_x;
142  s->mb_y = mb_y;
143  memcpy(s->mv, mv, sizeof(*mv));
144 
147 
148  s->dsp.clear_blocks(s->block[0]);
149 
    /* destination pointers: luma at full 16x16 granularity, chroma scaled
     * by the chroma shift factors */
150  s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
151  s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
152  s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
153 
    /* only the single-reference case is supported here */
154  assert(ref == 0);
155  ff_MPV_decode_mb(s, s->block);
156 }
/**
 * Scan [p, end) for an MPEG start code (0x000001xx).
 *
 * The last four bytes seen are carried across calls in *state, so a start
 * code that straddles two buffers is still found. The original first
 * signature line was lost in extraction and is restored here; FFMIN and
 * AV_RB32 are written out as their portable equivalents so the function
 * stands alone.
 *
 * @param p     current position in the buffer
 * @param end   end of the buffer (exclusive)
 * @param state in/out: rolling 32-bit window of the last bytes read; on a
 *              match it holds the full start code (0x000001xx)
 * @return pointer just past the start code, or end if none was found
 */
const uint8_t *avpriv_find_start_code(const uint8_t *p,
                                      const uint8_t *end,
                                      uint32_t *state)
{
    int i;

    assert(p <= end);
    if (p >= end)
        return end;

    /* Flush up to three bytes through the carried state so a code split
     * across the previous buffer boundary is detected. */
    for (i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if (tmp == 0x100 || p == end)
            return p;
    }

    /* Fast scan, stepping by up to 3 bytes: p[-1] > 1 rules out a
     * 00 00 01 prefix ending within the next three positions, etc. */
    while (p < end) {
        if      (p[-1] > 1      ) p += 3;
        else if (p[-2]          ) p += 2;
        else if (p[-3] | (p[-1] - 1)) p++;
        else {
            p++;
            break;
        }
    }

    /* Clamp (p may have overshot end by up to 2) and load the last four
     * bytes into *state, big-endian, as AV_RB32 would. */
    p = (p < end ? p : end) - 4;
    *state = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
             ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];

    return p + 4;
}
190 
191 /* init common dct for both encoder and decoder */
/* Shared DCT/DSP initialization for encoder and decoder: sets up the
 * dsputil and H.264-chroma (lowres) function tables, then selects
 * (de)quantizer implementations and permuted scantables.
 * NOTE(review): this extraction is missing the signature (line 192) and
 * most of the body (196-231): quantizer function-pointer assignments,
 * per-arch init calls, and the scantable setup are gone — do not rely on
 * this skeleton for behavior; consult the upstream source. */
193 {
194  ff_dsputil_init(&s->dsp, s->avctx);
195  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
197 
203  if (s->flags & CODEC_FLAG_BITEXACT)
206 
    /* per-architecture SIMD initialization (bodies missing in extraction) */
207 #if ARCH_X86
209 #elif ARCH_ALPHA
211 #elif ARCH_ARM
213 #elif HAVE_ALTIVEC
215 #elif ARCH_BFIN
217 #endif
218 
219  /* load & permutate scantables
220  * note: only wmv uses different ones
221  */
222  if (s->alternate_scan) {
225  } else {
228  }
231 
232  return 0;
233 }
234 
/* Shallow-copy a Picture and mark the copy's frame as FF_BUFFER_TYPE_COPY
 * so it is never released through the normal buffer path.
 * NOTE(review): the signature line (235) is missing from this extraction;
 * presumably (Picture *dst, Picture *src) — confirm upstream. */
236 {
237  *dst = *src;
238  dst->f.type = FF_BUFFER_TYPE_COPY;
239 }
240 
/* Release a picture's frame buffer, via the thread-aware path for normal
 * codecs or (in the missing else-branch) a default release for the WMV3/VC-1
 * image codecs that allocate internal buffers themselves.
 * NOTE(review): lines 244 (signature), 250-251 (rest of the codec-id
 * condition) and 254-255 (else branch) are missing from this extraction. */
245 {
246  pic->period_since_free = 0;
247  /* WM Image / Screen codecs allocate internal buffers with different
248  * dimensions / colorspaces; ignore user-defined callbacks for these. */
249  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
252  ff_thread_release_buffer(s->avctx, &pic->f);
253  else
256 }
257 
/* Allocate the per-context, linesize-dependent scratch buffers:
 * edge_emu_buffer for edge emulation and one shared scratchpad that
 * me.temp / rd_scratchpad / b_scratchpad / obmc_scratchpad all alias.
 * Returns 0 on success, AVERROR(ENOMEM) on failure.
 * NOTE(review): line 258 (signature, takes the linesize) and 279 (cleanup
 * before the error return) are missing from this extraction. */
259 {
    /* pad the absolute linesize and round up for alignment */
260  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
261 
262  // edge emu needs blocksize + filter length - 1
263  // (= 17x17 for halfpel / 21x21 for h264)
264  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
265  // at uvlinesize. It supports only YUV420 so 24x24 is enough
266  // linesize * interlaced * MBsize
267  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
268  fail);
269 
270  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
271  fail)
    /* all four scratch pointers intentionally share one allocation */
272  s->me.temp = s->me.scratchpad;
273  s->rd_scratchpad = s->me.scratchpad;
274  s->b_scratchpad = s->me.scratchpad;
275  s->obmc_scratchpad = s->me.scratchpad + 16;
276 
277  return 0;
278 fail:
280  return AVERROR(ENOMEM);
281 }
282 
/* Allocate a frame buffer for a Picture: optional hwaccel private data,
 * then the pixel buffer (thread-aware get_buffer for normal codecs,
 * default get_buffer for the WMV3/VC-1 image codecs), followed by sanity
 * checks on the returned strides and lazy allocation of the context
 * scratch buffers. Returns 0 on success, negative on failure.
 * NOTE(review): lines 286 (signature), 293, 302-303, 311, 317, 324 and 332
 * are missing from this extraction (mostly av_log call heads and the
 * hwaccel allocation itself). */
287 {
288  int r, ret;
289 
290  if (s->avctx->hwaccel) {
291  assert(!pic->f.hwaccel_picture_private);
292  if (s->avctx->hwaccel->priv_data_size) {
294  if (!pic->f.hwaccel_picture_private) {
295  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
296  return -1;
297  }
298  }
299  }
300 
    /* image codecs manage their own buffers; everything else goes through
     * the (possibly threaded) get_buffer path */
301  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
304  r = ff_thread_get_buffer(s->avctx, &pic->f);
305  else
306  r = avcodec_default_get_buffer(s->avctx, &pic->f);
307 
308  if (r < 0 || !pic->f.type || !pic->f.data[0]) {
309  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
310  r, pic->f.type, pic->f.data[0]);
312  return -1;
313  }
314 
    /* strides must stay constant across the sequence once established */
315  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
316  s->uvlinesize != pic->f.linesize[1])) {
318  "get_buffer() failed (stride changed)\n");
319  free_frame_buffer(s, pic);
320  return -1;
321  }
322 
    /* both chroma planes must share a stride */
323  if (pic->f.linesize[1] != pic->f.linesize[2]) {
325  "get_buffer() failed (uv stride mismatch)\n");
326  free_frame_buffer(s, pic);
327  return -1;
328  }
329 
    /* scratch buffers are sized from the linesize, so they can only be
     * allocated once the first real buffer exists */
330  if (!s->edge_emu_buffer &&
331  (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
333  "get_buffer() failed to allocate context scratch buffers.\n");
334  free_frame_buffer(s, pic);
335  return ret;
336  }
337 
338  return 0;
339 }
340 
/* Allocate a Picture plus all side-data tables (qscale, mb_type, motion
 * vectors, ref indices, pan-scan, and the encoder-only statistics arrays).
 * With shared != 0 the pixel data must already be set and is reused;
 * otherwise a fresh frame buffer is allocated. Returns 0 or -1.
 * NOTE(review): several FF_ALLOCZ_OR_GOTO heads (lines 360, 375, 377, 381,
 * 383, 386, 393, 404, 414, 418) are missing from this extraction — the
 * surviving continuation lines show which arrays and sizes were allocated. */
345 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
346 {
347  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
348 
349  // the + 1 is needed so memset(,,stride*height) does not sig11
350 
351  const int mb_array_size = s->mb_stride * s->mb_height;
352  const int b8_array_size = s->b8_stride * s->mb_height * 2;
353  const int b4_array_size = s->b4_stride * s->mb_height * 4;
354  int i;
355  int r = -1;
356 
357  if (shared) {
358  assert(pic->f.data[0]);
359  assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
361  } else {
362  assert(!pic->f.data[0]);
363 
364  if (alloc_frame_buffer(s, pic) < 0)
365  return -1;
366 
    /* adopt the strides of the first allocated buffer */
367  s->linesize = pic->f.linesize[0];
368  s->uvlinesize = pic->f.linesize[1];
369  }
370 
    /* side-data tables are allocated once, on the first use of this slot */
371  if (pic->f.qscale_table == NULL) {
372  if (s->encoding) {
    /* encoder-only per-MB variance / mean statistics */
373  FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
374  mb_array_size * sizeof(int16_t), fail)
376  mb_array_size * sizeof(int16_t), fail)
378  mb_array_size * sizeof(int8_t ), fail)
379  }
380 
382  mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
384  (big_mb_num + s->mb_stride) * sizeof(uint8_t),
385  fail)
387  (big_mb_num + s->mb_stride) * sizeof(uint32_t),
388  fail)
    /* tables are over-allocated; usable area starts one row + one col in */
389  pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
390  pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
391  if (s->out_format == FMT_H264) {
    /* H.264: 4x4 motion granularity (b4 stride) */
392  for (i = 0; i < 2; i++) {
394  2 * (b4_array_size + 4) * sizeof(int16_t),
395  fail)
396  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
397  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
398  4 * mb_array_size * sizeof(uint8_t), fail)
399  }
400  pic->f.motion_subsample_log2 = 2;
401  } else if (s->out_format == FMT_H263 || s->encoding ||
402  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
    /* 8x8 motion granularity (b8 stride) for H.263-family / debugging */
403  for (i = 0; i < 2; i++) {
405  2 * (b8_array_size + 4) * sizeof(int16_t),
406  fail)
407  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
408  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
409  4 * mb_array_size * sizeof(uint8_t), fail)
410  }
411  pic->f.motion_subsample_log2 = 3;
412  }
413  if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
415  64 * mb_array_size * sizeof(int16_t) * 6, fail)
416  }
417  pic->f.qstride = s->mb_stride;
419  1 * sizeof(AVPanScan), fail)
420  }
421 
422  pic->owner2 = s;
423 
424  return 0;
425 fail: // for the FF_ALLOCZ_OR_GOTO macro
426  if (r >= 0)
427  free_frame_buffer(s, pic);
428  return -1;
429 }
430 
/* Release a Picture: frees the frame buffer (unless it is a shared buffer
 * owned by someone else) and every side-data table, then NULLs the derived
 * pointers. For shared buffers only the data/base pointers are cleared.
 * NOTE(review): line 446 is missing from this extraction (presumably the
 * av_freep of qscale_table_base preceding the NULL at 447). Also note
 * pic->f.mb_type is set to NULL twice (lines 449 and 452) — the second
 * looks like a copy/paste slip in the original; harmless but redundant. */
434 static void free_picture(MpegEncContext *s, Picture *pic)
435 {
436  int i;
437 
438  if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
439  free_frame_buffer(s, pic);
440  }
441 
442  av_freep(&pic->mb_var);
443  av_freep(&pic->mc_mb_var);
444  av_freep(&pic->mb_mean);
445  av_freep(&pic->f.mbskip_table);
447  pic->f.qscale_table = NULL;
448  av_freep(&pic->mb_type_base);
449  pic->f.mb_type = NULL;
450  av_freep(&pic->f.dct_coeff);
451  av_freep(&pic->f.pan_scan);
452  pic->f.mb_type = NULL;
453  for (i = 0; i < 2; i++) {
454  av_freep(&pic->motion_val_base[i]);
455  av_freep(&pic->f.ref_index[i]);
456  pic->f.motion_val[i] = NULL;
457  }
458 
    /* shared buffers: we do not own the data, just forget the pointers */
459  if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
460  for (i = 0; i < 4; i++) {
461  pic->f.base[i] =
462  pic->f.data[i] = NULL;
463  }
464  pic->f.type = 0;
465  }
466 }
467 
/* Per-slice-context initialization: clears the scratch pointers (they are
 * allocated lazily from the linesize), allocates the motion-estimation
 * maps and DCT error sums for encoding, the 12 transform blocks, and for
 * H.263-family codecs the AC prediction values. Returns 0 or -1.
 * NOTE(review): lines 468 (signature), 485, 488 and 501 are missing from
 * this extraction (FF_ALLOCZ_OR_GOTO heads for me.score_map,
 * dct_error_sum and ac_val_base, judging by the surviving size lines). */
469 {
470  int y_size = s->b8_stride * (2 * s->mb_height + 1);
471  int c_size = s->mb_stride * (s->mb_height + 1);
472  int yc_size = y_size + 2 * c_size;
473  int i;
474 
    /* scratch buffers are linesize-dependent and allocated later by
     * ff_mpv_frame_size_alloc() */
475  s->edge_emu_buffer =
476  s->me.scratchpad =
477  s->me.temp =
478  s->rd_scratchpad =
479  s->b_scratchpad =
480  s->obmc_scratchpad = NULL;
481 
482  if (s->encoding) {
483  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
484  ME_MAP_SIZE * sizeof(uint32_t), fail)
486  ME_MAP_SIZE * sizeof(uint32_t), fail)
487  if (s->avctx->noise_reduction) {
489  2 * 64 * sizeof(int), fail)
490  }
491  }
492  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
493  s->block = s->blocks[0];
494 
495  for (i = 0; i < 12; i++) {
496  s->pblocks[i] = &s->block[i];
497  }
498 
499  if (s->out_format == FMT_H263) {
500  /* ac values */
502  yc_size * sizeof(int16_t) * 16, fail);
503  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
504  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
505  s->ac_val[2] = s->ac_val[1] + c_size;
506  }
507 
508  return 0;
509 fail:
510  return -1; // free() through ff_MPV_common_end()
511 }
512 
/* Free everything init_duplicate_context() allocated for one slice
 * context. The aliased scratch pointers (me.temp, rd_scratchpad, ...)
 * are only NULLed because they share me.scratchpad's allocation.
 * NOTE(review): lines 513 (signature) and 518 (presumably the av_freep of
 * edge_emu_buffer) are missing from this extraction. */
514 {
515  if (s == NULL)
516  return;
517 
519  av_freep(&s->me.scratchpad);
    /* these alias me.scratchpad — clearing, not freeing, is intentional */
520  s->me.temp =
521  s->rd_scratchpad =
522  s->b_scratchpad =
523  s->obmc_scratchpad = NULL;
524 
525  av_freep(&s->dct_error_sum);
526  av_freep(&s->me.map);
527  av_freep(&s->me.score_map);
528  av_freep(&s->blocks);
529  av_freep(&s->ac_val_base);
530  s->block = NULL;
531 }
532 
/* Copy the per-slice-context fields (scratch pointers, ME state, block
 * pointers, bit writer, per-thread row range, AC values) from src into
 * bak. Used by ff_update_duplicate_context() to preserve a thread
 * context's own buffers across a whole-struct memcpy.
 * NOTE(review): line 533 (signature) is missing from this extraction;
 * presumably (MpegEncContext *bak, MpegEncContext *src). */
534 {
535 #define COPY(a) bak->a = src->a
536  COPY(edge_emu_buffer);
537  COPY(me.scratchpad);
538  COPY(me.temp);
539  COPY(rd_scratchpad);
540  COPY(b_scratchpad);
541  COPY(obmc_scratchpad);
542  COPY(me.map);
543  COPY(me.score_map);
544  COPY(blocks);
545  COPY(block);
546  COPY(start_mb_y);
547  COPY(end_mb_y);
548  COPY(me.map_generation);
549  COPY(pb);
550  COPY(dct_error_sum);
551  COPY(dct_count[0]);
552  COPY(dct_count[1]);
553  COPY(ac_val_base);
554  COPY(ac_val[0]);
555  COPY(ac_val[1]);
556  COPY(ac_val[2]);
557 #undef COPY
558 }
559 
/* Refresh a slice/thread context from the master: copies the whole
 * struct, then restores the destination's own per-thread buffers (saved
 * through backup_duplicate_context) and re-points pblocks at the local
 * block array. Allocates scratch buffers lazily if still missing.
 * Returns 0 on success, negative AVERROR on allocation failure.
 * NOTE(review): line 560 (signature) is missing from this extraction;
 * presumably (MpegEncContext *dst, MpegEncContext *src). */
561 {
562  MpegEncContext bak;
563  int i, ret;
564  // FIXME copy only needed parts
565  // START_TIMER
566  backup_duplicate_context(&bak, dst);
567  memcpy(dst, src, sizeof(MpegEncContext));
568  backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's */
569  for (i = 0; i < 12; i++) {
570  dst->pblocks[i] = &dst->block[i];
571  }
572  if (!dst->edge_emu_buffer &&
573  (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
574  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
575  "scratch buffers.\n");
576  return ret;
577  }
578  // STOP_TIMER("update_duplicate_context")
579  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
580  return 0;
581 }
582 
/* Frame-threading: synchronize the destination decoder context with the
 * source thread's context — one-time full init, size-change handling, and
 * per-frame copies of picture state, error-resilience info, MPEG-4 timing,
 * B-frame state, the bitstream buffer and interlacing fields. Returns 0
 * or a negative error code.
 * NOTE(review): this extraction is missing the first signature line (583),
 * plus lines 602, 605-606, 666-668 and 674 (fast_realloc of the bitstream
 * buffer and part of its size check, judging by context). */
584  const AVCodecContext *src)
585 {
586  int i;
587  int err;
588  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
589 
590  if (dst == src)
591  return 0;
592 
593  av_assert0(s != s1);
594 
595  // FIXME can parameters change on I-frames?
596  // in that case dst may need a reinit
597  if (!s->context_initialized) {
    /* first call: clone the whole context, then fix up ownership */
598  memcpy(s, s1, sizeof(MpegEncContext));
599 
600  s->avctx = dst;
601  s->bitstream_buffer = NULL;
603 
604  if (s1->context_initialized){
607  if((err = ff_MPV_common_init(s)) < 0){
608  memset(s, 0, sizeof(MpegEncContext));
609  s->avctx = dst;
610  return err;
611  }
612  }
613  }
614 
615  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
616  s->context_reinit = 0;
617  s->height = s1->height;
618  s->width = s1->width;
619  if ((err = ff_MPV_common_frame_size_change(s)) < 0)
620  return err;
621  }
622 
623  s->avctx->coded_height = s1->avctx->coded_height;
624  s->avctx->coded_width = s1->avctx->coded_width;
625  s->avctx->width = s1->avctx->width;
626  s->avctx->height = s1->avctx->height;
627 
628  s->coded_picture_number = s1->coded_picture_number;
629  s->picture_number = s1->picture_number;
630  s->input_picture_number = s1->input_picture_number;
631 
632  av_assert0(!s->picture || s->picture != s1->picture);
633  memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
    /* bulk-copy the Picture fields between last_picture and
     * last_picture_ptr using member-offset arithmetic */
634  memcpy(&s->last_picture, &s1->last_picture,
635  (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
636 
637  // reset s->picture[].f.extended_data to s->picture[].f.data
638  for (i = 0; i < s->picture_count; i++) {
639  s->picture[i].f.extended_data = s->picture[i].f.data;
640  s->picture[i].period_since_free ++;
641  }
642 
    /* pointers into s1->picture[] must be rebased into s->picture[] */
643  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
644  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
645  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
646 
647  // Error/bug resilience
648  s->next_p_frame_damaged = s1->next_p_frame_damaged;
649  s->workaround_bugs = s1->workaround_bugs;
650  s->padding_bug_score = s1->padding_bug_score;
651 
652  // MPEG4 timing info
653  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
654  (char *) &s1->shape - (char *) &s1->time_increment_bits);
655 
656  // B-frame info
657  s->max_b_frames = s1->max_b_frames;
658  s->low_delay = s1->low_delay;
659  s->droppable = s1->droppable;
660 
661  // DivX handling (doesn't work)
662  s->divx_packed = s1->divx_packed;
663 
664  if (s1->bitstream_buffer) {
665  if (s1->bitstream_buffer_size +
669  s1->allocated_bitstream_buffer_size);
670  s->bitstream_buffer_size = s1->bitstream_buffer_size;
671  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
672  s1->bitstream_buffer_size);
673  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
675  }
676 
677  // linesize dependend scratch buffer allocation
678  if (!s->edge_emu_buffer)
679  if (s1->linesize) {
680  if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
681  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
682  "scratch buffers.\n");
683  return AVERROR(ENOMEM);
684  }
685  } else {
686  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
687  "be allocated due to unknown size.\n");
688  }
689 
690  // MPEG2/interlacing info
691  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
692  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
693 
694  if (!s1->first_field) {
695  s->last_pict_type = s1->pict_type;
696  if (s1->current_picture_ptr)
697  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
698 
699  if (s1->pict_type != AV_PICTURE_TYPE_B) {
700  s->last_non_b_pict_type = s1->pict_type;
701  }
702  }
703 
704  return 0;
705 }
706 
/* Set default values common to encoder and decoder before codec-specific
 * init: DC scale tables, progressive flags, picture counters, f/b codes,
 * the picture slot range and the slice-context count.
 * NOTE(review): lines 713 (signature), 716-717 (remaining dc_scale_table
 * assignments), 720 and 732 (presumably picture_range_end) are missing
 * from this extraction. */
714 {
715  s->y_dc_scale_table =
718  s->progressive_frame = 1;
719  s->progressive_sequence = 1;
721 
722  s->coded_picture_number = 0;
723  s->picture_number = 0;
724  s->input_picture_number = 0;
725 
726  s->picture_in_gop_number = 0;
727 
    /* 1 = smallest legal f_code/b_code */
728  s->f_code = 1;
729  s->b_code = 1;
730 
731  s->picture_range_start = 0;
733 
734  s->slice_context_count = 1;
735 }
736 
743 {
745 }
746 
/* Initialize the error-resilience context from the MpegEncContext:
 * mirrors the MB geometry, allocates the per-MB status/temp buffers and
 * wires the shared tables. Returns 0 or AVERROR(ENOMEM).
 * NOTE(review): lines 763 (er_temp_buffer allocation head), 774
 * (presumably er->decode_mb = mpeg_er_decode_mb) and 780 (second av_freep
 * in the failure path) are missing from this extraction. */
747 static int init_er(MpegEncContext *s)
748 {
749  ERContext *er = &s->er;
750  int mb_array_size = s->mb_height * s->mb_stride;
751  int i;
752 
753  er->avctx = s->avctx;
754  er->dsp = &s->dsp;
755 
    /* share the MB geometry with the main context */
756  er->mb_index2xy = s->mb_index2xy;
757  er->mb_num = s->mb_num;
758  er->mb_width = s->mb_width;
759  er->mb_height = s->mb_height;
760  er->mb_stride = s->mb_stride;
761  er->b8_stride = s->b8_stride;
762 
764  er->error_status_table = av_mallocz(mb_array_size);
765  if (!er->er_temp_buffer || !er->error_status_table)
766  goto fail;
767 
    /* borrowed (not owned) tables */
768  er->mbskip_table = s->mbskip_table;
769  er->mbintra_table = s->mbintra_table;
770 
771  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
772  er->dc_val[i] = s->dc_val[i];
773 
775  er->opaque = s;
776 
777  return 0;
778 fail:
779  av_freep(&er->er_temp_buffer);
781  return AVERROR(ENOMEM);
782 }
783 
/* Initialize everything that depends on the frame dimensions: MB
 * geometry, index tables, encoder MV/MB-type tables, interlaced
 * direct-mode tables, H.263 cbp tables, DC prediction values and the
 * skip/intra tables. Ends by initializing error resilience. Returns 0 or
 * AVERROR(ENOMEM).
 * NOTE(review): lines 787 (signature), 819 (sic — present), and several
 * FF_ALLOCZ_OR_GOTO heads (832-836, 843, 845, 851, 857, 919, ...) are
 * missing from this extraction. */
788 {
789  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
790 
791  s->mb_width = (s->width + 15) / 16;
792  s->mb_stride = s->mb_width + 1;
793  s->b8_stride = s->mb_width * 2 + 1;
794  s->b4_stride = s->mb_width * 4 + 1;
795  mb_array_size = s->mb_height * s->mb_stride;
796  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
797 
798  /* set default edge pos, will be overriden
799  * in decode_header if needed */
800  s->h_edge_pos = s->mb_width * 16;
801  s->v_edge_pos = s->mb_height * 16;
802 
803  s->mb_num = s->mb_width * s->mb_height;
804 
    /* wrap strides for DC/AC prediction: 8x8 for luma, 16x16 for chroma */
805  s->block_wrap[0] =
806  s->block_wrap[1] =
807  s->block_wrap[2] =
808  s->block_wrap[3] = s->b8_stride;
809  s->block_wrap[4] =
810  s->block_wrap[5] = s->mb_stride;
811 
812  y_size = s->b8_stride * (2 * s->mb_height + 1);
813  c_size = s->mb_stride * (s->mb_height + 1);
814  yc_size = y_size + 2 * c_size;
815 
816  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
817  for (y = 0; y < s->mb_height; y++)
818  for (x = 0; x < s->mb_width; x++)
819  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
820 
821  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
822 
823  if (s->encoding) {
824  /* Allocate MV tables */
825  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
826  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
827  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
828  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
829  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
830  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
    /* usable table area starts one row + one column into the base */
831  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
837 
838  /* Allocate MB type table */
839  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
840 
841  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
842 
844  mb_array_size * sizeof(float), fail);
846  mb_array_size * sizeof(float), fail);
847 
848  }
849 
850  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
852  /* interlaced direct mode decoding tables */
853  for (i = 0; i < 2; i++) {
854  int j, k;
855  for (j = 0; j < 2; j++) {
856  for (k = 0; k < 2; k++) {
858  s->b_field_mv_table_base[i][j][k],
859  mv_table_size * 2 * sizeof(int16_t),
860  fail);
861  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
862  s->mb_stride + 1;
863  }
864  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
865  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
866  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
867  }
868  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
869  }
870  }
871  if (s->out_format == FMT_H263) {
872  /* cbp values */
873  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
874  s->coded_block = s->coded_block_base + s->b8_stride + 1;
875 
876  /* cbp, ac_pred, pred_dir */
877  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
878  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
879  }
880 
881  if (s->h263_pred || s->h263_plus || !s->encoding) {
882  /* dc values */
883  // MN: we need these for error resilience of intra-frames
884  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
885  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
886  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
887  s->dc_val[2] = s->dc_val[1] + c_size;
    /* 1024 = neutral DC predictor (128 << 3) */
888  for (i = 0; i < yc_size; i++)
889  s->dc_val_base[i] = 1024;
890  }
891 
892  /* which mb is a intra block */
893  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
894  memset(s->mbintra_table, 1, mb_array_size);
895 
896  /* init macroblock skip table */
897  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
898  // Note the + 1 is for a quicker mpeg4 slice_end detection
899 
900  return init_er(s);
901 fail:
902  return AVERROR(ENOMEM);
903 }
904 
/* Main common init for encoder and decoder: computes the MB height,
 * validates dimensions and thread/slice counts, allocates the encoder's
 * quantizer matrices and statistics, the picture array, then the
 * frame-size-dependent state (init_context_frame) and one duplicate
 * context per slice/thread. Returns 0 or -1 (cleaning up via
 * ff_MPV_common_end on failure).
 * NOTE(review): many lines are missing from this extraction — 909
 * (signature), 913 (thread-count condition), 919 (interlaced-MB check),
 * 925, 945, 951, 954-955, 957, 961, 973-974, 981-982, 985 — so several
 * conditions and allocation heads below are truncated. */
910 {
911  int i;
912  int nb_slices = (HAVE_THREADS &&
914  s->avctx->thread_count : 1;
915 
916  if (s->encoding && s->avctx->slices)
917  nb_slices = s->avctx->slices;
918 
    /* interlaced content rounds mb_height up to a multiple of two MBs */
920  s->mb_height = (s->height + 31) / 32 * 2;
921  else if (s->codec_id != AV_CODEC_ID_H264)
922  s->mb_height = (s->height + 15) / 16;
923 
924  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
926  "decoding to AV_PIX_FMT_NONE is not supported.\n");
927  return -1;
928  }
929 
930  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
931  int max_slices;
932  if (s->mb_height)
933  max_slices = FFMIN(MAX_THREADS, s->mb_height);
934  else
935  max_slices = MAX_THREADS;
936  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
937  " reducing to %d\n", nb_slices, max_slices);
938  nb_slices = max_slices;
939  }
940 
941  if ((s->width || s->height) &&
942  av_image_check_size(s->width, s->height, 0, s->avctx))
943  return -1;
944 
946 
947  s->flags = s->avctx->flags;
948  s->flags2 = s->avctx->flags2;
949 
950  /* set chroma shifts */
952 
953  /* convert fourcc to upper case */
956 
958 
959  if (s->encoding) {
960  if (s->msmpeg4_version) {
962  2 * 2 * (MAX_LEVEL + 1) *
963  (MAX_RUN + 1) * 2 * sizeof(int), fail);
964  }
965  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
966 
    /* quantizer matrices: 32 qscales x 64 coefficients each */
967  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
968  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
969  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
970  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
971  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
972  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
975 
976  if (s->avctx->noise_reduction) {
977  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
978  }
979  }
980 
983  s->picture_count * sizeof(Picture), fail);
984  for (i = 0; i < s->picture_count; i++) {
986  }
987 
988  if (init_context_frame(s))
989  goto fail;
990 
991  s->parse_context.state = -1;
992 
993  s->context_initialized = 1;
994  s->thread_context[0] = s;
995 
996 // if (s->width && s->height) {
997  if (nb_slices > 1) {
    /* clone the master context once per extra slice, then init each */
998  for (i = 1; i < nb_slices; i++) {
999  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1000  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1001  }
1002 
1003  for (i = 0; i < nb_slices; i++) {
1004  if (init_duplicate_context(s->thread_context[i]) < 0)
1005  goto fail;
1006  s->thread_context[i]->start_mb_y =
1007  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1008  s->thread_context[i]->end_mb_y =
1009  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1010  }
1011  } else {
1012  if (init_duplicate_context(s) < 0)
1013  goto fail;
1014  s->start_mb_y = 0;
1015  s->end_mb_y = s->mb_height;
1016  }
1017  s->slice_context_count = nb_slices;
1018 // }
1019 
1020  return 0;
1021  fail:
1022  ff_MPV_common_end(s);
1023  return -1;
1024 }
1025 
/* Free everything init_context_frame() allocated — the inverse used both
 * on teardown and when the frame size changes. Frees MV/MB-type tables,
 * field MV tables, DC/cbp/skip/intra tables, ER buffers and the index
 * and lambda tables, then resets the strides.
 * NOTE(review): lines 1031 (signature), 1036-1041 (av_freep of the six
 * *_mv_table_base pointers), 1045-1046, 1058, 1062, 1069 and 1080
 * (presumably the scantable reset in the trailing loop) are missing from
 * this extraction. */
1032 {
1033  int i, j, k;
1034 
1035  av_freep(&s->mb_type);
1042  s->p_mv_table = NULL;
1043  s->b_forw_mv_table = NULL;
1044  s->b_back_mv_table = NULL;
1047  s->b_direct_mv_table = NULL;
1048  for (i = 0; i < 2; i++) {
1049  for (j = 0; j < 2; j++) {
1050  for (k = 0; k < 2; k++) {
1051  av_freep(&s->b_field_mv_table_base[i][j][k]);
1052  s->b_field_mv_table[i][j][k] = NULL;
1053  }
1054  av_freep(&s->b_field_select_table[i][j]);
1055  av_freep(&s->p_field_mv_table_base[i][j]);
1056  s->p_field_mv_table[i][j] = NULL;
1057  }
1059  }
1060 
1061  av_freep(&s->dc_val_base);
1063  av_freep(&s->mbintra_table);
1064  av_freep(&s->cbp_table);
1065  av_freep(&s->pred_dir_table);
1066 
1067  av_freep(&s->mbskip_table);
1068 
1070  av_freep(&s->er.er_temp_buffer);
1071  av_freep(&s->mb_index2xy);
1072  av_freep(&s->lambda_table);
1073 
1074  av_freep(&s->cplx_tab);
1075  av_freep(&s->bits_tab);
1076 
1077  s->linesize = s->uvlinesize = 0;
1078 
1079  for (i = 0; i < 3; i++)
1081 
1082  return 0;
1083 }
1084 
/* Re-initialize the frame-size-dependent state after a mid-stream
 * resolution change: tears down the per-slice contexts and frame tables,
 * marks every picture slot for reallocation, recomputes mb_height and
 * rebuilds everything, mirroring the slice-context setup in
 * ff_MPV_common_init. Returns 0 or a negative error.
 * NOTE(review): lines 1085 (signature), 1094, 1100, 1111, 1114 are
 * missing from this extraction (free_duplicate_context calls, the
 * current_picture_ptr reset and the interlaced mb_height condition,
 * judging by context). */
1086 {
1087  int i, err = 0;
1088 
1089  if (!s->context_initialized)
1090  return AVERROR(EINVAL);
1091 
1092  if (s->slice_context_count > 1) {
1093  for (i = 0; i < s->slice_context_count; i++) {
1095  }
1096  for (i = 1; i < s->slice_context_count; i++) {
1097  av_freep(&s->thread_context[i]);
1098  }
1099  } else
1101 
1102  free_context_frame(s);
1103 
    /* force every picture slot to be reallocated at the new size */
1104  if (s->picture)
1105  for (i = 0; i < s->picture_count; i++) {
1106  s->picture[i].needs_realloc = 1;
1107  }
1108 
1109  s->last_picture_ptr =
1110  s->next_picture_ptr =
1112 
1113  // init
1115  s->mb_height = (s->height + 31) / 32 * 2;
1116  else if (s->codec_id != AV_CODEC_ID_H264)
1117  s->mb_height = (s->height + 15) / 16;
1118 
1119  if ((s->width || s->height) &&
1120  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1121  goto fail;
1122 
1123  if ((err = init_context_frame(s)))
1124  goto fail;
1125 
1126  s->thread_context[0] = s;
1127 
1128  if (s->width && s->height) {
1129  int nb_slices = s->slice_context_count;
1130  if (nb_slices > 1) {
1131  for (i = 1; i < nb_slices; i++) {
1132  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1133  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1134  }
1135 
1136  for (i = 0; i < nb_slices; i++) {
1137  if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1138  goto fail;
1139  s->thread_context[i]->start_mb_y =
1140  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1141  s->thread_context[i]->end_mb_y =
1142  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1143  }
1144  } else {
1145  if (init_duplicate_context(s) < 0)
1146  goto fail;
1147  s->start_mb_y = 0;
1148  s->end_mb_y = s->mb_height;
1149  }
1150  s->slice_context_count = nb_slices;
1151  }
1152 
1153  return 0;
1154  fail:
1155  ff_MPV_common_end(s);
1156  return err;
1157 }
1158 
1159 /* init common structure for both encoder and decoder */
/* Full teardown for encoder and decoder: frees the slice contexts, parse
 * and bitstream buffers, encoder matrices/statistics, every Picture, the
 * frame tables, and resets the init flags and picture pointers.
 * NOTE(review): lines 1160 (signature), 1166, 1174, 1177-1178, 1183-1186,
 * 1189-1190, 1192, 1204-1205 and 1210 are missing from this extraction
 * (free_duplicate_context calls, parse/bitstream buffer frees, further
 * matrix frees and the current_picture_ptr reset, judging by context). */
1161 {
1162  int i;
1163 
1164  if (s->slice_context_count > 1) {
1165  for (i = 0; i < s->slice_context_count; i++) {
1167  }
1168  for (i = 1; i < s->slice_context_count; i++) {
1169  av_freep(&s->thread_context[i]);
1170  }
1171  s->slice_context_count = 1;
1172  } else free_duplicate_context(s);
1173 
1175  s->parse_context.buffer_size = 0;
1176 
1179 
1180  av_freep(&s->avctx->stats_out);
1181  av_freep(&s->ac_stats);
1182 
1187  av_freep(&s->q_intra_matrix);
1188  av_freep(&s->q_inter_matrix);
1191  av_freep(&s->input_picture);
1193  av_freep(&s->dct_offset);
1194 
    /* copies share the picture array; only the owner frees its contents */
1195  if (s->picture && !s->avctx->internal->is_copy) {
1196  for (i = 0; i < s->picture_count; i++) {
1197  free_picture(s, &s->picture[i]);
1198  }
1199  }
1200  av_freep(&s->picture);
1201 
1202  free_context_frame(s);
1203 
1206 
1207  s->context_initialized = 0;
1208  s->last_picture_ptr =
1209  s->next_picture_ptr =
1211  s->linesize = s->uvlinesize = 0;
1212 }
1213 
/* Build the run/level lookup tables (max_level, max_run, index_run) for
 * an RLE table, for both the "not last" and "last" coefficient halves.
 * Tables live either in the caller-provided static_store or in freshly
 * malloc'd buffers.
 * NOTE(review): the first signature line (1214) is missing from this
 * extraction; presumably "av_cold void ff_init_rl(RLTable *rl," —
 * confirm upstream. */
1215  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1216 {
1217  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1218  uint8_t index_run[MAX_RUN + 1];
1219  int last, run, level, start, end, i;
1220 
1221  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1222  if (static_store && rl->max_level[0])
1223  return;
1224 
1225  /* compute max_level[], max_run[] and index_run[] */
1226  for (last = 0; last < 2; last++) {
    /* first half of the table: "not last" codes; second half: "last" */
1227  if (last == 0) {
1228  start = 0;
1229  end = rl->last;
1230  } else {
1231  start = rl->last;
1232  end = rl->n;
1233  }
1234 
1235  memset(max_level, 0, MAX_RUN + 1);
1236  memset(max_run, 0, MAX_LEVEL + 1);
    /* rl->n acts as the "unset" sentinel in index_run */
1237  memset(index_run, rl->n, MAX_RUN + 1);
1238  for (i = start; i < end; i++) {
1239  run = rl->table_run[i];
1240  level = rl->table_level[i];
1241  if (index_run[run] == rl->n)
1242  index_run[run] = i;
1243  if (level > max_level[run])
1244  max_level[run] = level;
1245  if (run > max_run[level])
1246  max_run[level] = run;
1247  }
    /* static_store is carved into three consecutive regions */
1248  if (static_store)
1249  rl->max_level[last] = static_store[last];
1250  else
1251  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1252  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1253  if (static_store)
1254  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1255  else
1256  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1257  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1258  if (static_store)
1259  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1260  else
1261  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1262  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1263  }
1264 }
1265 
/* NOTE(review): signature line lost in extraction (ff_init_vlc_rl(RLTable *rl)).
 * Precomputes, for each of the 32 quantizer values, an rl_vlc[] table that
 * combines VLC decoding with dequantization of the level. */
1267 {
1268  int i, q;
1269 
1270  for (q = 0; q < 32; q++) {
 /* standard MPEG-4/H.263 style dequant: level * 2q + (q-1)|1 */
1271  int qmul = q * 2;
1272  int qadd = (q - 1) | 1;
1273 
 /* q == 0 is used as an identity mapping (no dequantization) */
1274  if (q == 0) {
1275  qmul = 1;
1276  qadd = 0;
1277  }
1278  for (i = 0; i < rl->vlc.table_size; i++) {
1279  int code = rl->vlc.table[i][0];
1280  int len = rl->vlc.table[i][1];
1281  int level, run;
1282 
 /* run == 66 is the escape/invalid marker used by the decoders */
1283  if (len == 0) { // illegal code
1284  run = 66;
1285  level = MAX_LEVEL;
1286  } else if (len < 0) { // more bits needed
1287  run = 0;
1288  level = code;
1289  } else {
1290  if (code == rl->n) { // esc
1291  run = 66;
1292  level = 0;
1293  } else {
1294  run = rl->table_run[code] + 1;
1295  level = rl->table_level[code] * qmul + qadd;
 /* "last coefficient" entries are flagged by adding 192 to run */
1296  if (code >= rl->last) run += 192;
1297  }
1298  }
1299  rl->rl_vlc[q][i].len = len;
1300  rl->rl_vlc[q][i].level = level;
1301  rl->rl_vlc[q][i].run = run;
1302  }
1303  }
1304 }
1305 
1306 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1307 {
1308  int i;
1309 
1310  /* release non reference frames */
1311  for (i = 0; i < s->picture_count; i++) {
1312  if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1313  (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1314  (remove_current || &s->picture[i] != s->current_picture_ptr)
1315  /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1316  free_frame_buffer(s, &s->picture[i]);
1317  }
1318  }
1319 }
1320 
/* Returns 1 if the given picture slot can be (re)used, 0 otherwise. */
1321 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1322 {
 /* NOTE(review): the first line of this condition (source line 1323) was
  * lost in extraction; it presumably guards recently-freed pictures under
  * frame threading — confirm against the full source. */
1324  && pic->f.qscale_table //check if the frame has anything allocated
1325  && pic->period_since_free < s->avctx->thread_count)
1326  return 0;
 /* a slot without a data buffer is trivially unused */
1327  if (pic->f.data[0] == NULL)
1328  return 1;
 /* a picture flagged for reallocation counts as unused, unless it is a
  * delayed reference or belongs to another context */
1329  if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1330  if (!pic->owner2 || pic->owner2 == s)
1331  return 1;
1332  return 0;
1333 }
1334 
/* Find a free slot in the picture pool.
 * @param shared nonzero to search for a completely empty slot (for shared
 *               buffers), zero for a reusable internally allocated one
 * @return index into s->picture[]; aborts on pool exhaustion (see below) */
1335 static int find_unused_picture(MpegEncContext *s, int shared)
1336 {
1337  int i;
1338 
1339  if (shared) {
1340  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1341  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1342  return i;
1343  }
1344  } else {
 /* first pass: prefer slots that already had an internal buffer */
1345  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1346  if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1347  return i; // FIXME
1348  }
 /* second pass: accept any unused slot */
1349  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1350  if (pic_is_unused(s, &s->picture[i]))
1351  return i;
1352  }
1353  }
1354 
 /* NOTE(review): the av_log() call head (source line 1355) was lost in
  * extraction; only its message argument remains below. */
1356  "Internal error, picture buffer overflow\n");
1357  /* We could return -1, but the codec would crash trying to draw into a
1358  * non-existing frame anyway. This is safer than waiting for a random crash.
1359  * Also the return of this is never useful, an encoder must only allocate
1360  * as much as allowed in the specification. This has no relationship to how
1361  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1362  * enough for such valid streams).
1363  * Plus, a decoder has to check stream validity and remove frames if too
1364  * many reference frames are around. Waiting for "OOM" is not correct at
1365  * all. Similarly, missing reference frames have to be replaced by
1366  * interpolated/MC frames, anything else is a bug in the codec ...
1367  */
1368  abort();
1369  return -1;
1370 }
1371 
/* NOTE(review): signature line lost in extraction
 * (int ff_find_unused_picture(MpegEncContext *s, int shared)).
 * Public wrapper around find_unused_picture() that additionally frees a
 * slot that was flagged for reallocation before handing it out. */
1373 {
1374  int ret = find_unused_picture(s, shared);
1375 
1376  if (ret >= 0 && ret < s->picture_range_end) {
1377  if (s->picture[ret].needs_realloc) {
1378  s->picture[ret].needs_realloc = 0;
1379  free_picture(s, &s->picture[ret]);
 /* NOTE(review): one statement (source line 1380) was lost here. */
1381  }
1382  }
1383  return ret;
1384 }
1385 
/* NOTE(review): signature line lost in extraction
 * (static void update_noise_reduction(MpegEncContext *s)).
 * Updates the per-coefficient DCT offsets used for encoder noise
 * reduction from the accumulated error statistics. */
1387 {
1388  int intra, i;
1389 
1390  for (intra = 0; intra < 2; intra++) {
 /* halve the accumulators periodically so recent frames dominate */
1391  if (s->dct_count[intra] > (1 << 16)) {
1392  for (i = 0; i < 64; i++) {
1393  s->dct_error_sum[intra][i] >>= 1;
1394  }
1395  s->dct_count[intra] >>= 1;
1396  }
1397 
 /* offset[i] ~= noise_reduction * count / error_sum[i], rounded;
  * +1 in the divisor avoids division by zero */
1398  for (i = 0; i < 64; i++) {
1399  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1400  s->dct_count[intra] +
1401  s->dct_error_sum[intra][i] / 2) /
1402  (s->dct_error_sum[intra][i] + 1);
1403  }
1404  }
1405 }
1406 
/* NOTE(review): the signature and numerous statements of this function were
 * lost in extraction (int ff_MPV_frame_start(MpegEncContext *s,
 * AVCodecContext *avctx)); gaps are marked below. Sets up the current/last/
 * next picture pointers before decoding or encoding a frame, allocating
 * dummy reference frames where the stream lacks them. Returns 0 on success,
 * a negative value on failure. */
1412 {
1413  int i;
1414  Picture *pic;
1415  s->mb_skipped = 0;
1416 
1417  if (!ff_thread_can_start_frame(avctx)) {
1418  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1419  return -1;
1420  }
1421 
1422  /* mark & release old frames */
1423  if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1424  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1426  s->last_picture_ptr->f.data[0]) {
1427  if (s->last_picture_ptr->owner2 == s)
 /* NOTE(review): the release call on the old last picture was lost here */
1429  }
1430 
1431  /* release forgotten pictures */
1432  /* if (mpeg124/h263) */
1433  if (!s->encoding) {
1434  for (i = 0; i < s->picture_count; i++) {
1435  if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1436  &s->picture[i] != s->last_picture_ptr &&
1437  &s->picture[i] != s->next_picture_ptr &&
1438  s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1439  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1440  av_log(avctx, AV_LOG_ERROR,
1441  "releasing zombie picture\n");
1442  free_frame_buffer(s, &s->picture[i]);
1443  }
1444  }
1445  }
1446  }
1447 
 /* pick (or reuse) a slot for the picture being decoded */
1448  if (!s->encoding) {
1450 
1451  if (s->current_picture_ptr &&
1452  s->current_picture_ptr->f.data[0] == NULL) {
1453  // we already have a unused image
1454  // (maybe it was set before reading the header)
1455  pic = s->current_picture_ptr;
1456  } else {
1457  i = ff_find_unused_picture(s, 0);
1458  if (i < 0) {
1459  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1460  return i;
1461  }
1462  pic = &s->picture[i];
1463  }
1464 
 /* reference == 3 marks a full-frame reference; for H.264 the field
  * structure is used instead; droppable frames are never references */
1465  pic->f.reference = 0;
1466  if (!s->droppable) {
1467  if (s->codec_id == AV_CODEC_ID_H264)
1468  pic->f.reference = s->picture_structure;
1469  else if (s->pict_type != AV_PICTURE_TYPE_B)
1470  pic->f.reference = 3;
1471  }
1472 
1474 
1475  if (ff_alloc_picture(s, pic, 0) < 0)
1476  return -1;
1477 
1478  s->current_picture_ptr = pic;
1479  // FIXME use only the vars from current_pic
 /* NOTE(review): several lines setting interlacing flags were lost here */
1481  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1483  if (s->picture_structure != PICT_FRAME)
1486  }
1490  }
1491 
1493  // if (s->flags && CODEC_FLAG_QSCALE)
1494  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1496 
1498 
 /* rotate the last/next picture pointers on non-B frames */
1499  if (s->pict_type != AV_PICTURE_TYPE_B) {
1501  if (!s->droppable)
1503  }
1504  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
 /* NOTE(review): the pointer arguments of this av_dlog were lost here */
1509  s->pict_type, s->droppable);
1510 
1511  if (s->codec_id != AV_CODEC_ID_H264) {
 /* a non-I frame without a last picture (or field-based first keyframe):
  * synthesize a gray dummy reference so motion compensation has data */
1512  if ((s->last_picture_ptr == NULL ||
1513  s->last_picture_ptr->f.data[0] == NULL) &&
1514  (s->pict_type != AV_PICTURE_TYPE_I ||
1515  s->picture_structure != PICT_FRAME)) {
1516  int h_chroma_shift, v_chroma_shift;
1518  &h_chroma_shift, &v_chroma_shift);
1519  if (s->pict_type != AV_PICTURE_TYPE_I)
1520  av_log(avctx, AV_LOG_ERROR,
1521  "warning: first frame is no keyframe\n");
1522  else if (s->picture_structure != PICT_FRAME)
1523  av_log(avctx, AV_LOG_INFO,
1524  "allocate dummy last picture for field based first keyframe\n");
1525 
1526  /* Allocate a dummy frame */
1527  i = ff_find_unused_picture(s, 0);
1528  if (i < 0) {
1529  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1530  return i;
1531  }
1532  s->last_picture_ptr = &s->picture[i];
1533  s->last_picture_ptr->f.key_frame = 0;
1534  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1535  s->last_picture_ptr = NULL;
1536  return -1;
1537  }
1538 
 /* fill the dummy frame with mid-gray (0x80) */
1539  memset(s->last_picture_ptr->f.data[0], 0x80,
1540  avctx->height * s->last_picture_ptr->f.linesize[0]);
1541  memset(s->last_picture_ptr->f.data[1], 0x80,
1542  (avctx->height >> v_chroma_shift) *
1543  s->last_picture_ptr->f.linesize[1]);
1544  memset(s->last_picture_ptr->f.data[2], 0x80,
1545  (avctx->height >> v_chroma_shift) *
1546  s->last_picture_ptr->f.linesize[2]);
1547 
 /* NOTE(review): the guarding condition (source line 1548) was lost;
  * the loop below overwrites luma with 16 (black) in some mode */
1549  for(i=0; i<avctx->height; i++)
1550  memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1551  }
1552 
1553  ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1554  ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1555  s->last_picture_ptr->f.reference = 3;
1556  }
 /* B frame without a next picture: allocate a dummy forward reference */
1557  if ((s->next_picture_ptr == NULL ||
1558  s->next_picture_ptr->f.data[0] == NULL) &&
1559  s->pict_type == AV_PICTURE_TYPE_B) {
1560  /* Allocate a dummy frame */
1561  i = ff_find_unused_picture(s, 0);
1562  if (i < 0) {
1563  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1564  return i;
1565  }
1566  s->next_picture_ptr = &s->picture[i];
1567  s->next_picture_ptr->f.key_frame = 0;
1568  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1569  s->next_picture_ptr = NULL;
1570  return -1;
1571  }
1572  ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1573  ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1574  s->next_picture_ptr->f.reference = 3;
1575  }
1576  }
1577 
1578  memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1579  memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
1580  if (s->last_picture_ptr)
 /* NOTE(review): the copy into s->last_picture was lost here */
1582  if (s->next_picture_ptr)
1584 
 /* under frame threading this context takes ownership of its references */
1585  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1586  if (s->next_picture_ptr)
1587  s->next_picture_ptr->owner2 = s;
1588  if (s->last_picture_ptr)
1589  s->last_picture_ptr->owner2 = s;
1590  }
1591 
1592  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1593  s->last_picture_ptr->f.data[0]));
1594 
 /* field pictures: double the line strides so each field is addressed
  * as if it were a half-height frame */
1595  if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1596  int i;
1597  for (i = 0; i < 4; i++) {
1599  s->current_picture.f.data[i] +=
1600  s->current_picture.f.linesize[i];
1601  }
1602  s->current_picture.f.linesize[i] *= 2;
1603  s->last_picture.f.linesize[i] *= 2;
1604  s->next_picture.f.linesize[i] *= 2;
1605  }
1606  }
1607 
1608  s->err_recognition = avctx->err_recognition;
1609 
1610  /* set dequantizer, we can't do it during init as
1611  * it might change for mpeg4 and we can't do it in the header
1612  * decode as init is not called for mpeg4 there yet */
1613  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1616  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1619  } else {
1622  }
1623 
1624  if (s->dct_error_sum) {
1625  assert(s->avctx->noise_reduction && s->encoding);
 /* NOTE(review): presumably calls update_noise_reduction(s) here */
1627  }
1628 
 /* NOTE(review): XvMC guard condition (source line 1629) was lost */
1630  return ff_xvmc_field_start(s, avctx);
1631 
1632  return 0;
1633 }
1634 
1635 /* generic function for encode/decode called after a
1636  * frame has been coded/decoded. */
/* NOTE(review): signature (void ff_MPV_frame_end(MpegEncContext *s)) and a
 * number of statements were lost in extraction; gaps are marked below. */
1638 {
1639  int i;
1640  /* redraw edges for the frame if decoding didn't complete */
1641  // just to make sure that all data is rendered.
 /* NOTE(review): the XvMC guard condition was lost here */
1643  ff_xvmc_field_end(s);
 /* draw replicated edges around the picture unless the codec/config
  * makes that unnecessary (hwaccel, edge emulation, lowres, ...) */
1644  } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1645  !s->avctx->hwaccel &&
1647  s->unrestricted_mv &&
1649  !s->intra_only &&
1650  !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1651  !s->avctx->lowres
1652  ) {
1654  int hshift = desc->log2_chroma_w;
1655  int vshift = desc->log2_chroma_h;
 /* NOTE(review): the draw_edges() call heads were lost; only their
  * argument tails remain below (luma, then the two chroma planes) */
1657  s->h_edge_pos, s->v_edge_pos,
1659  EDGE_TOP | EDGE_BOTTOM);
1661  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1662  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1663  EDGE_TOP | EDGE_BOTTOM);
1665  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1666  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1667  EDGE_TOP | EDGE_BOTTOM);
1668  }
1669 
 /* leave the FPU/MMX state clean for code that follows */
1670  emms_c();
1671 
1672  s->last_pict_type = s->pict_type;
1674  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1676  }
1677 #if 0
1678  /* copy back current_picture variables */
1679  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1680  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1681  s->picture[i] = s->current_picture;
1682  break;
1683  }
1684  }
1685  assert(i < MAX_PICTURE_COUNT);
1686 #endif
1687 
1688  if (s->encoding) {
1689  /* release non-reference frames */
1690  for (i = 0; i < s->picture_count; i++) {
1691  if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1692  /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1693  free_frame_buffer(s, &s->picture[i]);
1694  }
1695  }
1696  }
1697  // clear copies, to avoid confusion
1698 #if 0
1699  memset(&s->last_picture, 0, sizeof(Picture));
1700  memset(&s->next_picture, 0, sizeof(Picture));
1701  memset(&s->current_picture, 0, sizeof(Picture));
1702 #endif
1704 
 /* NOTE(review): the trailing progress-report/avctx update statements
  * (source lines 1705-1706) were lost in extraction */
1707  }
1708 }
1709 
1717 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1718  int w, int h, int stride, int color)
1719 {
1720  int x, y, fr, f;
1721 
1722  sx = av_clip(sx, 0, w - 1);
1723  sy = av_clip(sy, 0, h - 1);
1724  ex = av_clip(ex, 0, w - 1);
1725  ey = av_clip(ey, 0, h - 1);
1726 
1727  buf[sy * stride + sx] += color;
1728 
1729  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1730  if (sx > ex) {
1731  FFSWAP(int, sx, ex);
1732  FFSWAP(int, sy, ey);
1733  }
1734  buf += sx + sy * stride;
1735  ex -= sx;
1736  f = ((ey - sy) << 16) / ex;
1737  for (x = 0; x <= ex; x++) {
1738  y = (x * f) >> 16;
1739  fr = (x * f) & 0xFFFF;
1740  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1741  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1742  }
1743  } else {
1744  if (sy > ey) {
1745  FFSWAP(int, sx, ex);
1746  FFSWAP(int, sy, ey);
1747  }
1748  buf += sx + sy * stride;
1749  ey -= sy;
1750  if (ey)
1751  f = ((ex - sx) << 16) / ey;
1752  else
1753  f = 0;
1754  for(y= 0; y <= ey; y++){
1755  x = (y*f) >> 16;
1756  fr = (y*f) & 0xFFFF;
1757  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1758  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1759  }
1760  }
1761 }
1762 
1770 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
1771  int ey, int w, int h, int stride, int color)
1772 {
1773  int dx,dy;
1774 
1775  sx = av_clip(sx, -100, w + 100);
1776  sy = av_clip(sy, -100, h + 100);
1777  ex = av_clip(ex, -100, w + 100);
1778  ey = av_clip(ey, -100, h + 100);
1779 
1780  dx = ex - sx;
1781  dy = ey - sy;
1782 
1783  if (dx * dx + dy * dy > 3 * 3) {
1784  int rx = dx + dy;
1785  int ry = -dx + dy;
1786  int length = ff_sqrt((rx * rx + ry * ry) << 8);
1787 
1788  // FIXME subpixel accuracy
1789  rx = ROUNDED_DIV(rx * 3 << 4, length);
1790  ry = ROUNDED_DIV(ry * 3 << 4, length);
1791 
1792  draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1793  draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1794  }
1795  draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1796 }
1797 
/* Print and/or visualize per-macroblock debug information (skip counts,
 * QP values, MB types, motion vectors) depending on avctx->debug and
 * avctx->debug_mv. Visualization draws directly into copies of the
 * picture planes held in visualization_buffer[]. */
1801 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
1802  uint8_t *visualization_buffer[3], int *low_delay,
1803  int mb_width, int mb_height, int mb_stride, int quarter_sample)
1804 {
 /* NOTE(review): the second line of this guard (source line 1806) was
  * lost in extraction — presumably further null checks. */
1805  if ( avctx->hwaccel || !pict || !pict->mb_type
1807  return;
1808 
1809 
 /* --- textual per-MB dump to the log --- */
1810  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1811  int x,y;
1812 
1813  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
 /* NOTE(review): the picture-type argument (source line 1814) was lost */
1815  for (y = 0; y < mb_height; y++) {
1816  for (x = 0; x < mb_width; x++) {
1817  if (avctx->debug & FF_DEBUG_SKIP) {
1818  int count = mbskip_table[x + y * mb_stride];
1819  if (count > 9)
1820  count = 9;
1821  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1822  }
1823  if (avctx->debug & FF_DEBUG_QP) {
1824  av_log(avctx, AV_LOG_DEBUG, "%2d",
1825  pict->qscale_table[x + y * mb_stride]);
1826  }
1827  if (avctx->debug & FF_DEBUG_MB_TYPE) {
1828  int mb_type = pict->mb_type[x + y * mb_stride];
1829  // Type & MV direction
1830  if (IS_PCM(mb_type))
1831  av_log(avctx, AV_LOG_DEBUG, "P");
1832  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1833  av_log(avctx, AV_LOG_DEBUG, "A");
1834  else if (IS_INTRA4x4(mb_type))
1835  av_log(avctx, AV_LOG_DEBUG, "i");
1836  else if (IS_INTRA16x16(mb_type))
1837  av_log(avctx, AV_LOG_DEBUG, "I");
1838  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1839  av_log(avctx, AV_LOG_DEBUG, "d");
1840  else if (IS_DIRECT(mb_type))
1841  av_log(avctx, AV_LOG_DEBUG, "D");
1842  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1843  av_log(avctx, AV_LOG_DEBUG, "g");
1844  else if (IS_GMC(mb_type))
1845  av_log(avctx, AV_LOG_DEBUG, "G");
1846  else if (IS_SKIP(mb_type))
1847  av_log(avctx, AV_LOG_DEBUG, "S");
1848  else if (!USES_LIST(mb_type, 1))
1849  av_log(avctx, AV_LOG_DEBUG, ">");
1850  else if (!USES_LIST(mb_type, 0))
1851  av_log(avctx, AV_LOG_DEBUG, "<");
1852  else {
1853  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1854  av_log(avctx, AV_LOG_DEBUG, "X");
1855  }
1856 
1857  // segmentation
1858  if (IS_8X8(mb_type))
1859  av_log(avctx, AV_LOG_DEBUG, "+");
1860  else if (IS_16X8(mb_type))
1861  av_log(avctx, AV_LOG_DEBUG, "-");
1862  else if (IS_8X16(mb_type))
1863  av_log(avctx, AV_LOG_DEBUG, "|");
1864  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1865  av_log(avctx, AV_LOG_DEBUG, " ");
1866  else
1867  av_log(avctx, AV_LOG_DEBUG, "?");
1868 
1869 
1870  if (IS_INTERLACED(mb_type))
1871  av_log(avctx, AV_LOG_DEBUG, "=");
1872  else
1873  av_log(avctx, AV_LOG_DEBUG, " ");
1874  }
1875  }
1876  av_log(avctx, AV_LOG_DEBUG, "\n");
1877  }
1878  }
1879 
 /* --- graphical visualization drawn onto plane copies --- */
1880  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1881  (avctx->debug_mv)) {
1882  const int shift = 1 + quarter_sample;
1883  int mb_y;
1884  uint8_t *ptr;
1885  int i;
1886  int h_chroma_shift, v_chroma_shift, block_height;
1887  const int width = avctx->width;
1888  const int height = avctx->height;
1889  const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1890  const int mv_stride = (mb_width << mv_sample_log2) +
1891  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1892  *low_delay = 0; // needed to see the vectors without trashing the buffers
1893 
1894  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1895 
 /* copy the picture planes so the drawing does not corrupt reference
  * data; NOTE(review): the av_realloc() result is not checked */
1896  for (i = 0; i < 3; i++) {
1897  size_t size= (i == 0) ? pict->linesize[i] * FFALIGN(height, 16):
1898  pict->linesize[i] * FFALIGN(height, 16) >> v_chroma_shift;
1899  visualization_buffer[i]= av_realloc(visualization_buffer[i], size);
1900  memcpy(visualization_buffer[i], pict->data[i], size);
1901  pict->data[i] = visualization_buffer[i];
1902  }
1903  pict->type = FF_BUFFER_TYPE_COPY;
1904  pict->opaque= NULL;
1905  ptr = pict->data[0];
1906  block_height = 16 >> v_chroma_shift;
1907 
1908  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1909  int mb_x;
1910  for (mb_x = 0; mb_x < mb_width; mb_x++) {
1911  const int mb_index = mb_x + mb_y * mb_stride;
 /* motion vector arrows: type 0 = P forward, 1 = B forward,
  * 2 = B backward, selected by avctx->debug_mv flags */
1912  if ((avctx->debug_mv) && pict->motion_val[0]) {
1913  int type;
1914  for (type = 0; type < 3; type++) {
1915  int direction = 0;
1916  switch (type) {
1917  case 0:
1918  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1919  (pict->pict_type!= AV_PICTURE_TYPE_P))
1920  continue;
1921  direction = 0;
1922  break;
1923  case 1:
1924  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1925  (pict->pict_type!= AV_PICTURE_TYPE_B))
1926  continue;
1927  direction = 0;
1928  break;
1929  case 2:
1930  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1931  (pict->pict_type!= AV_PICTURE_TYPE_B))
1932  continue;
1933  direction = 1;
1934  break;
1935  }
1936  if (!USES_LIST(pict->mb_type[mb_index], direction))
1937  continue;
1938 
 /* one arrow per partition: 4 for 8x8, 2 for 16x8/8x16,
  * 1 for 16x16 macroblocks */
1939  if (IS_8X8(pict->mb_type[mb_index])) {
1940  int i;
1941  for (i = 0; i < 4; i++) {
1942  int sx = mb_x * 16 + 4 + 8 * (i & 1);
1943  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1944  int xy = (mb_x * 2 + (i & 1) +
1945  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1946  int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1947  int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1948  draw_arrow(ptr, sx, sy, mx, my, width,
1949  height, pict->linesize[0], 100);
1950  }
1951  } else if (IS_16X8(pict->mb_type[mb_index])) {
1952  int i;
1953  for (i = 0; i < 2; i++) {
1954  int sx = mb_x * 16 + 8;
1955  int sy = mb_y * 16 + 4 + 8 * i;
1956  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1957  int mx = (pict->motion_val[direction][xy][0] >> shift);
1958  int my = (pict->motion_val[direction][xy][1] >> shift);
1959 
1960  if (IS_INTERLACED(pict->mb_type[mb_index]))
1961  my *= 2;
1962 
1963  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1964  height, pict->linesize[0], 100);
1965  }
1966  } else if (IS_8X16(pict->mb_type[mb_index])) {
1967  int i;
1968  for (i = 0; i < 2; i++) {
1969  int sx = mb_x * 16 + 4 + 8 * i;
1970  int sy = mb_y * 16 + 8;
1971  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1972  int mx = pict->motion_val[direction][xy][0] >> shift;
1973  int my = pict->motion_val[direction][xy][1] >> shift;
1974 
1975  if (IS_INTERLACED(pict->mb_type[mb_index]))
1976  my *= 2;
1977 
1978  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1979  height, pict->linesize[0], 100);
1980  }
1981  } else {
1982  int sx= mb_x * 16 + 8;
1983  int sy= mb_y * 16 + 8;
1984  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
1985  int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1986  int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1987  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
1988  }
1989  }
1990  }
 /* QP visualization: paint chroma planes with a gray level
  * proportional to the macroblock quantizer */
1991  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
1992  uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1993  0x0101010101010101ULL;
1994  int y;
1995  for (y = 0; y < block_height; y++) {
1996  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1997  (block_height * mb_y + y) *
1998  pict->linesize[1]) = c;
1999  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2000  (block_height * mb_y + y) *
2001  pict->linesize[2]) = c;
2002  }
2003  }
 /* MB-type visualization: encode the type as a chroma hue */
2004  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2005  pict->motion_val[0]) {
2006  int mb_type = pict->mb_type[mb_index];
2007  uint64_t u,v;
2008  int y;
2009 #define COLOR(theta, r) \
2010  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2011  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2012 
2013 
2014  u = v = 128;
2015  if (IS_PCM(mb_type)) {
2016  COLOR(120, 48)
2017  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2018  IS_INTRA16x16(mb_type)) {
2019  COLOR(30, 48)
2020  } else if (IS_INTRA4x4(mb_type)) {
2021  COLOR(90, 48)
2022  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2023  // COLOR(120, 48)
2024  } else if (IS_DIRECT(mb_type)) {
2025  COLOR(150, 48)
2026  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2027  COLOR(170, 48)
2028  } else if (IS_GMC(mb_type)) {
2029  COLOR(190, 48)
2030  } else if (IS_SKIP(mb_type)) {
2031  // COLOR(180, 48)
2032  } else if (!USES_LIST(mb_type, 1)) {
2033  COLOR(240, 48)
2034  } else if (!USES_LIST(mb_type, 0)) {
2035  COLOR(0, 48)
2036  } else {
2037  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2038  COLOR(300,48)
2039  }
2040 
2041  u *= 0x0101010101010101ULL;
2042  v *= 0x0101010101010101ULL;
2043  for (y = 0; y < block_height; y++) {
2044  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2045  (block_height * mb_y + y) * pict->linesize[1]) = u;
2046  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2047  (block_height * mb_y + y) * pict->linesize[2]) = v;
2048  }
2049 
2050  // segmentation
 /* XOR the luma along partition boundaries to make them visible */
2051  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2052  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2053  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2054  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2055  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2056  }
2057  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2058  for (y = 0; y < 16; y++)
2059  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2060  pict->linesize[0]] ^= 0x80;
2061  }
2062  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2063  int dm = 1 << (mv_sample_log2 - 2);
2064  for (i = 0; i < 4; i++) {
2065  int sx = mb_x * 16 + 8 * (i & 1);
2066  int sy = mb_y * 16 + 8 * (i >> 1);
2067  int xy = (mb_x * 2 + (i & 1) +
2068  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2069  // FIXME bidir
2070  int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2071  if (mv[0] != mv[dm] ||
2072  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2073  for (y = 0; y < 8; y++)
2074  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2075  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2076  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2077  pict->linesize[0]) ^= 0x8080808080808080ULL;
2078  }
2079  }
2080 
2081  if (IS_INTERLACED(mb_type) &&
2082  avctx->codec->id == AV_CODEC_ID_H264) {
2083  // hmm
2084  }
2085  }
2086  mbskip_table[mb_index] = 0;
2087  }
2088  }
2089  }
2090 }
2091 
/* NOTE(review): signature and first argument lines lost in extraction —
 * this is the thin wrapper ff_print_debug_info() that forwards the
 * MpegEncContext's dimensions to ff_print_debug_info2(). */
2093 {
2095  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2096 }
2097 
2098 static inline int hpel_motion_lowres(MpegEncContext *s,
2099  uint8_t *dest, uint8_t *src,
2100  int field_based, int field_select,
2101  int src_x, int src_y,
2102  int width, int height, int stride,
2103  int h_edge_pos, int v_edge_pos,
2104  int w, int h, h264_chroma_mc_func *pix_op,
2105  int motion_x, int motion_y)
2106 {
2107  const int lowres = s->avctx->lowres;
2108  const int op_index = FFMIN(lowres, 3);
2109  const int s_mask = (2 << lowres) - 1;
2110  int emu = 0;
2111  int sx, sy;
2112 
2113  if (s->quarter_sample) {
2114  motion_x /= 2;
2115  motion_y /= 2;
2116  }
2117 
2118  sx = motion_x & s_mask;
2119  sy = motion_y & s_mask;
2120  src_x += motion_x >> lowres + 1;
2121  src_y += motion_y >> lowres + 1;
2122 
2123  src += src_y * stride + src_x;
2124 
2125  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2126  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2127  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2128  (h + 1) << field_based, src_x,
2129  src_y << field_based,
2130  h_edge_pos,
2131  v_edge_pos);
2132  src = s->edge_emu_buffer;
2133  emu = 1;
2134  }
2135 
2136  sx = (sx << 2) >> lowres;
2137  sy = (sy << 2) >> lowres;
2138  if (field_select)
2139  src += s->linesize;
2140  pix_op[op_index](dest, src, stride, h, sx, sy);
2141  return emu;
2142 }
2143 
2144 /* apply one mpeg motion vector to the three components */
/* NOTE(review): the signature head (static ... mpeg_motion_lowres(
 * MpegEncContext *s, — source line 2145) was lost in extraction. */
2146  uint8_t *dest_y,
2147  uint8_t *dest_cb,
2148  uint8_t *dest_cr,
2149  int field_based,
2150  int bottom_field,
2151  int field_select,
2152  uint8_t **ref_picture,
2153  h264_chroma_mc_func *pix_op,
2154  int motion_x, int motion_y,
2155  int h, int mb_y)
2156 {
2157  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2158  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2159  uvsx, uvsy;
2160  const int lowres = s->avctx->lowres;
2161  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2162  const int block_s = 8>>lowres;
2163  const int s_mask = (2 << lowres) - 1;
2164  const int h_edge_pos = s->h_edge_pos >> lowres;
2165  const int v_edge_pos = s->v_edge_pos >> lowres;
2166  linesize = s->current_picture.f.linesize[0] << field_based;
2167  uvlinesize = s->current_picture.f.linesize[1] << field_based;
2168 
2169  // FIXME obviously not perfect but qpel will not work in lowres anyway
2170  if (s->quarter_sample) {
2171  motion_x /= 2;
2172  motion_y /= 2;
2173  }
2174 
2175  if(field_based){
2176  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2177  }
2178 
 /* split the luma vector into sub-pel and integer parts
  * (note: + binds tighter than >>, so these shift by lowres + 1) */
2179  sx = motion_x & s_mask;
2180  sy = motion_y & s_mask;
2181  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2182  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2183 
 /* derive the chroma source position per output format / subsampling */
2184  if (s->out_format == FMT_H263) {
2185  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2186  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2187  uvsrc_x = src_x >> 1;
2188  uvsrc_y = src_y >> 1;
2189  } else if (s->out_format == FMT_H261) {
2190  // even chroma mv's are full pel in H261
2191  mx = motion_x / 4;
2192  my = motion_y / 4;
2193  uvsx = (2 * mx) & s_mask;
2194  uvsy = (2 * my) & s_mask;
2195  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2196  uvsrc_y = mb_y * block_s + (my >> lowres);
2197  } else {
2198  if(s->chroma_y_shift){
2199  mx = motion_x / 2;
2200  my = motion_y / 2;
2201  uvsx = mx & s_mask;
2202  uvsy = my & s_mask;
2203  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2204  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2205  } else {
2206  if(s->chroma_x_shift){
2207  //Chroma422
2208  mx = motion_x / 2;
2209  uvsx = mx & s_mask;
2210  uvsy = motion_y & s_mask;
2211  uvsrc_y = src_y;
2212  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2213  } else {
2214  //Chroma444
2215  uvsx = motion_x & s_mask;
2216  uvsy = motion_y & s_mask;
2217  uvsrc_x = src_x;
2218  uvsrc_y = src_y;
2219  }
2220  }
2221  }
2222 
2223  ptr_y = ref_picture[0] + src_y * linesize + src_x;
2224  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2225  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2226 
 /* edge emulation if any plane reaches outside the decodable area */
2227  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2228  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2229  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2230  linesize >> field_based, 17, 17 + field_based,
2231  src_x, src_y << field_based, h_edge_pos,
2232  v_edge_pos);
2233  ptr_y = s->edge_emu_buffer;
2234  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2235  uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2236  s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2237  9 + field_based,
2238  uvsrc_x, uvsrc_y << field_based,
2239  h_edge_pos >> 1, v_edge_pos >> 1);
2240  s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2241  9 + field_based,
2242  uvsrc_x, uvsrc_y << field_based,
2243  h_edge_pos >> 1, v_edge_pos >> 1);
2244  ptr_cb = uvbuf;
2245  ptr_cr = uvbuf + 16;
2246  }
2247  }
2248 
2249  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
2250  if (bottom_field) {
2251  dest_y += s->linesize;
2252  dest_cb += s->uvlinesize;
2253  dest_cr += s->uvlinesize;
2254  }
2255 
2256  if (field_select) {
2257  ptr_y += s->linesize;
2258  ptr_cb += s->uvlinesize;
2259  ptr_cr += s->uvlinesize;
2260  }
2261 
 /* rescale sub-pel offsets and run the luma/chroma MC operations */
2262  sx = (sx << 2) >> lowres;
2263  sy = (sy << 2) >> lowres;
2264  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2265 
2266  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2267  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2268  uvsx = (uvsx << 2) >> lowres;
2269  uvsy = (uvsy << 2) >> lowres;
2270  if (hc) {
2271  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2272  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2273  }
2274  }
2275  // FIXME h261 lowres loop filter
2276 }
2277 
/* NOTE(review): the signature head (static inline void
 * chroma_4mv_motion_lowres(MpegEncContext *s, — source line 2278) was
 * lost in extraction. Chroma motion compensation for 8x8-partitioned
 * macroblocks in lowres mode, using one averaged chroma vector. */
2279  uint8_t *dest_cb, uint8_t *dest_cr,
2280  uint8_t **ref_picture,
2281  h264_chroma_mc_func * pix_op,
2282  int mx, int my)
2283 {
2284  const int lowres = s->avctx->lowres;
2285  const int op_index = FFMIN(lowres, 3);
2286  const int block_s = 8 >> lowres;
2287  const int s_mask = (2 << lowres) - 1;
2288  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2289  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2290  int emu = 0, src_x, src_y, offset, sx, sy;
2291  uint8_t *ptr;
2292 
2293  if (s->quarter_sample) {
2294  mx /= 2;
2295  my /= 2;
2296  }
2297 
2298  /* In case of 8X8, we construct a single chroma motion vector
2299  with a special rounding */
2300  mx = ff_h263_round_chroma(mx);
2301  my = ff_h263_round_chroma(my);
2302 
 /* sub-pel and integer parts (+ binds tighter than >>) */
2303  sx = mx & s_mask;
2304  sy = my & s_mask;
2305  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2306  src_y = s->mb_y * block_s + (my >> lowres + 1);
2307 
2308  offset = src_y * s->uvlinesize + src_x;
2309  ptr = ref_picture[1] + offset;
2310  if (s->flags & CODEC_FLAG_EMU_EDGE) {
2311  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2312  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
 /* NOTE(review): the emulated_edge_mc() call head (source line 2313)
  * was lost in extraction; only its argument tail remains */
2314  9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2315  ptr = s->edge_emu_buffer;
2316  emu = 1;
2317  }
2318  }
2319  sx = (sx << 2) >> lowres;
2320  sy = (sy << 2) >> lowres;
2321  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2322 
 /* Cr reuses the same offset; re-run edge emulation if Cb needed it */
2323  ptr = ref_picture[2] + offset;
2324  if (emu) {
2325  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2326  src_x, src_y, h_edge_pos, v_edge_pos);
2327  ptr = s->edge_emu_buffer;
2328  }
2329  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2330 }
2331 
2343 static inline void MPV_motion_lowres(MpegEncContext *s,
2344  uint8_t *dest_y, uint8_t *dest_cb,
2345  uint8_t *dest_cr,
2346  int dir, uint8_t **ref_picture,
2347  h264_chroma_mc_func *pix_op)
2348 {
2349  int mx, my;
2350  int mb_x, mb_y, i;
2351  const int lowres = s->avctx->lowres;
2352  const int block_s = 8 >>lowres;
2353 
2354  mb_x = s->mb_x;
2355  mb_y = s->mb_y;
2356 
2357  switch (s->mv_type) {
2358  case MV_TYPE_16X16:
2359  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2360  0, 0, 0,
2361  ref_picture, pix_op,
2362  s->mv[dir][0][0], s->mv[dir][0][1],
2363  2 * block_s, mb_y);
2364  break;
2365  case MV_TYPE_8X8:
2366  mx = 0;
2367  my = 0;
2368  for (i = 0; i < 4; i++) {
2369  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2370  s->linesize) * block_s,
2371  ref_picture[0], 0, 0,
2372  (2 * mb_x + (i & 1)) * block_s,
2373  (2 * mb_y + (i >> 1)) * block_s,
2374  s->width, s->height, s->linesize,
2375  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2376  block_s, block_s, pix_op,
2377  s->mv[dir][i][0], s->mv[dir][i][1]);
2378 
2379  mx += s->mv[dir][i][0];
2380  my += s->mv[dir][i][1];
2381  }
2382 
2383  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2384  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2385  pix_op, mx, my);
2386  break;
2387  case MV_TYPE_FIELD:
2388  if (s->picture_structure == PICT_FRAME) {
2389  /* top field */
2390  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2391  1, 0, s->field_select[dir][0],
2392  ref_picture, pix_op,
2393  s->mv[dir][0][0], s->mv[dir][0][1],
2394  block_s, mb_y);
2395  /* bottom field */
2396  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2397  1, 1, s->field_select[dir][1],
2398  ref_picture, pix_op,
2399  s->mv[dir][1][0], s->mv[dir][1][1],
2400  block_s, mb_y);
2401  } else {
2402  if (s->picture_structure != s->field_select[dir][0] + 1 &&
2403  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2404  ref_picture = s->current_picture_ptr->f.data;
2405 
2406  }
2407  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2408  0, 0, s->field_select[dir][0],
2409  ref_picture, pix_op,
2410  s->mv[dir][0][0],
2411  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2412  }
2413  break;
2414  case MV_TYPE_16X8:
2415  for (i = 0; i < 2; i++) {
2416  uint8_t **ref2picture;
2417 
2418  if (s->picture_structure == s->field_select[dir][i] + 1 ||
2419  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2420  ref2picture = ref_picture;
2421  } else {
2422  ref2picture = s->current_picture_ptr->f.data;
2423  }
2424 
2425  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2426  0, 0, s->field_select[dir][i],
2427  ref2picture, pix_op,
2428  s->mv[dir][i][0], s->mv[dir][i][1] +
2429  2 * block_s * i, block_s, mb_y >> 1);
2430 
2431  dest_y += 2 * block_s * s->linesize;
2432  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2433  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2434  }
2435  break;
2436  case MV_TYPE_DMV:
2437  if (s->picture_structure == PICT_FRAME) {
2438  for (i = 0; i < 2; i++) {
2439  int j;
2440  for (j = 0; j < 2; j++) {
2441  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2442  1, j, j ^ i,
2443  ref_picture, pix_op,
2444  s->mv[dir][2 * i + j][0],
2445  s->mv[dir][2 * i + j][1],
2446  block_s, mb_y);
2447  }
2449  }
2450  } else {
2451  for (i = 0; i < 2; i++) {
2452  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2453  0, 0, s->picture_structure != i + 1,
2454  ref_picture, pix_op,
2455  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2456  2 * block_s, mb_y >> 1);
2457 
2458  // after put we make avg of the same block
2460 
2461  // opposite parity is always in the same
2462  // frame if this is second field
2463  if (!s->first_field) {
2464  ref_picture = s->current_picture_ptr->f.data;
2465  }
2466  }
2467  }
2468  break;
2469  default:
2470  av_assert2(0);
2471  }
2472 }
2473 
2478 {
2479  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2480  int my, off, i, mvs;
2481 
2482  if (s->picture_structure != PICT_FRAME || s->mcsel)
2483  goto unhandled;
2484 
2485  switch (s->mv_type) {
2486  case MV_TYPE_16X16:
2487  mvs = 1;
2488  break;
2489  case MV_TYPE_16X8:
2490  mvs = 2;
2491  break;
2492  case MV_TYPE_8X8:
2493  mvs = 4;
2494  break;
2495  default:
2496  goto unhandled;
2497  }
2498 
2499  for (i = 0; i < mvs; i++) {
2500  my = s->mv[dir][i][1]<<qpel_shift;
2501  my_max = FFMAX(my_max, my);
2502  my_min = FFMIN(my_min, my);
2503  }
2504 
2505  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2506 
2507  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2508 unhandled:
2509  return s->mb_height-1;
2510 }
2511 
/* put block[] to dest[]: dequantize an intra block and write its IDCT
   straight into dest (overwrite, no prediction/residual add) */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
2519 
/* add block[] to dest[]: IDCT the (already dequantized) residual and add it
   to the prediction in dest; skipped entirely for empty blocks */
static inline void add_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size)
{
    /* block_last_index < 0 means the block has no coded coefficients */
    if (s->block_last_index[i] >= 0) {
        s->dsp.idct_add (dest, line_size, block);
    }
}
2528 
/* like add_dct(), but the block still needs inter dequantization first */
static inline void add_dequant_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    if (s->block_last_index[i] >= 0) {
        s->dct_unquantize_inter(s, block, i, qscale);

        s->dsp.idct_add (dest, line_size, block);
    }
}
2538 
2543 {
2544  int wrap = s->b8_stride;
2545  int xy = s->block_index[0];
2546 
2547  s->dc_val[0][xy ] =
2548  s->dc_val[0][xy + 1 ] =
2549  s->dc_val[0][xy + wrap] =
2550  s->dc_val[0][xy + 1 + wrap] = 1024;
2551  /* ac pred */
2552  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2553  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2554  if (s->msmpeg4_version>=3) {
2555  s->coded_block[xy ] =
2556  s->coded_block[xy + 1 ] =
2557  s->coded_block[xy + wrap] =
2558  s->coded_block[xy + 1 + wrap] = 0;
2559  }
2560  /* chroma */
2561  wrap = s->mb_stride;
2562  xy = s->mb_x + s->mb_y * wrap;
2563  s->dc_val[1][xy] =
2564  s->dc_val[2][xy] = 1024;
2565  /* ac pred */
2566  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2567  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2568 
2569  s->mbintra_table[xy]= 0;
2570 }
2571 
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir : motion vector direction
   s->mv_type : motion vector type
   s->mv : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
/* NOTE(review): the signature line ("void MPV_decode_mb_internal(MpegEncContext *s,
   int16_t block[12][64],") appears to be missing from this extraction — confirm
   against upstream. */
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    /* NOTE(review): an XvMC acceleration guard line appears to be missing
       here — the following return and closing brace imply one. */
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients */
       int i,j;
       int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               /* store in permuted (IDCT) order and echo each coefficient */
               *dct++ = block[i][s->dsp.idct_permutation[j]];
               av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    /* remember the qscale used for this MB */
    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            /* NOTE(review): the statement guarded by this if (presumably a
               call resetting the intra prediction tables) appears to be
               missing from this extraction — confirm. */
            if(s->mbintra_table[mb_xy])
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* render the MB unless we are only collecting encoder statistics */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                /* NOTE(review): a line (likely an assertion on the picture
                   type) appears to be missing here — confirm. */
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT: rows of one 8x8 block come from alternating lines */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* B-frame with draw_horiz_band pending: render to scratch first */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){
                /* NOTE(review): this section is the frame-threading wait for
                   reference pictures; the guard line and the two
                   await-progress call lines appear to be missing from this
                   extraction — confirm against upstream. */
                if (s->mv_dir & MV_DIR_FORWARD) {
                        0);
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                        0);
                }
            }

            if(lowres_flag){
                /* NOTE(review): the declaration/initialization of the lowres
                   chroma mc function table (op_pix) appears to be missing
                   here — confirm. */
                if (s->mv_dir & MV_DIR_FORWARD) {
                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                    /* NOTE(review): switch of op_pix to the averaging table
                       for bidirectional MC appears to be missing here. */
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                }
            }else{
                op_qpix= s->me.qpel_put;
                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                    op_pix = s->dsp.put_pixels_tab;
                }else{
                    op_pix = s->dsp.put_no_rnd_pixels_tab;
                }
                if (s->mv_dir & MV_DIR_FORWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                    /* second direction averages into the first prediction */
                    op_pix = s->dsp.avg_pixels_tab;
                    op_qpix= s->me.qpel_avg;
                }
                if (s->mv_dir & MV_DIR_BACKWARD) {
                    ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                }
            }
        }

        /* skip dequant / idct if we are really late ;) */
        if(s->avctx->skip_idct){
            /* NOTE(review): the first condition lines of this if appear to
               be missing from this extraction — confirm. */
               || s->avctx->skip_idct >= AVDISCARD_ALL)
                goto skip_idct;
        }

        /* add dct residue */
        /* NOTE(review): the opening condition line of this if appears to be
           missing (codecs needing dequant as part of idct-add) — confirm. */
                            || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
            add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
            add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if (s->chroma_y_shift){
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                }else{
                    /* 4:2:2 — chroma has the full MB height */
                    dct_linesize >>= 1;
                    dct_offset >>=1;
                    add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                }
            }
        } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
            add_dct(s, block[0], 0, dest_y , dct_linesize);
            add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
            add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
            add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if(s->chroma_y_shift){//Chroma420
                    add_dct(s, block[4], 4, dest_cb, uvlinesize);
                    add_dct(s, block[5], 5, dest_cr, uvlinesize);
                }else{
                    //chroma422
                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                    add_dct(s, block[4], 4, dest_cb, dct_linesize);
                    add_dct(s, block[5], 5, dest_cr, dct_linesize);
                    add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                    add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                    if(!s->chroma_x_shift){//Chroma444
                        add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                        add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                        add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                        add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                    }
                }
            }//fi gray
        }
        /* NOTE(review): the "else if" line opening the WMV2 branch appears
           to be missing from this extraction — confirm. */
            ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
        }
    } else {
        /* dct only in intra block */
        /* NOTE(review): the condition line selecting the dequant-then-put
           path appears to be missing from this extraction — confirm. */
            put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
            put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if(s->chroma_y_shift){
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                }else{
                    dct_offset >>=1;
                    dct_linesize >>=1;
                    put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                }
            }
        }else{
            /* blocks are already dequantized: plain IDCT into dest */
            s->dsp.idct_put(dest_y , dct_linesize, block[0]);
            s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
            s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
            s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                if(s->chroma_y_shift){
                    s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                    s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                }else{

                    dct_linesize = uvlinesize << s->interlaced_dct;
                    dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;

                    s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
                    s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
                    s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                    s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                    if(!s->chroma_x_shift){//Chroma444
                        s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
                        s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
                        s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                        s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                    }
                }
            }//gray
        }
    }
skip_idct:
    /* if we rendered to scratch, copy the finished MB to the real frame */
    if(!readable){
        s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
        s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
        s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
    }
    }
}
2825 
2826 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2827 #if !CONFIG_SMALL
2828  if(s->out_format == FMT_MPEG1) {
2829  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2830  else MPV_decode_mb_internal(s, block, 0, 1);
2831  } else
2832 #endif
2833  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2834  else MPV_decode_mb_internal(s, block, 0, 0);
2835 }
2836 
/* NOTE(review): the function header line ("void ff_draw_horiz_band(AVCodecContext
   *avctx, DSPContext *dsp, Picture *cur," or similar) appears to be missing
   from this extraction — confirm against upstream. */
                        Picture *last, int y, int h, int picture_structure,
                        int first_field, int draw_edges, int low_delay,
                        int v_edge_pos, int h_edge_pos)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int hshift = desc->log2_chroma_w;
    int vshift = desc->log2_chroma_h;
    const int field_pic = picture_structure != PICT_FRAME;
    /* field pictures cover every other line: double y/h to frame coordinates */
    if(field_pic){
        h <<= 1;
        y <<= 1;
    }

    if (!avctx->hwaccel &&
        /* NOTE(review): one more condition line appears to be missing here —
           confirm against upstream. */
        draw_edges &&
        cur->f.reference &&
        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        int *linesize = cur->f.linesize;
        int sides = 0, edge_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= v_edge_pos)
            sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, v_edge_pos - y);

        /* replicate border pixels so unrestricted MVs can reference them */
        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
                        linesize[0], h_edge_pos, edge_h,
                        EDGE_WIDTH, EDGE_WIDTH, sides);
        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
    }

    h = FFMIN(h, avctx->height - y);

    if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (avctx->draw_horiz_band) {
        AVFrame *src;
        /* NOTE(review): the declaration of offset[AV_NUM_DATA_POINTERS]
           appears to be missing here — it is used below; confirm. */
        int i;

        /* low-delay / B-frames are shown immediately; otherwise show the
           previously decoded reference */
        if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
            /* NOTE(review): the rest of this condition appears to be missing
               from this extraction — confirm. */
            src = &cur->f;
        else if (last)
            src = &last->f;
        else
            return;

        if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
            picture_structure == PICT_FRAME &&
            avctx->codec_id != AV_CODEC_ID_H264 &&
            avctx->codec_id != AV_CODEC_ID_SVQ3) {
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * src->linesize[0];
            offset[1]=
            offset[2]= (y >> vshift) * src->linesize[1];
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        /* clear MMX state before handing control to user code */
        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, picture_structure, h);
    }
}
2915 
2917 {
2918  int draw_edges = s->unrestricted_mv && !s->intra_only;
2920  &s->last_picture, y, h, s->picture_structure,
2921  s->first_field, draw_edges, s->low_delay,
2922  s->v_edge_pos, s->h_edge_pos);
2923 }
2924 
/* Set up per-row block indexes and destination plane pointers for the MB at
   (s->mb_x, s->mb_y); pointers start one MB to the left of the row. */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4 - s->avctx->lowres;   /* log2 of MB size in pixels */

    /* 8x8 block indexes: 0-3 are the luma quadrants, 4/5 the chroma planes */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    /* NOTE(review): a guard condition line appears to be missing from this
       extraction before the following brace — confirm against upstream. */
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            /* field picture: mb_y counts field rows, hence the >> 1 */
            s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
        }
    }
}
2956 
/**
 * Re-permute the coefficients of a block that were stored in one scan order
 * into the layout expected by the (possibly SIMD-permuted) IDCT.
 * Coefficients past position @p last (in scan order) are assumed zero.
 * Does nothing when last <= 0.
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t tmp[64];
    int idx;

    if (last <= 0)
        return;
    //if(permutation[1]==1) return; //FIXME it is ok but not clean and might fail for some permutations

    /* gather the coded coefficients in scan order, clearing their old slots */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        tmp[pos]   = block[pos];
        block[pos] = 0;
    }

    /* scatter them back through the permutation table */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = tmp[pos];
    }
}
2985 
/* NOTE(review): the function header line ("void ff_mpeg_flush(AVCodecContext
   *avctx)" with its opening brace) appears to be missing from this
   extraction — confirm against upstream. Flushes decoder state: releases
   picture buffers and resets parsing/bitstream bookkeeping. */
    int i;
    MpegEncContext *s = avctx->priv_data;

    if(s==NULL || s->picture==NULL)
        return;

    /* release every internally or user allocated picture buffer */
    for(i=0; i<s->picture_count; i++){
       if (s->picture[i].f.data[0] &&
           (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
            s->picture[i].f.type == FF_BUFFER_TYPE_USER))
        free_frame_buffer(s, &s->picture[i]);
    }
    /* NOTE(review): a line resetting the current/last/next picture pointers
       appears to be missing here — confirm. */

    s->mb_x= s->mb_y= 0;
    s->closed_gop= 0;

    /* reset parser state; NOTE(review): one or two additional parse_context
       reset lines appear to be missing in this run — confirm. */
    s->parse_context.state= -1;
    s->parse_context.overread= 0;
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
3013 
3015  int16_t *block, int n, int qscale)
3016 {
3017  int i, level, nCoeffs;
3018  const uint16_t *quant_matrix;
3019 
3020  nCoeffs= s->block_last_index[n];
3021 
3022  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3023  /* XXX: only mpeg1 */
3024  quant_matrix = s->intra_matrix;
3025  for(i=1;i<=nCoeffs;i++) {
3026  int j= s->intra_scantable.permutated[i];
3027  level = block[j];
3028  if (level) {
3029  if (level < 0) {
3030  level = -level;
3031  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3032  level = (level - 1) | 1;
3033  level = -level;
3034  } else {
3035  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3036  level = (level - 1) | 1;
3037  }
3038  block[j] = level;
3039  }
3040  }
3041 }
3042 
3044  int16_t *block, int n, int qscale)
3045 {
3046  int i, level, nCoeffs;
3047  const uint16_t *quant_matrix;
3048 
3049  nCoeffs= s->block_last_index[n];
3050 
3051  quant_matrix = s->inter_matrix;
3052  for(i=0; i<=nCoeffs; i++) {
3053  int j= s->intra_scantable.permutated[i];
3054  level = block[j];
3055  if (level) {
3056  if (level < 0) {
3057  level = -level;
3058  level = (((level << 1) + 1) * qscale *
3059  ((int) (quant_matrix[j]))) >> 4;
3060  level = (level - 1) | 1;
3061  level = -level;
3062  } else {
3063  level = (((level << 1) + 1) * qscale *
3064  ((int) (quant_matrix[j]))) >> 4;
3065  level = (level - 1) | 1;
3066  }
3067  block[j] = level;
3068  }
3069  }
3070 }
3071 
3073  int16_t *block, int n, int qscale)
3074 {
3075  int i, level, nCoeffs;
3076  const uint16_t *quant_matrix;
3077 
3078  if(s->alternate_scan) nCoeffs= 63;
3079  else nCoeffs= s->block_last_index[n];
3080 
3081  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3082  quant_matrix = s->intra_matrix;
3083  for(i=1;i<=nCoeffs;i++) {
3084  int j= s->intra_scantable.permutated[i];
3085  level = block[j];
3086  if (level) {
3087  if (level < 0) {
3088  level = -level;
3089  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3090  level = -level;
3091  } else {
3092  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3093  }
3094  block[j] = level;
3095  }
3096  }
3097 }
3098 
3100  int16_t *block, int n, int qscale)
3101 {
3102  int i, level, nCoeffs;
3103  const uint16_t *quant_matrix;
3104  int sum=-1;
3105 
3106  if(s->alternate_scan) nCoeffs= 63;
3107  else nCoeffs= s->block_last_index[n];
3108 
3109  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3110  sum += block[0];
3111  quant_matrix = s->intra_matrix;
3112  for(i=1;i<=nCoeffs;i++) {
3113  int j= s->intra_scantable.permutated[i];
3114  level = block[j];
3115  if (level) {
3116  if (level < 0) {
3117  level = -level;
3118  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3119  level = -level;
3120  } else {
3121  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3122  }
3123  block[j] = level;
3124  sum+=level;
3125  }
3126  }
3127  block[63]^=sum&1;
3128 }
3129 
3131  int16_t *block, int n, int qscale)
3132 {
3133  int i, level, nCoeffs;
3134  const uint16_t *quant_matrix;
3135  int sum=-1;
3136 
3137  if(s->alternate_scan) nCoeffs= 63;
3138  else nCoeffs= s->block_last_index[n];
3139 
3140  quant_matrix = s->inter_matrix;
3141  for(i=0; i<=nCoeffs; i++) {
3142  int j= s->intra_scantable.permutated[i];
3143  level = block[j];
3144  if (level) {
3145  if (level < 0) {
3146  level = -level;
3147  level = (((level << 1) + 1) * qscale *
3148  ((int) (quant_matrix[j]))) >> 4;
3149  level = -level;
3150  } else {
3151  level = (((level << 1) + 1) * qscale *
3152  ((int) (quant_matrix[j]))) >> 4;
3153  }
3154  block[j] = level;
3155  sum+=level;
3156  }
3157  }
3158  block[63]^=sum&1;
3159 }
3160 
3162  int16_t *block, int n, int qscale)
3163 {
3164  int i, level, qmul, qadd;
3165  int nCoeffs;
3166 
3167  assert(s->block_last_index[n]>=0);
3168 
3169  qmul = qscale << 1;
3170 
3171  if (!s->h263_aic) {
3172  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3173  qadd = (qscale - 1) | 1;
3174  }else{
3175  qadd = 0;
3176  }
3177  if(s->ac_pred)
3178  nCoeffs=63;
3179  else
3180  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3181 
3182  for(i=1; i<=nCoeffs; i++) {
3183  level = block[i];
3184  if (level) {
3185  if (level < 0) {
3186  level = level * qmul - qadd;
3187  } else {
3188  level = level * qmul + qadd;
3189  }
3190  block[i] = level;
3191  }
3192  }
3193 }
3194 
3196  int16_t *block, int n, int qscale)
3197 {
3198  int i, level, qmul, qadd;
3199  int nCoeffs;
3200 
3201  assert(s->block_last_index[n]>=0);
3202 
3203  qadd = (qscale - 1) | 1;
3204  qmul = qscale << 1;
3205 
3206  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3207 
3208  for(i=0; i<=nCoeffs; i++) {
3209  level = block[i];
3210  if (level) {
3211  if (level < 0) {
3212  level = level * qmul - qadd;
3213  } else {
3214  level = level * qmul + qadd;
3215  }
3216  block[i] = level;
3217  }
3218  }
3219 }
3220 
3224 void ff_set_qscale(MpegEncContext * s, int qscale)
3225 {
3226  if (qscale < 1)
3227  qscale = 1;
3228  else if (qscale > 31)
3229  qscale = 31;
3230 
3231  s->qscale = qscale;
3232  s->chroma_qscale= s->chroma_qscale_table[qscale];
3233 
3234  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
3236 }
3237 
3239 {
3242 }
3243 
3245 {
3246  ERContext *er = &s->er;
3247 
3248  er->cur_pic = s->current_picture_ptr;
3249  er->last_pic = s->last_picture_ptr;
3250  er->next_pic = s->next_picture_ptr;
3251 
3252  er->pp_time = s->pp_time;
3253  er->pb_time = s->pb_time;
3254  er->quarter_sample = s->quarter_sample;
3256 
3257  ff_er_frame_start(er);
3258 }