FFmpeg  2.6.9
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
35 #include "libavutil/timer.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "h264chroma.h"
39 #include "idctdsp.h"
40 #include "internal.h"
41 #include "mathops.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
44 #include "mjpegenc.h"
45 #include "msmpeg4.h"
46 #include "qpeldsp.h"
47 #include "thread.h"
48 #include <limits.h>
49 
/* NOTE(review): Doxygen extraction stripped the declaration lines of the two
 * tables below. In the FFmpeg 2.6 sources these are
 * ff_default_chroma_qscale_table[32] (identity chroma qscale mapping) and
 * ff_mpeg1_dc_scale_table[128] (constant DC scale of 8) -- confirm against
 * the repository before relying on the names. */
51 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 52  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
 53  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
 54 };
 55 
57 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 58  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 59  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 60  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 61  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 62  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 63  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 64  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 65  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
 66 };
67 
/* DC scale table used for MPEG-2 when intra_dc_precision selects a scale
 * of 4 (every qscale maps to the same DC quantizer). Indexed by qscale. */
68 static const uint8_t mpeg2_dc_scale_table1[128] = {
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 70  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 71  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 72  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 73  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 74  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 75  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 76  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 77  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
 78 };
79 
/* DC scale table: constant scale of 2 for every qscale value. */
80 static const uint8_t mpeg2_dc_scale_table2[128] = {
81 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 82  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 83  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 84  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 85  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 86  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 87  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 88  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 89  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 90 };
91 
/* DC scale table: constant scale of 1 (no DC scaling) for every qscale. */
92 static const uint8_t mpeg2_dc_scale_table3[128] = {
93 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
 94  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 95  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 96  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 97  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 98  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 99  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 100  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 101  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 102 };
103 
/* NOTE(review): the four entries of ff_mpeg2_dc_scale_table (indexed by
 * intra_dc_precision) and the declarations of the two permutation tables
 * below were stripped by the extraction. Per the FFmpeg 2.6 sources the
 * stripped declarations are ff_alternate_horizontal_scan[64] and
 * ff_alternate_vertical_scan[64] -- confirm against the repository. */
104 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
109 };
110 
112  0, 1, 2, 3, 8, 9, 16, 17,
113  10, 11, 4, 5, 6, 7, 15, 14,
114  13, 12, 19, 18, 24, 25, 32, 33,
115  26, 27, 20, 21, 22, 23, 28, 29,
116  30, 31, 34, 35, 40, 41, 48, 49,
117  42, 43, 36, 37, 38, 39, 44, 45,
118  46, 47, 50, 51, 56, 57, 58, 59,
119  52, 53, 54, 55, 60, 61, 62, 63,
120 };
121 
123  0, 8, 16, 24, 1, 9, 2, 10,
124  17, 25, 32, 40, 48, 56, 57, 49,
125  41, 33, 26, 18, 3, 11, 4, 12,
126  19, 27, 34, 42, 50, 58, 35, 43,
127  51, 59, 20, 28, 5, 13, 6, 14,
128  21, 29, 36, 44, 52, 60, 37, 45,
129  53, 61, 22, 30, 7, 15, 23, 31,
130  38, 46, 54, 62, 39, 47, 55, 63,
131 };
132 
134  int16_t *block, int n, int qscale)
135 {
136  int i, level, nCoeffs;
137  const uint16_t *quant_matrix;
138 
139  nCoeffs= s->block_last_index[n];
140 
141  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
142  /* XXX: only mpeg1 */
143  quant_matrix = s->intra_matrix;
144  for(i=1;i<=nCoeffs;i++) {
145  int j= s->intra_scantable.permutated[i];
146  level = block[j];
147  if (level) {
148  if (level < 0) {
149  level = -level;
150  level = (int)(level * qscale * quant_matrix[j]) >> 3;
151  level = (level - 1) | 1;
152  level = -level;
153  } else {
154  level = (int)(level * qscale * quant_matrix[j]) >> 3;
155  level = (level - 1) | 1;
156  }
157  block[j] = level;
158  }
159  }
160 }
161 
163  int16_t *block, int n, int qscale)
164 {
165  int i, level, nCoeffs;
166  const uint16_t *quant_matrix;
167 
168  nCoeffs= s->block_last_index[n];
169 
170  quant_matrix = s->inter_matrix;
171  for(i=0; i<=nCoeffs; i++) {
172  int j= s->intra_scantable.permutated[i];
173  level = block[j];
174  if (level) {
175  if (level < 0) {
176  level = -level;
177  level = (((level << 1) + 1) * qscale *
178  ((int) (quant_matrix[j]))) >> 4;
179  level = (level - 1) | 1;
180  level = -level;
181  } else {
182  level = (((level << 1) + 1) * qscale *
183  ((int) (quant_matrix[j]))) >> 4;
184  level = (level - 1) | 1;
185  }
186  block[j] = level;
187  }
188  }
189 }
190 
192  int16_t *block, int n, int qscale)
193 {
194  int i, level, nCoeffs;
195  const uint16_t *quant_matrix;
196 
197  if(s->alternate_scan) nCoeffs= 63;
198  else nCoeffs= s->block_last_index[n];
199 
200  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
201  quant_matrix = s->intra_matrix;
202  for(i=1;i<=nCoeffs;i++) {
203  int j= s->intra_scantable.permutated[i];
204  level = block[j];
205  if (level) {
206  if (level < 0) {
207  level = -level;
208  level = (int)(level * qscale * quant_matrix[j]) >> 3;
209  level = -level;
210  } else {
211  level = (int)(level * qscale * quant_matrix[j]) >> 3;
212  }
213  block[j] = level;
214  }
215  }
216 }
217 
219  int16_t *block, int n, int qscale)
220 {
221  int i, level, nCoeffs;
222  const uint16_t *quant_matrix;
223  int sum=-1;
224 
225  if(s->alternate_scan) nCoeffs= 63;
226  else nCoeffs= s->block_last_index[n];
227 
228  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
229  sum += block[0];
230  quant_matrix = s->intra_matrix;
231  for(i=1;i<=nCoeffs;i++) {
232  int j= s->intra_scantable.permutated[i];
233  level = block[j];
234  if (level) {
235  if (level < 0) {
236  level = -level;
237  level = (int)(level * qscale * quant_matrix[j]) >> 3;
238  level = -level;
239  } else {
240  level = (int)(level * qscale * quant_matrix[j]) >> 3;
241  }
242  block[j] = level;
243  sum+=level;
244  }
245  }
246  block[63]^=sum&1;
247 }
248 
250  int16_t *block, int n, int qscale)
251 {
252  int i, level, nCoeffs;
253  const uint16_t *quant_matrix;
254  int sum=-1;
255 
256  if(s->alternate_scan) nCoeffs= 63;
257  else nCoeffs= s->block_last_index[n];
258 
259  quant_matrix = s->inter_matrix;
260  for(i=0; i<=nCoeffs; i++) {
261  int j= s->intra_scantable.permutated[i];
262  level = block[j];
263  if (level) {
264  if (level < 0) {
265  level = -level;
266  level = (((level << 1) + 1) * qscale *
267  ((int) (quant_matrix[j]))) >> 4;
268  level = -level;
269  } else {
270  level = (((level << 1) + 1) * qscale *
271  ((int) (quant_matrix[j]))) >> 4;
272  }
273  block[j] = level;
274  sum+=level;
275  }
276  }
277  block[63]^=sum&1;
278 }
279 
281  int16_t *block, int n, int qscale)
282 {
283  int i, level, qmul, qadd;
284  int nCoeffs;
285 
286  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
287 
288  qmul = qscale << 1;
289 
290  if (!s->h263_aic) {
291  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
292  qadd = (qscale - 1) | 1;
293  }else{
294  qadd = 0;
295  }
296  if(s->ac_pred)
297  nCoeffs=63;
298  else
299  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
300 
301  for(i=1; i<=nCoeffs; i++) {
302  level = block[i];
303  if (level) {
304  if (level < 0) {
305  level = level * qmul - qadd;
306  } else {
307  level = level * qmul + qadd;
308  }
309  block[i] = level;
310  }
311  }
312 }
313 
315  int16_t *block, int n, int qscale)
316 {
317  int i, level, qmul, qadd;
318  int nCoeffs;
319 
320  av_assert2(s->block_last_index[n]>=0);
321 
322  qadd = (qscale - 1) | 1;
323  qmul = qscale << 1;
324 
325  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
326 
327  for(i=0; i<=nCoeffs; i++) {
328  level = block[i];
329  if (level) {
330  if (level < 0) {
331  level = level * qmul - qadd;
332  } else {
333  level = level * qmul + qadd;
334  }
335  block[i] = level;
336  }
337  }
338 }
339 
340 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
341  int (*mv)[2][4][2],
342  int mb_x, int mb_y, int mb_intra, int mb_skipped)
343 {
344  MpegEncContext *s = opaque;
345 
346  s->mv_dir = mv_dir;
347  s->mv_type = mv_type;
348  s->mb_intra = mb_intra;
349  s->mb_skipped = mb_skipped;
350  s->mb_x = mb_x;
351  s->mb_y = mb_y;
352  memcpy(s->mv, mv, sizeof(*mv));
353 
356 
357  s->bdsp.clear_blocks(s->block[0]);
358 
359  s->dest[0] = s->current_picture.f->data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
360  s->dest[1] = s->current_picture.f->data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
361  s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
362 
363  if (ref)
365  "Interlaced error concealment is not fully implemented\n");
366  ff_mpv_decode_mb(s, s->block);
367 }
368 
/* Fill h rows of 16 bytes each with mid-gray (128); src is ignored (the
 * signature only exists to match the hpeldsp pixel-op function pointers). */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 16);
}
374 
/* Fill h rows of 8 bytes each with mid-gray (128); src is ignored (the
 * signature only exists to match the hpeldsp pixel-op function pointers). */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 8);
}
380 
/* NOTE(review): the signature line (presumably static av_cold int
 * dct_init(MpegEncContext *s) per FFmpeg 2.6 -- confirm) and several
 * dsp-init / arch-dispatch call lines were stripped by the extraction;
 * only the surviving lines are preserved below. Installs the gray8/gray16
 * stubs when FF_DEBUG_NOMC is set so motion compensation renders flat gray. */
381 /* init common dct for both encoder and decoder */
383 {
384  ff_blockdsp_init(&s->bdsp, s->avctx);
385  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
386  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
389 
390  if (s->avctx->debug & FF_DEBUG_NOMC) {
391  int i;
392  for (i=0; i<4; i++) {
393  s->hdsp.avg_pixels_tab[0][i] = gray16;
394  s->hdsp.put_pixels_tab[0][i] = gray16;
395  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
396 
397  s->hdsp.avg_pixels_tab[1][i] = gray8;
398  s->hdsp.put_pixels_tab[1][i] = gray8;
399  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
400  }
401  }
402 
408  if (s->flags & CODEC_FLAG_BITEXACT)
411 
414 
415  if (ARCH_ALPHA)
417  if (ARCH_ARM)
419  if (ARCH_PPC)
421  if (ARCH_X86)
423 
424  return 0;
425 }
426 
/* NOTE(review): signature (presumably av_cold void
 * ff_mpv_idct_init(MpegEncContext *s) -- confirm) and the scantable-init
 * call lines inside both branches were stripped by the extraction. */
428 {
429  ff_idctdsp_init(&s->idsp, s->avctx);
430 
431  /* load & permutate scantables
432  * note: only wmv uses different ones
433  */
434  if (s->alternate_scan) {
437  } else {
440  }
443 }
444 
/* Allocate the per-context, linesize-dependent scratch buffers
 * (edge emulation buffer and motion-estimation scratchpads).
 * NOTE(review): the condition guarding the early "return 0" (line 449)
 * and the cleanup free on the fail path (line 475) were stripped by the
 * extraction -- confirm against the repository. */
445 static int frame_size_alloc(MpegEncContext *s, int linesize)
446 {
447  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
448 
450  return 0;
451 
452  if (linesize < 24) {
453  av_log(s->avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
454  return AVERROR_PATCHWELCOME;
455  }
456 
457  // edge emu needs blocksize + filter length - 1
458  // (= 17x17 for halfpel / 21x21 for h264)
459  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
460  // at uvlinesize. It supports only YUV420 so 24x24 is enough
461  // linesize * interlaced * MBsize
462  // we also use this buffer for encoding in encode_mb_internal() needig an additional 32 lines
463  FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size, 4 * 68,
464  fail);
465 
466  FF_ALLOCZ_ARRAY_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size, 4 * 16 * 2,
467  fail)
468  s->me.temp = s->me.scratchpad;
469  s->rd_scratchpad = s->me.scratchpad;
470  s->b_scratchpad = s->me.scratchpad;
471  s->obmc_scratchpad = s->me.scratchpad + 16;
472 
473  return 0;
474 fail:
476  return AVERROR(ENOMEM);
477 }
478 
479 /**
480  * Allocate a frame buffer
481  */
/* NOTE(review): the signature (presumably static int
 * alloc_frame_buffer(MpegEncContext *s, Picture *pic) -- confirm), one
 * codec-id comparison, the hwaccel private-buffer allocation line and a few
 * av_log openers were stripped by the extraction. Encoders get EDGE_WIDTH
 * padding on each side; the data pointers are then advanced past the edges. */
483 {
484  int edges_needed = av_codec_is_encoder(s->avctx->codec);
485  int r, ret;
486 
487  pic->tf.f = pic->f;
488  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
490  s->codec_id != AV_CODEC_ID_MSS2) {
491  if (edges_needed) {
492  pic->f->width = s->avctx->width + 2 * EDGE_WIDTH;
493  pic->f->height = s->avctx->height + 2 * EDGE_WIDTH;
494  }
495 
496  r = ff_thread_get_buffer(s->avctx, &pic->tf,
497  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
498  } else {
499  pic->f->width = s->avctx->width;
500  pic->f->height = s->avctx->height;
501  pic->f->format = s->avctx->pix_fmt;
502  r = avcodec_default_get_buffer2(s->avctx, pic->f, 0);
503  }
504 
505  if (r < 0 || !pic->f->buf[0]) {
506  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
507  r, pic->f->data[0]);
508  return -1;
509  }
510 
511  if (edges_needed) {
512  int i;
513  for (i = 0; pic->f->data[i]; i++) {
514  int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
515  pic->f->linesize[i] +
516  (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
517  pic->f->data[i] += offset;
518  }
519  pic->f->width = s->avctx->width;
520  pic->f->height = s->avctx->height;
521  }
522 
523  if (s->avctx->hwaccel) {
524  assert(!pic->hwaccel_picture_private);
527  if (!pic->hwaccel_priv_buf) {
528  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
529  return -1;
530  }
532  }
533  }
534 
535  if (s->linesize && (s->linesize != pic->f->linesize[0] ||
536  s->uvlinesize != pic->f->linesize[1])) {
538  "get_buffer() failed (stride changed)\n");
539  ff_mpeg_unref_picture(s, pic);
540  return -1;
541  }
542 
543  if (pic->f->linesize[1] != pic->f->linesize[2]) {
545  "get_buffer() failed (uv stride mismatch)\n");
546  ff_mpeg_unref_picture(s, pic);
547  return -1;
548  }
549 
550  if (!s->edge_emu_buffer &&
551  (ret = frame_size_alloc(s, pic->f->linesize[0])) < 0) {
553  "get_buffer() failed to allocate context scratch buffers.\n");
554  ff_mpeg_unref_picture(s, pic);
555  return ret;
556  }
557 
558  return 0;
559 }
560 
/* NOTE(review): signature stripped (presumably void
 * ff_free_picture_tables(Picture *pic) -- confirm) along with the unref
 * lines for the non-indexed table buffers (lines 568-573, 576). Releases
 * the per-picture side-data buffers and resets the recorded MB geometry. */
562 {
563  int i;
564 
565  pic->alloc_mb_width =
566  pic->alloc_mb_height = 0;
567 
574 
575  for (i = 0; i < 2; i++) {
577  av_buffer_unref(&pic->ref_index_buf[i]);
578  }
579 }
580 
/* NOTE(review): signature stripped (presumably static int
 * alloc_picture_tables(MpegEncContext *s, Picture *pic) -- confirm) and the
 * second half of the motion-table condition (line 605) is missing.
 * Allocates per-picture side-data buffers: skip/qscale/mb-type always,
 * variance/mean only when encoding, motion vectors + ref indices when
 * needed. Returns 0 or AVERROR(ENOMEM). */
582 {
583  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
584  const int mb_array_size = s->mb_stride * s->mb_height;
585  const int b8_array_size = s->b8_stride * s->mb_height * 2;
586  int i;
587 
588 
589  pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
590  pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
591  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) *
592  sizeof(uint32_t));
593  if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
594  return AVERROR(ENOMEM);
595 
596  if (s->encoding) {
597  pic->mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
598  pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
599  pic->mb_mean_buf = av_buffer_allocz(mb_array_size);
600  if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
601  return AVERROR(ENOMEM);
602  }
603 
604  if (s->out_format == FMT_H263 || s->encoding || s->avctx->debug_mv ||
606  int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
607  int ref_index_size = 4 * mb_array_size;
608 
609  for (i = 0; mv_size && i < 2; i++) {
610  pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
611  pic->ref_index_buf[i] = av_buffer_allocz(ref_index_size);
612  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
613  return AVERROR(ENOMEM);
614  }
615  }
616 
617  pic->alloc_mb_width = s->mb_width;
618  pic->alloc_mb_height = s->mb_height;
619 
620  return 0;
621 }
622 
624 {
625  int ret, i;
626 #define MAKE_WRITABLE(table) \
627 do {\
628  if (pic->table &&\
629  (ret = av_buffer_make_writable(&pic->table)) < 0)\
630  return ret;\
631 } while (0)
632 
633  MAKE_WRITABLE(mb_var_buf);
634  MAKE_WRITABLE(mc_mb_var_buf);
635  MAKE_WRITABLE(mb_mean_buf);
636  MAKE_WRITABLE(mbskip_table_buf);
637  MAKE_WRITABLE(qscale_table_buf);
638  MAKE_WRITABLE(mb_type_buf);
639 
640  for (i = 0; i < 2; i++) {
641  MAKE_WRITABLE(motion_val_buf[i]);
642  MAKE_WRITABLE(ref_index_buf[i]);
643  }
644 
645  return 0;
646 }
647 
648 /**
649  * Allocate a Picture.
650  * The pixels are allocated/set by calling get_buffer() if shared = 0
651  */
/* NOTE(review): the body of the geometry-mismatch branch (line 659,
 * presumably a call that frees the stale picture tables -- confirm) was
 * stripped by the extraction. Allocates the frame buffer (unless shared)
 * and the per-picture side-data tables, then wires the convenience
 * pointers into the table buffers. */
652 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
653 {
654  int i, ret;
655 
656  if (pic->qscale_table_buf)
657  if ( pic->alloc_mb_width != s->mb_width
658  || pic->alloc_mb_height != s->mb_height)
660 
661  if (shared) {
662  av_assert0(pic->f->data[0]);
663  pic->shared = 1;
664  } else {
665  av_assert0(!pic->f->buf[0]);
666 
667  if (alloc_frame_buffer(s, pic) < 0)
668  return -1;
669 
670  s->linesize = pic->f->linesize[0];
671  s->uvlinesize = pic->f->linesize[1];
672  }
673 
674  if (!pic->qscale_table_buf)
675  ret = alloc_picture_tables(s, pic);
676  else
677  ret = make_tables_writable(pic);
678  if (ret < 0)
679  goto fail;
680 
681  if (s->encoding) {
682  pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
683  pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
684  pic->mb_mean = pic->mb_mean_buf->data;
685  }
686 
687  pic->mbskip_table = pic->mbskip_table_buf->data;
688  pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
689  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
690 
691  if (pic->motion_val_buf[0]) {
692  for (i = 0; i < 2; i++) {
693  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
694  pic->ref_index[i] = pic->ref_index_buf[i]->data;
695  }
696  }
697 
698  return 0;
699 fail:
700  av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
701  ff_mpeg_unref_picture(s, pic);
703  return AVERROR(ENOMEM);
704 }
705 
706 /**
707  * Deallocate a picture.
708  */
/* NOTE(review): signature stripped (presumably void
 * ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic) -- confirm) along
 * with one codec-id comparison and the hwaccel/realloc-branch bodies.
 * Everything after the mb_mean field is zeroed; the fields up to and
 * including mb_mean (the buffer references) survive the memset. */
710 {
711  int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);
712 
713  pic->tf.f = pic->f;
714  /* WM Image / Screen codecs allocate internal buffers with different
715  * dimensions / colorspaces; ignore user-defined callbacks for these. */
716  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
719  ff_thread_release_buffer(s->avctx, &pic->tf);
720  else if (pic->f)
721  av_frame_unref(pic->f);
722 
724 
725  if (pic->needs_realloc)
727 
728  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
729 }
730 
732 {
733  int i;
734 
735 #define UPDATE_TABLE(table)\
736 do {\
737  if (src->table &&\
738  (!dst->table || dst->table->buffer != src->table->buffer)) {\
739  av_buffer_unref(&dst->table);\
740  dst->table = av_buffer_ref(src->table);\
741  if (!dst->table) {\
742  ff_free_picture_tables(dst);\
743  return AVERROR(ENOMEM);\
744  }\
745  }\
746 } while (0)
747 
748  UPDATE_TABLE(mb_var_buf);
749  UPDATE_TABLE(mc_mb_var_buf);
750  UPDATE_TABLE(mb_mean_buf);
751  UPDATE_TABLE(mbskip_table_buf);
752  UPDATE_TABLE(qscale_table_buf);
753  UPDATE_TABLE(mb_type_buf);
754  for (i = 0; i < 2; i++) {
755  UPDATE_TABLE(motion_val_buf[i]);
756  UPDATE_TABLE(ref_index_buf[i]);
757  }
758 
759  dst->mb_var = src->mb_var;
760  dst->mc_mb_var = src->mc_mb_var;
761  dst->mb_mean = src->mb_mean;
762  dst->mbskip_table = src->mbskip_table;
763  dst->qscale_table = src->qscale_table;
764  dst->mb_type = src->mb_type;
765  for (i = 0; i < 2; i++) {
766  dst->motion_val[i] = src->motion_val[i];
767  dst->ref_index[i] = src->ref_index[i];
768  }
769 
770  dst->alloc_mb_width = src->alloc_mb_width;
771  dst->alloc_mb_height = src->alloc_mb_height;
772 
773  return 0;
774 }
775 
/* NOTE(review): signature stripped (presumably int
 * ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
 * -- confirm), along with the hwaccel private-buffer ref lines (794, 797).
 * References src's frame and tables into dst and copies the scalar
 * bookkeeping fields; on failure dst is unreferenced again. */
777 {
778  int ret;
779 
780  av_assert0(!dst->f->buf[0]);
781  av_assert0(src->f->buf[0]);
782 
783  src->tf.f = src->f;
784  dst->tf.f = dst->f;
785  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
786  if (ret < 0)
787  goto fail;
788 
789  ret = update_picture_tables(dst, src);
790  if (ret < 0)
791  goto fail;
792 
793  if (src->hwaccel_picture_private) {
795  if (!dst->hwaccel_priv_buf)
796  goto fail;
798  }
799 
800  dst->field_picture = src->field_picture;
801  dst->mb_var_sum = src->mb_var_sum;
802  dst->mc_mb_var_sum = src->mc_mb_var_sum;
803  dst->b_frame_score = src->b_frame_score;
804  dst->needs_realloc = src->needs_realloc;
805  dst->reference = src->reference;
806  dst->shared = src->shared;
807 
808  return 0;
809 fail:
810  ff_mpeg_unref_picture(s, dst);
811  return ret;
812 }
813 
815 {
816  int16_t (*tmp)[64];
817 
818  tmp = s->pblocks[4];
819  s->pblocks[4] = s->pblocks[5];
820  s->pblocks[5] = tmp;
821 }
822 
/* NOTE(review): signature stripped (presumably static int
 * init_duplicate_context(MpegEncContext *s) -- confirm) plus the allocation
 * openers for me.score_map (843), dct_error_sum (846) and ac_val_base (861).
 * Allocates per-slice-context state: ME maps when encoding, the DCT block
 * array, and H.263 AC-prediction values. Returns 0 or -1 (caller frees via
 * ff_mpv_common_end()). */
824 {
825  int y_size = s->b8_stride * (2 * s->mb_height + 1);
826  int c_size = s->mb_stride * (s->mb_height + 1);
827  int yc_size = y_size + 2 * c_size;
828  int i;
829 
830  if (s->mb_height & 1)
831  yc_size += 2*s->b8_stride + 2*s->mb_stride;
832 
833  s->edge_emu_buffer =
834  s->me.scratchpad =
835  s->me.temp =
836  s->rd_scratchpad =
837  s->b_scratchpad =
838  s->obmc_scratchpad = NULL;
839 
840  if (s->encoding) {
841  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
842  ME_MAP_SIZE * sizeof(uint32_t), fail)
844  ME_MAP_SIZE * sizeof(uint32_t), fail)
845  if (s->avctx->noise_reduction) {
847  2 * 64 * sizeof(int), fail)
848  }
849  }
850  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
851  s->block = s->blocks[0];
852 
853  for (i = 0; i < 12; i++) {
854  s->pblocks[i] = &s->block[i];
855  }
856  if (s->avctx->codec_tag == AV_RL32("VCR2"))
857  exchange_uv(s);
858 
859  if (s->out_format == FMT_H263) {
860  /* ac values */
862  yc_size * sizeof(int16_t) * 16, fail);
863  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
864  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
865  s->ac_val[2] = s->ac_val[1] + c_size;
866  }
867 
868  return 0;
869 fail:
870  return -1; // free() through ff_mpv_common_end()
871 }
872 
/* NOTE(review): signature stripped (presumably static void
 * free_duplicate_context(MpegEncContext *s) -- confirm) plus the
 * edge_emu_buffer free on line 878. Frees per-slice-context buffers;
 * the scratchpad aliases are only NULLed because they all point into
 * me.scratchpad, which owns the allocation. */
874 {
875  if (!s)
876  return;
877 
879  av_freep(&s->me.scratchpad);
880  s->me.temp =
881  s->rd_scratchpad =
882  s->b_scratchpad =
883  s->obmc_scratchpad = NULL;
884 
885  av_freep(&s->dct_error_sum);
886  av_freep(&s->me.map);
887  av_freep(&s->me.score_map);
888  av_freep(&s->blocks);
889  av_freep(&s->ac_val_base);
890  s->block = NULL;
891 }
892 
894 {
895 #define COPY(a) bak->a = src->a
896  COPY(edge_emu_buffer);
897  COPY(me.scratchpad);
898  COPY(me.temp);
899  COPY(rd_scratchpad);
900  COPY(b_scratchpad);
901  COPY(obmc_scratchpad);
902  COPY(me.map);
903  COPY(me.score_map);
904  COPY(blocks);
905  COPY(block);
906  COPY(start_mb_y);
907  COPY(end_mb_y);
908  COPY(me.map_generation);
909  COPY(pb);
910  COPY(dct_error_sum);
911  COPY(dct_count[0]);
912  COPY(dct_count[1]);
913  COPY(ac_val_base);
914  COPY(ac_val[0]);
915  COPY(ac_val[1]);
916  COPY(ac_val[2]);
917 #undef COPY
918 }
919 
921 {
922  MpegEncContext bak;
923  int i, ret;
924  // FIXME copy only needed parts
925  // START_TIMER
926  backup_duplicate_context(&bak, dst);
927  memcpy(dst, src, sizeof(MpegEncContext));
928  backup_duplicate_context(dst, &bak);
929  for (i = 0; i < 12; i++) {
930  dst->pblocks[i] = &dst->block[i];
931  }
932  if (dst->avctx->codec_tag == AV_RL32("VCR2"))
933  exchange_uv(dst);
934  if (!dst->edge_emu_buffer &&
935  (ret = frame_size_alloc(dst, dst->linesize)) < 0) {
936  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
937  "scratch buffers.\n");
938  return ret;
939  }
940  // STOP_TIMER("update_duplicate_context")
941  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
942  return 0;
943 }
944 
/* NOTE(review): the first signature line was stripped by the extraction
 * (presumably int ff_mpeg_update_thread_context(AVCodecContext *dst, ...)
 * -- confirm), as were several interior lines (964, around 1047-1049, 1055).
 * Frame-threading sync: copies decoder state from the previous thread's
 * context (s1/src) into this thread's context (s/dst). */
946  const AVCodecContext *src)
947 {
948  int i, ret;
949  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
950 
951  if (dst == src)
952  return 0;
953 
954  av_assert0(s != s1);
955 
956  // FIXME can parameters change on I-frames?
957  // in that case dst may need a reinit
958  if (!s->context_initialized) {
959  int err;
960  memcpy(s, s1, sizeof(MpegEncContext));
961 
962  s->avctx = dst;
963  s->bitstream_buffer = NULL;
965 
966  if (s1->context_initialized){
967 // s->picture_range_start += MAX_PICTURE_COUNT;
968 // s->picture_range_end += MAX_PICTURE_COUNT;
969  ff_mpv_idct_init(s);
970  if((err = ff_mpv_common_init(s)) < 0){
971  memset(s, 0, sizeof(MpegEncContext));
972  s->avctx = dst;
973  return err;
974  }
975  }
976  }
977 
978  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
979  s->context_reinit = 0;
980  s->height = s1->height;
981  s->width = s1->width;
982  if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
983  return ret;
984  }
985 
986  s->avctx->coded_height = s1->avctx->coded_height;
987  s->avctx->coded_width = s1->avctx->coded_width;
988  s->avctx->width = s1->avctx->width;
989  s->avctx->height = s1->avctx->height;
990 
991  s->coded_picture_number = s1->coded_picture_number;
992  s->picture_number = s1->picture_number;
993 
994  av_assert0(!s->picture || s->picture != s1->picture);
995  if(s->picture)
996  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
997  ff_mpeg_unref_picture(s, &s->picture[i]);
998  if (s1->picture[i].f->buf[0] &&
999  (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
1000  return ret;
1001  }
1002 
1003 #define UPDATE_PICTURE(pic)\
1004 do {\
1005  ff_mpeg_unref_picture(s, &s->pic);\
1006  if (s1->pic.f && s1->pic.f->buf[0])\
1007  ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
1008  else\
1009  ret = update_picture_tables(&s->pic, &s1->pic);\
1010  if (ret < 0)\
1011  return ret;\
1012 } while (0)
1013 
1014  UPDATE_PICTURE(current_picture);
1015  UPDATE_PICTURE(last_picture);
1016  UPDATE_PICTURE(next_picture);
1017 
1018 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1019  ((pic && pic >= old_ctx->picture && \
1020  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
1021  &new_ctx->picture[pic - old_ctx->picture] : NULL)
1022 
1023  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
1024  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
1025  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
1026 
1027  // Error/bug resilience
1028  s->next_p_frame_damaged = s1->next_p_frame_damaged;
1029  s->workaround_bugs = s1->workaround_bugs;
1030  s->padding_bug_score = s1->padding_bug_score;
1031 
1032  // MPEG4 timing info
1033  memcpy(&s->last_time_base, &s1->last_time_base,
1034  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
1035  (char *) &s1->last_time_base);
1036 
1037  // B-frame info
1038  s->max_b_frames = s1->max_b_frames;
1039  s->low_delay = s1->low_delay;
1040  s->droppable = s1->droppable;
1041 
1042  // DivX handling (doesn't work)
1043  s->divx_packed = s1->divx_packed;
1044 
1045  if (s1->bitstream_buffer) {
1046  if (s1->bitstream_buffer_size +
1050  s1->allocated_bitstream_buffer_size);
1051  s->bitstream_buffer_size = s1->bitstream_buffer_size;
1052  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
1053  s1->bitstream_buffer_size);
1054  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
1056  }
1057 
1058  // linesize dependend scratch buffer allocation
1059  if (!s->edge_emu_buffer)
1060  if (s1->linesize) {
1061  if (frame_size_alloc(s, s1->linesize) < 0) {
1062  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
1063  "scratch buffers.\n");
1064  return AVERROR(ENOMEM);
1065  }
1066  } else {
1067  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
1068  "be allocated due to unknown size.\n");
1069  }
1070 
1071  // MPEG2/interlacing info
1072  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
1073  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1074 
1075  if (!s1->first_field) {
1076  s->last_pict_type = s1->pict_type;
1077  if (s1->current_picture_ptr)
1078  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
1079  }
1080 
1081  return 0;
1082 }
1083 
1084 /**
1085  * Set the given MpegEncContext to common defaults
1086  * (same for encoding and decoding).
1087  * The changed fields will not depend upon the
1088  * prior state of the MpegEncContext.
1089  */
/* NOTE(review): signature stripped (presumably void
 * ff_mpv_common_defaults(MpegEncContext *s) -- confirm) plus the table
 * assignments on lines 1093-1094 and 1097. Resets fields that do not
 * depend on prior context state. */
1091 {
1092  s->y_dc_scale_table =
1095  s->progressive_frame = 1;
1096  s->progressive_sequence = 1;
1098 
1099  s->coded_picture_number = 0;
1100  s->picture_number = 0;
1101 
1102  s->f_code = 1;
1103  s->b_code = 1;
1104 
1105  s->slice_context_count = 1;
1106 }
1107 
1108 /**
1109  * Set the given MpegEncContext to defaults for decoding.
1110  * the changed fields will not depend upon
1111  * the prior state of the MpegEncContext.
1112  */
/* NOTE(review): both the signature (presumably void
 * ff_mpv_decode_defaults(MpegEncContext *s)) and the single body line
 * (presumably a call to ff_mpv_common_defaults(s)) were stripped by the
 * extraction -- confirm against the repository. */
1114 {
1116 }
1117 
1119 {
1120  s->avctx = avctx;
1121  s->width = avctx->coded_width;
1122  s->height = avctx->coded_height;
1123  s->codec_id = avctx->codec->id;
1124  s->workaround_bugs = avctx->workaround_bugs;
1125  s->flags = avctx->flags;
1126  s->flags2 = avctx->flags2;
1127 
1128  /* convert fourcc to upper case */
1129  s->codec_tag = avpriv_toupper4(avctx->codec_tag);
1130 }
1131 
/* NOTE(review): signature stripped (presumably static int
 * init_er(MpegEncContext *s) -- confirm) plus the er_temp_buffer allocation
 * (1147), the decode callback assignment (1158) and one free on the fail
 * path (1164). Wires the error-resilience context to this decoder's
 * geometry and tables. */
1133 {
1134  ERContext *er = &s->er;
1135  int mb_array_size = s->mb_height * s->mb_stride;
1136  int i;
1137 
1138  er->avctx = s->avctx;
1139 
1140  er->mb_index2xy = s->mb_index2xy;
1141  er->mb_num = s->mb_num;
1142  er->mb_width = s->mb_width;
1143  er->mb_height = s->mb_height;
1144  er->mb_stride = s->mb_stride;
1145  er->b8_stride = s->b8_stride;
1146 
1148  er->error_status_table = av_mallocz(mb_array_size);
1149  if (!er->er_temp_buffer || !er->error_status_table)
1150  goto fail;
1151 
1152  er->mbskip_table = s->mbskip_table;
1153  er->mbintra_table = s->mbintra_table;
1154 
1155  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
1156  er->dc_val[i] = s->dc_val[i];
1157 
1159  er->opaque = s;
1160 
1161  return 0;
1162 fail:
1163  av_freep(&er->er_temp_buffer);
1165  return AVERROR(ENOMEM);
1166 }
1167 
1168 /**
1169  * Initialize and allocate MpegEncContext fields dependent on the resolution.
 * All tables allocated here are released by free_context_frame(); any
 * allocation failure jumps to the fail label and returns AVERROR(ENOMEM).
1170  */
1172 {
1173  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1174 
1175  s->mb_width = (s->width + 15) / 16;
1176  s->mb_stride = s->mb_width + 1;
1177  s->b8_stride = s->mb_width * 2 + 1;
1178  mb_array_size = s->mb_height * s->mb_stride;
 /* two extra rows plus one slack entry so the MV tables can be addressed
  * at an offset of mb_stride + 1 (see p_mv_table below) */
1179  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
1180 
1181  /* set default edge pos, will be overridden
1182  * in decode_header if needed */
1183  s->h_edge_pos = s->mb_width * 16;
1184  s->v_edge_pos = s->mb_height * 16;
1185 
1186  s->mb_num = s->mb_width * s->mb_height;
1187 
 /* plane strides: 8x8-block resolution for luma, MB resolution for chroma */
1188  s->block_wrap[0] =
1189  s->block_wrap[1] =
1190  s->block_wrap[2] =
1191  s->block_wrap[3] = s->b8_stride;
1192  s->block_wrap[4] =
1193  s->block_wrap[5] = s->mb_stride;
1194 
1195  y_size = s->b8_stride * (2 * s->mb_height + 1);
1196  c_size = s->mb_stride * (s->mb_height + 1);
1197  yc_size = y_size + 2 * c_size;
1198 
 /* an odd mb_height needs one extra row in each plane */
1199  if (s->mb_height & 1)
1200  yc_size += 2*s->b8_stride + 2*s->mb_stride;
1201 
1202  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error resilience code looks cleaner with this
1203  for (y = 0; y < s->mb_height; y++)
1204  for (x = 0; x < s->mb_width; x++)
1205  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
1206 
 /* sentinel entry one past the last macroblock */
1207  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
1208 
1209  if (s->encoding) {
1210  /* Allocate MV tables */
1211  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1212  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1213  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1214  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1215  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
1216  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
 /* the working pointer skips the padding row/column of the base table */
1217  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
1223 
1224  /* Allocate MB type table */
1225  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
1226 
1227  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
1228 
1230  mb_array_size * sizeof(float), fail);
1232  mb_array_size * sizeof(float), fail);
1233 
1234  }
1235 
1236  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
1238  /* interlaced direct mode decoding tables */
1239  for (i = 0; i < 2; i++) {
1240  int j, k;
1241  for (j = 0; j < 2; j++) {
1242  for (k = 0; k < 2; k++) {
1244  s->b_field_mv_table_base[i][j][k],
1245  mv_table_size * 2 * sizeof(int16_t),
1246  fail);
1247  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
1248  s->mb_stride + 1;
1249  }
1250  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
1251  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
1252  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
1253  }
1254  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
1255  }
1256  }
1257  if (s->out_format == FMT_H263) {
1258  /* cbp values */
1259  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
1260  s->coded_block = s->coded_block_base + s->b8_stride + 1;
1261 
1262  /* cbp, ac_pred, pred_dir */
1263  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
1264  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
1265  }
1266 
1267  if (s->h263_pred || s->h263_plus || !s->encoding) {
1268  /* dc values */
1269  // MN: we need these for error resilience of intra-frames
1270  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
1271  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
1272  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
1273  s->dc_val[2] = s->dc_val[1] + c_size;
 /* 1024 is the neutral DC prediction value */
1274  for (i = 0; i < yc_size; i++)
1275  s->dc_val_base[i] = 1024;
1276  }
1277 
1278  /* which mb is an intra block */
1279  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
1280  memset(s->mbintra_table, 1, mb_array_size);
1281 
1282  /* init macroblock skip table */
1283  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
1284  // Note the + 1 is for a quicker mpeg4 slice_end detection
 /* NOTE(review): the allocation above adds 2 while the note says +1 — confirm intent */
1285 
1286  return init_er(s);
1287 fail:
1288  return AVERROR(ENOMEM);
1289 }
1290 
1292 {
 /* Reset every table/pointer member of the context to a known-empty
  * state so the free paths (free_context_frame(), ff_mpv_common_end())
  * never act on stale, uninitialized values.  Called before any
  * allocation in ff_mpv_common_init(). */
1293  int i, j, k;
1294 
1295  memset(&s->next_picture, 0, sizeof(s->next_picture));
1296  memset(&s->last_picture, 0, sizeof(s->last_picture));
1297  memset(&s->current_picture, 0, sizeof(s->current_picture));
1298  memset(&s->new_picture, 0, sizeof(s->new_picture));
1299 
1300  memset(s->thread_context, 0, sizeof(s->thread_context));
1301 
1302  s->me.map = NULL;
1303  s->me.score_map = NULL;
1304  s->dct_error_sum = NULL;
1305  s->block = NULL;
1306  s->blocks = NULL;
1307  memset(s->pblocks, 0, sizeof(s->pblocks));
1308  s->ac_val_base = NULL;
1309  s->ac_val[0] =
1310  s->ac_val[1] =
1311  s->ac_val[2] =NULL;
1312  s->edge_emu_buffer = NULL;
1313  s->me.scratchpad = NULL;
1314  s->me.temp =
1315  s->rd_scratchpad =
1316  s->b_scratchpad =
1317  s->obmc_scratchpad = NULL;
1318 
1319  s->parse_context.buffer = NULL;
1320  s->parse_context.buffer_size = 0;
1321  s->parse_context.overread = 0;
1322  s->bitstream_buffer = NULL;
1324  s->picture = NULL;
1325  s->mb_type = NULL;
1326  s->p_mv_table_base = NULL;
1332  s->p_mv_table = NULL;
1333  s->b_forw_mv_table = NULL;
1334  s->b_back_mv_table = NULL;
1337  s->b_direct_mv_table = NULL;
 /* field-based MV tables: [field][list][direction] */
1338  for (i = 0; i < 2; i++) {
1339  for (j = 0; j < 2; j++) {
1340  for (k = 0; k < 2; k++) {
1341  s->b_field_mv_table_base[i][j][k] = NULL;
1342  s->b_field_mv_table[i][j][k] = NULL;
1343  }
1344  s->b_field_select_table[i][j] = NULL;
1345  s->p_field_mv_table_base[i][j] = NULL;
1346  s->p_field_mv_table[i][j] = NULL;
1347  }
1348  s->p_field_select_table[i] = NULL;
1349  }
1350 
1351  s->dc_val_base = NULL;
1352  s->coded_block_base = NULL;
1353  s->mbintra_table = NULL;
1354  s->cbp_table = NULL;
1355  s->pred_dir_table = NULL;
1356 
1357  s->mbskip_table = NULL;
1358 
1359  s->er.error_status_table = NULL;
1360  s->er.er_temp_buffer = NULL;
1361  s->mb_index2xy = NULL;
1362  s->lambda_table = NULL;
1363 
1364  s->cplx_tab = NULL;
1365  s->bits_tab = NULL;
1366 }
1367 
1368 /**
1369  * init common structure for both encoder and decoder.
1370  * this assumes that some variables like width/height are already set
 * On success returns 0; on failure everything allocated so far is
 * released via ff_mpv_common_end() and -1 is returned.
1371  */
1373 {
1374  int i;
 /* default to one slice context unless threading/slices request more */
1375  int nb_slices = (HAVE_THREADS &&
1377  s->avctx->thread_count : 1;
1378 
1379  clear_context(s);
1380 
1381  if (s->encoding && s->avctx->slices)
1382  nb_slices = s->avctx->slices;
1383 
1385  s->mb_height = (s->height + 31) / 32 * 2;
1386  else
1387  s->mb_height = (s->height + 15) / 16;
1388 
1389  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
1391  "decoding to AV_PIX_FMT_NONE is not supported.\n");
1392  return -1;
1393  }
1394 
 /* clamp the slice count to what the image height can support */
1395  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
1396  int max_slices;
1397  if (s->mb_height)
1398  max_slices = FFMIN(MAX_THREADS, s->mb_height);
1399  else
1400  max_slices = MAX_THREADS;
1401  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
1402  " reducing to %d\n", nb_slices, max_slices);
1403  nb_slices = max_slices;
1404  }
1405 
1406  if ((s->width || s->height) &&
1407  av_image_check_size(s->width, s->height, 0, s->avctx))
1408  return -1;
1409 
1410  dct_init(s);
1411 
1412  s->flags = s->avctx->flags;
1413  s->flags2 = s->avctx->flags2;
1414 
1415  /* set chroma shifts */
1417  &s->chroma_x_shift,
1418  &s->chroma_y_shift);
1419 
1420 
1422  MAX_PICTURE_COUNT * sizeof(Picture), fail);
1423  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1424  s->picture[i].f = av_frame_alloc();
1425  if (!s->picture[i].f)
1426  goto fail;
1427  }
1428  s->next_picture.f = av_frame_alloc();
1429  if (!s->next_picture.f)
1430  goto fail;
1431  s->last_picture.f = av_frame_alloc();
1432  if (!s->last_picture.f)
1433  goto fail;
1435  if (!s->current_picture.f)
1436  goto fail;
1437  s->new_picture.f = av_frame_alloc();
1438  if (!s->new_picture.f)
1439  goto fail;
1440 
1441  if (init_context_frame(s))
1442  goto fail;
1443 
1444  s->parse_context.state = -1;
1445 
1446  s->context_initialized = 1;
1447  s->thread_context[0] = s;
1448 
1449 // if (s->width && s->height) {
1450  if (nb_slices > 1) {
1451  for (i = 1; i < nb_slices; i++) {
 /* NOTE(review): av_malloc() result is used unchecked by the
  * memcpy below; consider a NULL check. */
1452  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1453  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1454  }
1455 
1456  for (i = 0; i < nb_slices; i++) {
1457  if (init_duplicate_context(s->thread_context[i]) < 0)
1458  goto fail;
 /* rounded even split of macroblock rows across slice contexts */
1459  s->thread_context[i]->start_mb_y =
1460  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1461  s->thread_context[i]->end_mb_y =
1462  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1463  }
1464  } else {
1465  if (init_duplicate_context(s) < 0)
1466  goto fail;
1467  s->start_mb_y = 0;
1468  s->end_mb_y = s->mb_height;
1469  }
1470  s->slice_context_count = nb_slices;
1471 // }
1472 
1473  return 0;
1474  fail:
1475  ff_mpv_common_end(s);
1476  return -1;
1477 }
1478 
1479 /**
1480  * Frees and resets MpegEncContext fields depending on the resolution.
1481  * Is used during resolution changes to avoid a full reinitialization of the
1482  * codec.
 * Counterpart of init_context_frame(); every table allocated there is
 * freed here and the derived pointers are reset to NULL.
1483  */
1485 {
1486  int i, j, k;
1487 
1488  av_freep(&s->mb_type);
 /* derived pointers into the freed base tables must not dangle */
1495  s->p_mv_table = NULL;
1496  s->b_forw_mv_table = NULL;
1497  s->b_back_mv_table = NULL;
1500  s->b_direct_mv_table = NULL;
1501  for (i = 0; i < 2; i++) {
1502  for (j = 0; j < 2; j++) {
1503  for (k = 0; k < 2; k++) {
1504  av_freep(&s->b_field_mv_table_base[i][j][k]);
1505  s->b_field_mv_table[i][j][k] = NULL;
1506  }
1507  av_freep(&s->b_field_select_table[i][j]);
1508  av_freep(&s->p_field_mv_table_base[i][j]);
1509  s->p_field_mv_table[i][j] = NULL;
1510  }
1512  }
1513 
1514  av_freep(&s->dc_val_base);
1516  av_freep(&s->mbintra_table);
1517  av_freep(&s->cbp_table);
1518  av_freep(&s->pred_dir_table);
1519 
1520  av_freep(&s->mbskip_table);
1521 
1523  av_freep(&s->er.er_temp_buffer);
1524  av_freep(&s->mb_index2xy);
1525  av_freep(&s->lambda_table);
1526 
1527  av_freep(&s->cplx_tab);
1528  av_freep(&s->bits_tab);
1529 
 /* force a stride recomputation on the next frame allocation */
1530  s->linesize = s->uvlinesize = 0;
1531 }
1532 
1534 {
 /* Rebuild the per-resolution context state after a frame size change
  * without a full codec reinit: free the old frame tables and slice
  * thread contexts, recompute the macroblock dimensions, then allocate
  * everything again.  Returns 0 on success or a negative AVERROR code. */
1535  int i, err = 0;
1536 
1537  if (!s->context_initialized)
1538  return AVERROR(EINVAL);
1539 
1540  if (s->slice_context_count > 1) {
1541  for (i = 0; i < s->slice_context_count; i++) {
1543  }
 /* context 0 is s itself — only the duplicates are heap allocated */
1544  for (i = 1; i < s->slice_context_count; i++) {
1545  av_freep(&s->thread_context[i]);
1546  }
1547  } else
1549 
1550  free_context_frame(s);
1551 
1552  if (s->picture)
1553  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1554  s->picture[i].needs_realloc = 1;
1555  }
1556 
1557  s->last_picture_ptr =
1558  s->next_picture_ptr =
1560 
1561  // init
1563  s->mb_height = (s->height + 31) / 32 * 2;
1564  else
1565  s->mb_height = (s->height + 15) / 16;
1566 
1567  if ((s->width || s->height) &&
1568  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1569  goto fail;
1570 
1571  if ((err = init_context_frame(s)))
1572  goto fail;
1573 
1574  s->thread_context[0] = s;
1575 
1576  if (s->width && s->height) {
1577  int nb_slices = s->slice_context_count;
1578  if (nb_slices > 1) {
1579  for (i = 1; i < nb_slices; i++) {
 /* NOTE(review): av_malloc() result is used unchecked by the
  * memcpy below; consider a NULL check. */
1580  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1581  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1582  }
1583 
1584  for (i = 0; i < nb_slices; i++) {
1585  if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1586  goto fail;
 /* rounded even split of macroblock rows across slice contexts */
1587  s->thread_context[i]->start_mb_y =
1588  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1589  s->thread_context[i]->end_mb_y =
1590  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1591  }
1592  } else {
1593  err = init_duplicate_context(s);
1594  if (err < 0)
1595  goto fail;
1596  s->start_mb_y = 0;
1597  s->end_mb_y = s->mb_height;
1598  }
1599  s->slice_context_count = nb_slices;
1600  }
1601 
1602  return 0;
1603  fail:
1604  ff_mpv_common_end(s);
1605  return err;
1606 }
1607 
1608 /* free common structure for both encoder and decoder */
1610 {
1611  int i;
1612 
1613  if (s->slice_context_count > 1) {
1614  for (i = 0; i < s->slice_context_count; i++) {
1616  }
 /* context 0 is s itself — only the duplicates are heap allocated */
1617  for (i = 1; i < s->slice_context_count; i++) {
1618  av_freep(&s->thread_context[i]);
1619  }
1620  s->slice_context_count = 1;
1621  } else free_duplicate_context(s);
1622 
1624  s->parse_context.buffer_size = 0;
1625 
1628 
1629  if (s->picture) {
1630  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1632  ff_mpeg_unref_picture(s, &s->picture[i]);
1633  av_frame_free(&s->picture[i].f);
1634  }
1635  }
1636  av_freep(&s->picture);
1649 
1650  free_context_frame(s);
1651 
1652  s->context_initialized = 0;
1653  s->last_picture_ptr =
1654  s->next_picture_ptr =
1656  s->linesize = s->uvlinesize = 0;
1657 }
1658 
1660  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1661 {
 /* Build the run-length helper tables of an RLTable:
  *   max_level[last][run]  - largest level coded for a given run
  *   max_run[last][level]  - largest run coded for a given level
  *   index_run[last][run]  - first table index using a given run
  * computed separately for the not-last (last==0) and last (last==1)
  * halves of the table.  When static_store is non-NULL the results live
  * in that static buffer (and are computed only once); otherwise they
  * are av_malloc()ed.
  * NOTE(review): the av_malloc() results below are not NULL-checked. */
1662  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1663  uint8_t index_run[MAX_RUN + 1];
1664  int last, run, level, start, end, i;
1665 
1666  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1667  if (static_store && rl->max_level[0])
1668  return;
1669 
1670  /* compute max_level[], max_run[] and index_run[] */
1671  for (last = 0; last < 2; last++) {
1672  if (last == 0) {
1673  start = 0;
1674  end = rl->last;
1675  } else {
1676  start = rl->last;
1677  end = rl->n;
1678  }
1679 
 /* rl->n marks "no entry" in index_run */
1680  memset(max_level, 0, MAX_RUN + 1);
1681  memset(max_run, 0, MAX_LEVEL + 1);
1682  memset(index_run, rl->n, MAX_RUN + 1);
1683  for (i = start; i < end; i++) {
1684  run = rl->table_run[i];
1685  level = rl->table_level[i];
1686  if (index_run[run] == rl->n)
1687  index_run[run] = i;
1688  if (level > max_level[run])
1689  max_level[run] = level;
1690  if (run > max_run[level])
1691  max_run[level] = run;
1692  }
 /* the three tables share one static buffer, laid out back to back */
1693  if (static_store)
1694  rl->max_level[last] = static_store[last];
1695  else
1696  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1697  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1698  if (static_store)
1699  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1700  else
1701  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1702  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1703  if (static_store)
1704  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1705  else
1706  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1707  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1708  }
1709 }
1710 
1711 av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
1712 {
1713  int i, q;
1714  VLC_TYPE table[1500][2] = {{0}};
1715  VLC vlc = { .table = table, .table_allocated = static_size };
1716  av_assert0(static_size <= FF_ARRAY_ELEMS(table));
1717  init_vlc(&vlc, 9, rl->n + 1, &rl->table_vlc[0][1], 4, 2, &rl->table_vlc[0][0], 4, 2, INIT_VLC_USE_NEW_STATIC);
1718 
1719  for (q = 0; q < 32; q++) {
1720  int qmul = q * 2;
1721  int qadd = (q - 1) | 1;
1722 
1723  if (q == 0) {
1724  qmul = 1;
1725  qadd = 0;
1726  }
1727  for (i = 0; i < vlc.table_size; i++) {
1728  int code = vlc.table[i][0];
1729  int len = vlc.table[i][1];
1730  int level, run;
1731 
1732  if (len == 0) { // illegal code
1733  run = 66;
1734  level = MAX_LEVEL;
1735  } else if (len < 0) { // more bits needed
1736  run = 0;
1737  level = code;
1738  } else {
1739  if (code == rl->n) { // esc
1740  run = 66;
1741  level = 0;
1742  } else {
1743  run = rl->table_run[code] + 1;
1744  level = rl->table_level[code] * qmul + qadd;
1745  if (code >= rl->last) run += 192;
1746  }
1747  }
1748  rl->rl_vlc[q][i].len = len;
1749  rl->rl_vlc[q][i].level = level;
1750  rl->rl_vlc[q][i].run = run;
1751  }
1752  }
1753 }
1754 
1756 {
 /* Unreference every picture slot that is not currently flagged as a
  * reference frame, making those slots reusable. */
1757  int i;
1758 
1759  /* release non reference frames */
1760  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1761  if (!s->picture[i].reference)
1762  ff_mpeg_unref_picture(s, &s->picture[i]);
1763  }
1764 }
1765 
1766 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1767 {
1768  if (pic == s->last_picture_ptr)
1769  return 0;
1770  if (!pic->f->buf[0])
1771  return 1;
1772  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1773  return 1;
1774  return 0;
1775 }
1776 
/**
 * Find an index into s->picture[] that can receive a new frame.
 * @param shared if set, only slots with no data buffer qualify
 * @return the picture index; aborts on buffer overflow (see below)
 */
1777 static int find_unused_picture(MpegEncContext *s, int shared)
1778 {
1779  int i;
1780 
1781  if (shared) {
1782  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1783  if (!s->picture[i].f->buf[0] && &s->picture[i] != s->last_picture_ptr)
1784  return i;
1785  }
1786  } else {
1787  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1788  if (pic_is_unused(s, &s->picture[i]))
1789  return i;
1790  }
1791  }
1792 
1794  "Internal error, picture buffer overflow\n");
1795  /* We could return -1, but the codec would crash trying to draw into a
1796  * non-existing frame anyway. This is safer than waiting for a random crash.
1797  * Also the return of this is never useful, an encoder must only allocate
1798  * as much as allowed in the specification. This has no relationship to how
1799  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1800  * enough for such valid streams).
1801  * Plus, a decoder has to check stream validity and remove frames if too
1802  * many reference frames are around. Waiting for "OOM" is not correct at
1803  * all. Similarly, missing reference frames have to be replaced by
1804  * interpolated/MC frames, anything else is a bug in the codec ...
1805  */
1806  abort();
1807  return -1;
1808 }
1809 
1811 {
 /* Wrapper around find_unused_picture() that additionally resets a slot
  * flagged needs_realloc (freeing its tables and buffer) before handing
  * the index back to the caller. */
1812  int ret = find_unused_picture(s, shared);
1813 
1814  if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1815  if (s->picture[ret].needs_realloc) {
1816  s->picture[ret].needs_realloc = 0;
1817  ff_free_picture_tables(&s->picture[ret]);
1818  ff_mpeg_unref_picture(s, &s->picture[ret]);
1819  }
1820  }
1821  return ret;
1822 }
1823 
1824 static void gray_frame(AVFrame *frame)
1825 {
1826  int i, h_chroma_shift, v_chroma_shift;
1827 
1828  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1829 
1830  for(i=0; i<frame->height; i++)
1831  memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1832  for(i=0; i<FF_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1833  memset(frame->data[1] + frame->linesize[1]*i,
1834  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1835  memset(frame->data[2] + frame->linesize[2]*i,
1836  0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1837  }
1838 }
1839 
1840 /**
1841  * generic function called after decoding
1842  * the header and before a frame is decoded.
 * Sets up the current/last/next picture pointers, allocates dummy
 * reference frames where references are missing, and selects the
 * dequantizer functions.
 * @return 0 on success, a negative value on failure
1843  */
1845 {
1846  int i, ret;
1847  Picture *pic;
1848  s->mb_skipped = 0;
1849 
1850  if (!ff_thread_can_start_frame(avctx)) {
1851  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1852  return -1;
1853  }
1854 
1855  /* mark & release old frames */
1856  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1858  s->last_picture_ptr->f->buf[0]) {
1860  }
1861 
1862  /* release forgotten pictures */
1863  /* if (mpeg124/h263) */
1864  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1865  if (&s->picture[i] != s->last_picture_ptr &&
1866  &s->picture[i] != s->next_picture_ptr &&
1867  s->picture[i].reference && !s->picture[i].needs_realloc) {
 /* with frame threading, leftover references are expected */
1868  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1869  av_log(avctx, AV_LOG_ERROR,
1870  "releasing zombie picture\n");
1871  ff_mpeg_unref_picture(s, &s->picture[i]);
1872  }
1873  }
1874 
1876 
1878 
1879  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1880  // we already have an unused image
1881  // (maybe it was set before reading the header)
1882  pic = s->current_picture_ptr;
1883  } else {
1884  i = ff_find_unused_picture(s, 0);
1885  if (i < 0) {
1886  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1887  return i;
1888  }
1889  pic = &s->picture[i];
1890  }
1891 
 /* reference = 3 marks a frame referenced by both fields */
1892  pic->reference = 0;
1893  if (!s->droppable) {
1894  if (s->pict_type != AV_PICTURE_TYPE_B)
1895  pic->reference = 3;
1896  }
1897 
1899 
1900  if (ff_alloc_picture(s, pic, 0) < 0)
1901  return -1;
1902 
1903  s->current_picture_ptr = pic;
1904  // FIXME use only the vars from current_pic
1906  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1908  if (s->picture_structure != PICT_FRAME)
1911  }
1915 
1917  // if (s->flags && CODEC_FLAG_QSCALE)
1918  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1920 
1921  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1922  s->current_picture_ptr)) < 0)
1923  return ret;
1924 
 /* non-B frames shift the reference chain: current becomes next/last */
1925  if (s->pict_type != AV_PICTURE_TYPE_B) {
1927  if (!s->droppable)
1929  }
1930  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1932  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1933  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1935  s->pict_type, s->droppable);
1936 
1937  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1938  (s->pict_type != AV_PICTURE_TYPE_I ||
1939  s->picture_structure != PICT_FRAME)) {
1940  int h_chroma_shift, v_chroma_shift;
1942  &h_chroma_shift, &v_chroma_shift);
1944  av_log(avctx, AV_LOG_DEBUG,
1945  "allocating dummy last picture for B frame\n");
1946  else if (s->pict_type != AV_PICTURE_TYPE_I)
1947  av_log(avctx, AV_LOG_ERROR,
1948  "warning: first frame is no keyframe\n");
1949  else if (s->picture_structure != PICT_FRAME)
1950  av_log(avctx, AV_LOG_DEBUG,
1951  "allocate dummy last picture for field based first keyframe\n");
1952 
1953  /* Allocate a dummy frame */
1954  i = ff_find_unused_picture(s, 0);
1955  if (i < 0) {
1956  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1957  return i;
1958  }
1959  s->last_picture_ptr = &s->picture[i];
1960 
1961  s->last_picture_ptr->reference = 3;
1962  s->last_picture_ptr->f->key_frame = 0;
1964 
1965  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1966  s->last_picture_ptr = NULL;
1967  return -1;
1968  }
1969 
 /* fill the dummy reference with mid-gray so missing-reference output
  * is visually neutral (skipped for hwaccel frames) */
1970  if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
1971  for(i=0; i<avctx->height; i++)
1972  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1973  0x80, avctx->width);
1974  for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1975  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1976  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1977  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1978  0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1979  }
1980 
1982  for(i=0; i<avctx->height; i++)
1983  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1984  }
1985  }
1986 
1987  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1988  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1989  }
1990  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1991  s->pict_type == AV_PICTURE_TYPE_B) {
1992  /* Allocate a dummy frame */
1993  i = ff_find_unused_picture(s, 0);
1994  if (i < 0) {
1995  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1996  return i;
1997  }
1998  s->next_picture_ptr = &s->picture[i];
1999 
2000  s->next_picture_ptr->reference = 3;
2001  s->next_picture_ptr->f->key_frame = 0;
2003 
2004  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
2005  s->next_picture_ptr = NULL;
2006  return -1;
2007  }
2008  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
2009  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
2010  }
2011 
2012 #if 0 // BUFREF-FIXME
2013  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
2014  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
2015 #endif
2016  if (s->last_picture_ptr) {
2018  if (s->last_picture_ptr->f->buf[0] &&
2019  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
2020  s->last_picture_ptr)) < 0)
2021  return ret;
2022  }
2023  if (s->next_picture_ptr) {
2025  if (s->next_picture_ptr->f->buf[0] &&
2026  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
2027  s->next_picture_ptr)) < 0)
2028  return ret;
2029  }
2030 
2032  s->last_picture_ptr->f->buf[0]));
2033 
 /* for field pictures: offset data to the current field and double the
  * linesizes so rows step over the interleaved other field */
2034  if (s->picture_structure!= PICT_FRAME) {
2035  int i;
2036  for (i = 0; i < 4; i++) {
2038  s->current_picture.f->data[i] +=
2039  s->current_picture.f->linesize[i];
2040  }
2041  s->current_picture.f->linesize[i] *= 2;
2042  s->last_picture.f->linesize[i] *= 2;
2043  s->next_picture.f->linesize[i] *= 2;
2044  }
2045  }
2046 
2047  s->err_recognition = avctx->err_recognition;
2048 
2049  /* set dequantizer, we can't do it during init as
2050  * it might change for mpeg4 and we can't do it in the header
2051  * decode as init is not called for mpeg4 there yet */
2052  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2055  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
2058  } else {
2061  }
2062 
2063  if (s->avctx->debug & FF_DEBUG_NOMC) {
2065  }
2066 
2067  return 0;
2068 }
2069 
2070 /* called after a frame has been decoded. */
2072 {
 /* emms_c() clears the x86 MMX state before returning to float code. */
2073  emms_c();
2074 
 /* NOTE(review): the statement guarded by this condition is lost in this
  * view — presumably it reports progress on the current reference frame;
  * verify against the full source. */
2075  if (s->current_picture.reference)
2077 }
2078 
2079 
2080 #if FF_API_VISMV
/**
 * Clip the segment (sx,sy)-(ex,ey) against the vertical band 0 <= x <= maxx,
 * updating the endpoints in place.
 * @return 1 if the segment lies entirely outside the band, else 0
 */
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
{
    /* Normalize so that (sx,sy) is the leftmost endpoint; otherwise
     * recurse once with the endpoints exchanged. */
    if (*sx > *ex)
        return clip_line(ex, ey, sx, sy, maxx);

    /* Clip against the left border x = 0. */
    if (*sx < 0) {
        if (*ex < 0)
            return 1;                   /* fully left of the band */
        /* intersect with x = 0; 64-bit product avoids int overflow */
        *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
        *sx = 0;
    }

    /* Clip against the right border x = maxx. */
    if (*ex > maxx) {
        if (*sx > maxx)
            return 1;                   /* fully right of the band */
        *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
        *ex = maxx;
    }
    return 0;
}
2101 
2102 
2103 /**
2104  * Draw a line from (ex, ey) -> (sx, sy).
 * Steps along the major axis using 16.16 fixed-point interpolation and
 * adds the color to the two nearest pixels weighted by coverage
 * (additive anti-aliasing).
2105  * @param w width of the image
2106  * @param h height of the image
2107  * @param stride stride/linesize of the image
2108  * @param color color of the arrow
2109  */
2110 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
2111  int w, int h, int stride, int color)
2112 {
2113  int x, y, fr, f;
2114 
 /* clip against both image borders; bail out if fully outside */
2115  if (clip_line(&sx, &sy, &ex, &ey, w - 1))
2116  return;
2117  if (clip_line(&sy, &sx, &ey, &ex, h - 1))
2118  return;
2119 
2120  sx = av_clip(sx, 0, w - 1);
2121  sy = av_clip(sy, 0, h - 1);
2122  ex = av_clip(ex, 0, w - 1);
2123  ey = av_clip(ey, 0, h - 1);
2124 
2125  buf[sy * stride + sx] += color;
2126 
 /* x-major: iterate over x, interpolate y */
2127  if (FFABS(ex - sx) > FFABS(ey - sy)) {
2128  if (sx > ex) {
2129  FFSWAP(int, sx, ex);
2130  FFSWAP(int, sy, ey);
2131  }
2132  buf += sx + sy * stride;
2133  ex -= sx;
2134  f = ((ey - sy) << 16) / ex;
2135  for (x = 0; x <= ex; x++) {
2136  y = (x * f) >> 16;
2137  fr = (x * f) & 0xFFFF;
2138  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2139  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
2140  }
 /* y-major: iterate over y, interpolate x */
2141  } else {
2142  if (sy > ey) {
2143  FFSWAP(int, sx, ex);
2144  FFSWAP(int, sy, ey);
2145  }
2146  buf += sx + sy * stride;
2147  ey -= sy;
2148  if (ey)
2149  f = ((ex - sx) << 16) / ey;
2150  else
2151  f = 0;
2152  for(y= 0; y <= ey; y++){
2153  x = (y*f) >> 16;
2154  fr = (y*f) & 0xFFFF;
2155  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
2156  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
2157  }
2158  }
2159 }
2160 
2161 /**
2162  * Draw an arrow from (ex, ey) -> (sx, sy).
 * The head is drawn as two short strokes derived from the direction
 * vector rotated by +/-45 degrees; `tail` flips the head to the other
 * end, `direction` swaps the start and end points.
2163  * @param w width of the image
2164  * @param h height of the image
2165  * @param stride stride/linesize of the image
2166  * @param color color of the arrow
2167  */
2168 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
2169  int ey, int w, int h, int stride, int color, int tail, int direction)
2170 {
2171  int dx,dy;
2172 
2173  if (direction) {
2174  FFSWAP(int, sx, ex);
2175  FFSWAP(int, sy, ey);
2176  }
2177 
 /* loose clamp; draw_line() performs the exact clipping */
2178  sx = av_clip(sx, -100, w + 100);
2179  sy = av_clip(sy, -100, h + 100);
2180  ex = av_clip(ex, -100, w + 100);
2181  ey = av_clip(ey, -100, h + 100);
2182 
2183  dx = ex - sx;
2184  dy = ey - sy;
2185 
 /* only draw a head if the vector is longer than 3 pixels */
2186  if (dx * dx + dy * dy > 3 * 3) {
2187  int rx = dx + dy;
2188  int ry = -dx + dy;
2189  int length = ff_sqrt((rx * rx + ry * ry) << 8);
2190 
2191  // FIXME subpixel accuracy
2192  rx = ROUNDED_DIV(rx * 3 << 4, length);
2193  ry = ROUNDED_DIV(ry * 3 << 4, length);
2194 
2195  if (tail) {
2196  rx = -rx;
2197  ry = -ry;
2198  }
2199 
2200  draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2201  draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2202  }
2203  draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2204 }
2205 #endif
2206 
2207 static int add_mb(AVMotionVector *mb, uint32_t mb_type,
2208  int dst_x, int dst_y,
2209  int src_x, int src_y,
2210  int direction)
2211 {
2212  mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
2213  mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
2214  mb->src_x = src_x;
2215  mb->src_y = src_y;
2216  mb->dst_x = dst_x;
2217  mb->dst_y = dst_y;
2218  mb->source = direction ? 1 : -1;
2219  mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
2220  return 1;
2221 }
2222 
2223 /**
2224  * Print debugging info for the given picture.
2225  */
2226 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
2227  uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2228  int *low_delay,
2229  int mb_width, int mb_height, int mb_stride, int quarter_sample)
2230 {
2231  if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
2232  const int shift = 1 + quarter_sample;
2233  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2234  const int mv_stride = (mb_width << mv_sample_log2) +
2235  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2236  int mb_x, mb_y, mbcount = 0;
2237 
2238  /* size is width * height * 2 * 4 where 2 is for directions and 4 is
2239  * for the maximum number of MB (4 MB in case of IS_8x8) */
2240  AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
2241  if (!mvs)
2242  return;
2243 
2244  for (mb_y = 0; mb_y < mb_height; mb_y++) {
2245  for (mb_x = 0; mb_x < mb_width; mb_x++) {
2246  int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2247  for (direction = 0; direction < 2; direction++) {
2248  if (!USES_LIST(mb_type, direction))
2249  continue;
2250  if (IS_8X8(mb_type)) {
2251  for (i = 0; i < 4; i++) {
2252  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2253  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2254  int xy = (mb_x * 2 + (i & 1) +
2255  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2256  int mx = (motion_val[direction][xy][0] >> shift) + sx;
2257  int my = (motion_val[direction][xy][1] >> shift) + sy;
2258  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2259  }
2260  } else if (IS_16X8(mb_type)) {
2261  for (i = 0; i < 2; i++) {
2262  int sx = mb_x * 16 + 8;
2263  int sy = mb_y * 16 + 4 + 8 * i;
2264  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2265  int mx = (motion_val[direction][xy][0] >> shift);
2266  int my = (motion_val[direction][xy][1] >> shift);
2267 
2268  if (IS_INTERLACED(mb_type))
2269  my *= 2;
2270 
2271  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2272  }
2273  } else if (IS_8X16(mb_type)) {
2274  for (i = 0; i < 2; i++) {
2275  int sx = mb_x * 16 + 4 + 8 * i;
2276  int sy = mb_y * 16 + 8;
2277  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2278  int mx = motion_val[direction][xy][0] >> shift;
2279  int my = motion_val[direction][xy][1] >> shift;
2280 
2281  if (IS_INTERLACED(mb_type))
2282  my *= 2;
2283 
2284  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2285  }
2286  } else {
2287  int sx = mb_x * 16 + 8;
2288  int sy = mb_y * 16 + 8;
2289  int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2290  int mx = (motion_val[direction][xy][0]>>shift) + sx;
2291  int my = (motion_val[direction][xy][1]>>shift) + sy;
2292  mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2293  }
2294  }
2295  }
2296  }
2297 
2298  if (mbcount) {
2299  AVFrameSideData *sd;
2300 
2301  av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
2303  if (!sd) {
2304  av_freep(&mvs);
2305  return;
2306  }
2307  memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
2308  }
2309 
2310  av_freep(&mvs);
2311  }
2312 
2313  /* TODO: export all the following to make them accessible for users (and filters) */
2314  if (avctx->hwaccel || !mbtype_table
2316  return;
2317 
2318 
2319  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
2320  int x,y;
2321 
2322  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
2324  for (y = 0; y < mb_height; y++) {
2325  for (x = 0; x < mb_width; x++) {
2326  if (avctx->debug & FF_DEBUG_SKIP) {
2327  int count = mbskip_table[x + y * mb_stride];
2328  if (count > 9)
2329  count = 9;
2330  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
2331  }
2332  if (avctx->debug & FF_DEBUG_QP) {
2333  av_log(avctx, AV_LOG_DEBUG, "%2d",
2334  qscale_table[x + y * mb_stride]);
2335  }
2336  if (avctx->debug & FF_DEBUG_MB_TYPE) {
2337  int mb_type = mbtype_table[x + y * mb_stride];
2338  // Type & MV direction
2339  if (IS_PCM(mb_type))
2340  av_log(avctx, AV_LOG_DEBUG, "P");
2341  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
2342  av_log(avctx, AV_LOG_DEBUG, "A");
2343  else if (IS_INTRA4x4(mb_type))
2344  av_log(avctx, AV_LOG_DEBUG, "i");
2345  else if (IS_INTRA16x16(mb_type))
2346  av_log(avctx, AV_LOG_DEBUG, "I");
2347  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
2348  av_log(avctx, AV_LOG_DEBUG, "d");
2349  else if (IS_DIRECT(mb_type))
2350  av_log(avctx, AV_LOG_DEBUG, "D");
2351  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
2352  av_log(avctx, AV_LOG_DEBUG, "g");
2353  else if (IS_GMC(mb_type))
2354  av_log(avctx, AV_LOG_DEBUG, "G");
2355  else if (IS_SKIP(mb_type))
2356  av_log(avctx, AV_LOG_DEBUG, "S");
2357  else if (!USES_LIST(mb_type, 1))
2358  av_log(avctx, AV_LOG_DEBUG, ">");
2359  else if (!USES_LIST(mb_type, 0))
2360  av_log(avctx, AV_LOG_DEBUG, "<");
2361  else {
2362  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2363  av_log(avctx, AV_LOG_DEBUG, "X");
2364  }
2365 
2366  // segmentation
2367  if (IS_8X8(mb_type))
2368  av_log(avctx, AV_LOG_DEBUG, "+");
2369  else if (IS_16X8(mb_type))
2370  av_log(avctx, AV_LOG_DEBUG, "-");
2371  else if (IS_8X16(mb_type))
2372  av_log(avctx, AV_LOG_DEBUG, "|");
2373  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
2374  av_log(avctx, AV_LOG_DEBUG, " ");
2375  else
2376  av_log(avctx, AV_LOG_DEBUG, "?");
2377 
2378 
2379  if (IS_INTERLACED(mb_type))
2380  av_log(avctx, AV_LOG_DEBUG, "=");
2381  else
2382  av_log(avctx, AV_LOG_DEBUG, " ");
2383  }
2384  }
2385  av_log(avctx, AV_LOG_DEBUG, "\n");
2386  }
2387  }
2388 
2389  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
2390  (avctx->debug_mv)) {
2391  int mb_y;
2392  int i;
2393  int h_chroma_shift, v_chroma_shift, block_height;
2394 #if FF_API_VISMV
2395  const int shift = 1 + quarter_sample;
2396  uint8_t *ptr;
2397  const int width = avctx->width;
2398  const int height = avctx->height;
2399 #endif
2400  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
2401  const int mv_stride = (mb_width << mv_sample_log2) +
2402  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
2403 
2404  *low_delay = 0; // needed to see the vectors without trashing the buffers
2405 
2406  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
2407 
2408  av_frame_make_writable(pict);
2409 
2410  pict->opaque = NULL;
2411 #if FF_API_VISMV
2412  ptr = pict->data[0];
2413 #endif
2414  block_height = 16 >> v_chroma_shift;
2415 
2416  for (mb_y = 0; mb_y < mb_height; mb_y++) {
2417  int mb_x;
2418  for (mb_x = 0; mb_x < mb_width; mb_x++) {
2419  const int mb_index = mb_x + mb_y * mb_stride;
2420 #if FF_API_VISMV
2421  if ((avctx->debug_mv) && motion_val[0]) {
2422  int type;
2423  for (type = 0; type < 3; type++) {
2424  int direction = 0;
2425  switch (type) {
2426  case 0:
2427  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
2428  (pict->pict_type!= AV_PICTURE_TYPE_P))
2429  continue;
2430  direction = 0;
2431  break;
2432  case 1:
2433  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
2434  (pict->pict_type!= AV_PICTURE_TYPE_B))
2435  continue;
2436  direction = 0;
2437  break;
2438  case 2:
2439  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
2440  (pict->pict_type!= AV_PICTURE_TYPE_B))
2441  continue;
2442  direction = 1;
2443  break;
2444  }
2445  if (!USES_LIST(mbtype_table[mb_index], direction))
2446  continue;
2447 
2448  if (IS_8X8(mbtype_table[mb_index])) {
2449  int i;
2450  for (i = 0; i < 4; i++) {
2451  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2452  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2453  int xy = (mb_x * 2 + (i & 1) +
2454  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2455  int mx = (motion_val[direction][xy][0] >> shift) + sx;
2456  int my = (motion_val[direction][xy][1] >> shift) + sy;
2457  draw_arrow(ptr, sx, sy, mx, my, width,
2458  height, pict->linesize[0], 100, 0, direction);
2459  }
2460  } else if (IS_16X8(mbtype_table[mb_index])) {
2461  int i;
2462  for (i = 0; i < 2; i++) {
2463  int sx = mb_x * 16 + 8;
2464  int sy = mb_y * 16 + 4 + 8 * i;
2465  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2466  int mx = (motion_val[direction][xy][0] >> shift);
2467  int my = (motion_val[direction][xy][1] >> shift);
2468 
2469  if (IS_INTERLACED(mbtype_table[mb_index]))
2470  my *= 2;
2471 
2472  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2473  height, pict->linesize[0], 100, 0, direction);
2474  }
2475  } else if (IS_8X16(mbtype_table[mb_index])) {
2476  int i;
2477  for (i = 0; i < 2; i++) {
2478  int sx = mb_x * 16 + 4 + 8 * i;
2479  int sy = mb_y * 16 + 8;
2480  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2481  int mx = motion_val[direction][xy][0] >> shift;
2482  int my = motion_val[direction][xy][1] >> shift;
2483 
2484  if (IS_INTERLACED(mbtype_table[mb_index]))
2485  my *= 2;
2486 
2487  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2488  height, pict->linesize[0], 100, 0, direction);
2489  }
2490  } else {
2491  int sx= mb_x * 16 + 8;
2492  int sy= mb_y * 16 + 8;
2493  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2494  int mx= (motion_val[direction][xy][0]>>shift) + sx;
2495  int my= (motion_val[direction][xy][1]>>shift) + sy;
2496  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2497  }
2498  }
2499  }
2500 #endif
2501  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2502  uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2503  0x0101010101010101ULL;
2504  int y;
2505  for (y = 0; y < block_height; y++) {
2506  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2507  (block_height * mb_y + y) *
2508  pict->linesize[1]) = c;
2509  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2510  (block_height * mb_y + y) *
2511  pict->linesize[2]) = c;
2512  }
2513  }
2514  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2515  motion_val[0]) {
2516  int mb_type = mbtype_table[mb_index];
2517  uint64_t u,v;
2518  int y;
2519 #define COLOR(theta, r) \
2520  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2521  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2522 
2523 
2524  u = v = 128;
2525  if (IS_PCM(mb_type)) {
2526  COLOR(120, 48)
2527  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2528  IS_INTRA16x16(mb_type)) {
2529  COLOR(30, 48)
2530  } else if (IS_INTRA4x4(mb_type)) {
2531  COLOR(90, 48)
2532  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2533  // COLOR(120, 48)
2534  } else if (IS_DIRECT(mb_type)) {
2535  COLOR(150, 48)
2536  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2537  COLOR(170, 48)
2538  } else if (IS_GMC(mb_type)) {
2539  COLOR(190, 48)
2540  } else if (IS_SKIP(mb_type)) {
2541  // COLOR(180, 48)
2542  } else if (!USES_LIST(mb_type, 1)) {
2543  COLOR(240, 48)
2544  } else if (!USES_LIST(mb_type, 0)) {
2545  COLOR(0, 48)
2546  } else {
2547  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2548  COLOR(300,48)
2549  }
2550 
2551  u *= 0x0101010101010101ULL;
2552  v *= 0x0101010101010101ULL;
2553  for (y = 0; y < block_height; y++) {
2554  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2555  (block_height * mb_y + y) * pict->linesize[1]) = u;
2556  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2557  (block_height * mb_y + y) * pict->linesize[2]) = v;
2558  }
2559 
2560  // segmentation
2561  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2562  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2563  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2564  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2565  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2566  }
2567  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2568  for (y = 0; y < 16; y++)
2569  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2570  pict->linesize[0]] ^= 0x80;
2571  }
2572  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2573  int dm = 1 << (mv_sample_log2 - 2);
2574  for (i = 0; i < 4; i++) {
2575  int sx = mb_x * 16 + 8 * (i & 1);
2576  int sy = mb_y * 16 + 8 * (i >> 1);
2577  int xy = (mb_x * 2 + (i & 1) +
2578  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2579  // FIXME bidir
2580  int32_t *mv = (int32_t *) &motion_val[0][xy];
2581  if (mv[0] != mv[dm] ||
2582  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2583  for (y = 0; y < 8; y++)
2584  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2585  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2586  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2587  pict->linesize[0]) ^= 0x8080808080808080ULL;
2588  }
2589  }
2590 
2591  if (IS_INTERLACED(mb_type) &&
2592  avctx->codec->id == AV_CODEC_ID_H264) {
2593  // hmm
2594  }
2595  }
2596  mbskip_table[mb_index] = 0;
2597  }
2598  }
2599  }
2600 }
2601 
2603 {
2605  p->qscale_table, p->motion_val, &s->low_delay,
2606  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2607 }
2608 
2610 {
2612  int offset = 2*s->mb_stride + 1;
2613  if(!ref)
2614  return AVERROR(ENOMEM);
2615  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2616  ref->size -= offset;
2617  ref->data += offset;
2618  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2619 }
2620 
2622  uint8_t *dest, uint8_t *src,
2623  int field_based, int field_select,
2624  int src_x, int src_y,
2625  int width, int height, ptrdiff_t stride,
2626  int h_edge_pos, int v_edge_pos,
2627  int w, int h, h264_chroma_mc_func *pix_op,
2628  int motion_x, int motion_y)
2629 {
2630  const int lowres = s->avctx->lowres;
2631  const int op_index = FFMIN(lowres, 3);
2632  const int s_mask = (2 << lowres) - 1;
2633  int emu = 0;
2634  int sx, sy;
2635 
2636  if (s->quarter_sample) {
2637  motion_x /= 2;
2638  motion_y /= 2;
2639  }
2640 
2641  sx = motion_x & s_mask;
2642  sy = motion_y & s_mask;
2643  src_x += motion_x >> lowres + 1;
2644  src_y += motion_y >> lowres + 1;
2645 
2646  src += src_y * stride + src_x;
2647 
2648  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2649  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2651  s->linesize, s->linesize,
2652  w + 1, (h + 1) << field_based,
2653  src_x, src_y << field_based,
2654  h_edge_pos, v_edge_pos);
2655  src = s->edge_emu_buffer;
2656  emu = 1;
2657  }
2658 
2659  sx = (sx << 2) >> lowres;
2660  sy = (sy << 2) >> lowres;
2661  if (field_select)
2662  src += s->linesize;
2663  pix_op[op_index](dest, src, stride, h, sx, sy);
2664  return emu;
2665 }
2666 
2667 /* apply one mpeg motion vector to the three components */
2669  uint8_t *dest_y,
2670  uint8_t *dest_cb,
2671  uint8_t *dest_cr,
2672  int field_based,
2673  int bottom_field,
2674  int field_select,
2675  uint8_t **ref_picture,
2676  h264_chroma_mc_func *pix_op,
2677  int motion_x, int motion_y,
2678  int h, int mb_y)
2679 {
2680  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2681  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2682  ptrdiff_t uvlinesize, linesize;
2683  const int lowres = s->avctx->lowres;
2684  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2685  const int block_s = 8>>lowres;
2686  const int s_mask = (2 << lowres) - 1;
2687  const int h_edge_pos = s->h_edge_pos >> lowres;
2688  const int v_edge_pos = s->v_edge_pos >> lowres;
2689  linesize = s->current_picture.f->linesize[0] << field_based;
2690  uvlinesize = s->current_picture.f->linesize[1] << field_based;
2691 
2692  // FIXME obviously not perfect but qpel will not work in lowres anyway
2693  if (s->quarter_sample) {
2694  motion_x /= 2;
2695  motion_y /= 2;
2696  }
2697 
2698  if(field_based){
2699  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2700  }
2701 
2702  sx = motion_x & s_mask;
2703  sy = motion_y & s_mask;
2704  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2705  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2706 
2707  if (s->out_format == FMT_H263) {
2708  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2709  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2710  uvsrc_x = src_x >> 1;
2711  uvsrc_y = src_y >> 1;
2712  } else if (s->out_format == FMT_H261) {
2713  // even chroma mv's are full pel in H261
2714  mx = motion_x / 4;
2715  my = motion_y / 4;
2716  uvsx = (2 * mx) & s_mask;
2717  uvsy = (2 * my) & s_mask;
2718  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2719  uvsrc_y = mb_y * block_s + (my >> lowres);
2720  } else {
2721  if(s->chroma_y_shift){
2722  mx = motion_x / 2;
2723  my = motion_y / 2;
2724  uvsx = mx & s_mask;
2725  uvsy = my & s_mask;
2726  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2727  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2728  } else {
2729  if(s->chroma_x_shift){
2730  //Chroma422
2731  mx = motion_x / 2;
2732  uvsx = mx & s_mask;
2733  uvsy = motion_y & s_mask;
2734  uvsrc_y = src_y;
2735  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2736  } else {
2737  //Chroma444
2738  uvsx = motion_x & s_mask;
2739  uvsy = motion_y & s_mask;
2740  uvsrc_x = src_x;
2741  uvsrc_y = src_y;
2742  }
2743  }
2744  }
2745 
2746  ptr_y = ref_picture[0] + src_y * linesize + src_x;
2747  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2748  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2749 
2750  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2751  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2752  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2753  linesize >> field_based, linesize >> field_based,
2754  17, 17 + field_based,
2755  src_x, src_y << field_based, h_edge_pos,
2756  v_edge_pos);
2757  ptr_y = s->edge_emu_buffer;
2758  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2759  uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;
2760  uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
2761  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
2762  uvlinesize >> field_based, uvlinesize >> field_based,
2763  9, 9 + field_based,
2764  uvsrc_x, uvsrc_y << field_based,
2765  h_edge_pos >> 1, v_edge_pos >> 1);
2766  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
2767  uvlinesize >> field_based,uvlinesize >> field_based,
2768  9, 9 + field_based,
2769  uvsrc_x, uvsrc_y << field_based,
2770  h_edge_pos >> 1, v_edge_pos >> 1);
2771  ptr_cb = ubuf;
2772  ptr_cr = vbuf;
2773  }
2774  }
2775 
2776  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
2777  if (bottom_field) {
2778  dest_y += s->linesize;
2779  dest_cb += s->uvlinesize;
2780  dest_cr += s->uvlinesize;
2781  }
2782 
2783  if (field_select) {
2784  ptr_y += s->linesize;
2785  ptr_cb += s->uvlinesize;
2786  ptr_cr += s->uvlinesize;
2787  }
2788 
2789  sx = (sx << 2) >> lowres;
2790  sy = (sy << 2) >> lowres;
2791  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2792 
2793  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2794  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2795  uvsx = (uvsx << 2) >> lowres;
2796  uvsy = (uvsy << 2) >> lowres;
2797  if (hc) {
2798  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2799  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2800  }
2801  }
2802  // FIXME h261 lowres loop filter
2803 }
2804 
2806  uint8_t *dest_cb, uint8_t *dest_cr,
2807  uint8_t **ref_picture,
2808  h264_chroma_mc_func * pix_op,
2809  int mx, int my)
2810 {
2811  const int lowres = s->avctx->lowres;
2812  const int op_index = FFMIN(lowres, 3);
2813  const int block_s = 8 >> lowres;
2814  const int s_mask = (2 << lowres) - 1;
2815  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2816  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2817  int emu = 0, src_x, src_y, sx, sy;
2818  ptrdiff_t offset;
2819  uint8_t *ptr;
2820 
2821  if (s->quarter_sample) {
2822  mx /= 2;
2823  my /= 2;
2824  }
2825 
2826  /* In case of 8X8, we construct a single chroma motion vector
2827  with a special rounding */
2828  mx = ff_h263_round_chroma(mx);
2829  my = ff_h263_round_chroma(my);
2830 
2831  sx = mx & s_mask;
2832  sy = my & s_mask;
2833  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2834  src_y = s->mb_y * block_s + (my >> lowres + 1);
2835 
2836  offset = src_y * s->uvlinesize + src_x;
2837  ptr = ref_picture[1] + offset;
2838  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2839  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2841  s->uvlinesize, s->uvlinesize,
2842  9, 9,
2843  src_x, src_y, h_edge_pos, v_edge_pos);
2844  ptr = s->edge_emu_buffer;
2845  emu = 1;
2846  }
2847  sx = (sx << 2) >> lowres;
2848  sy = (sy << 2) >> lowres;
2849  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2850 
2851  ptr = ref_picture[2] + offset;
2852  if (emu) {
2854  s->uvlinesize, s->uvlinesize,
2855  9, 9,
2856  src_x, src_y, h_edge_pos, v_edge_pos);
2857  ptr = s->edge_emu_buffer;
2858  }
2859  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2860 }
2861 
2862 /**
2863  * motion compensation of a single macroblock
2864  * @param s context
2865  * @param dest_y luma destination pointer
2866  * @param dest_cb chroma cb/u destination pointer
2867  * @param dest_cr chroma cr/v destination pointer
2868  * @param dir direction (0->forward, 1->backward)
2869  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2870  * @param pix_op halfpel motion compensation function (average or put normally)
2871  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2872  */
2873 static inline void MPV_motion_lowres(MpegEncContext *s,
2874  uint8_t *dest_y, uint8_t *dest_cb,
2875  uint8_t *dest_cr,
2876  int dir, uint8_t **ref_picture,
2877  h264_chroma_mc_func *pix_op)
2878 {
2879  int mx, my;
2880  int mb_x, mb_y, i;
2881  const int lowres = s->avctx->lowres;
2882  const int block_s = 8 >>lowres;
2883 
2884  mb_x = s->mb_x;
2885  mb_y = s->mb_y;
2886 
2887  switch (s->mv_type) {
2888  case MV_TYPE_16X16:
2889  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2890  0, 0, 0,
2891  ref_picture, pix_op,
2892  s->mv[dir][0][0], s->mv[dir][0][1],
2893  2 * block_s, mb_y);
2894  break;
2895  case MV_TYPE_8X8:
2896  mx = 0;
2897  my = 0;
2898  for (i = 0; i < 4; i++) {
2899  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2900  s->linesize) * block_s,
2901  ref_picture[0], 0, 0,
2902  (2 * mb_x + (i & 1)) * block_s,
2903  (2 * mb_y + (i >> 1)) * block_s,
2904  s->width, s->height, s->linesize,
2905  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2906  block_s, block_s, pix_op,
2907  s->mv[dir][i][0], s->mv[dir][i][1]);
2908 
2909  mx += s->mv[dir][i][0];
2910  my += s->mv[dir][i][1];
2911  }
2912 
2913  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2914  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2915  pix_op, mx, my);
2916  break;
2917  case MV_TYPE_FIELD:
2918  if (s->picture_structure == PICT_FRAME) {
2919  /* top field */
2920  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2921  1, 0, s->field_select[dir][0],
2922  ref_picture, pix_op,
2923  s->mv[dir][0][0], s->mv[dir][0][1],
2924  block_s, mb_y);
2925  /* bottom field */
2926  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2927  1, 1, s->field_select[dir][1],
2928  ref_picture, pix_op,
2929  s->mv[dir][1][0], s->mv[dir][1][1],
2930  block_s, mb_y);
2931  } else {
2932  if (s->picture_structure != s->field_select[dir][0] + 1 &&
2933  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2934  ref_picture = s->current_picture_ptr->f->data;
2935 
2936  }
2937  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2938  0, 0, s->field_select[dir][0],
2939  ref_picture, pix_op,
2940  s->mv[dir][0][0],
2941  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2942  }
2943  break;
2944  case MV_TYPE_16X8:
2945  for (i = 0; i < 2; i++) {
2946  uint8_t **ref2picture;
2947 
2948  if (s->picture_structure == s->field_select[dir][i] + 1 ||
2949  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2950  ref2picture = ref_picture;
2951  } else {
2952  ref2picture = s->current_picture_ptr->f->data;
2953  }
2954 
2955  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2956  0, 0, s->field_select[dir][i],
2957  ref2picture, pix_op,
2958  s->mv[dir][i][0], s->mv[dir][i][1] +
2959  2 * block_s * i, block_s, mb_y >> 1);
2960 
2961  dest_y += 2 * block_s * s->linesize;
2962  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2963  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2964  }
2965  break;
2966  case MV_TYPE_DMV:
2967  if (s->picture_structure == PICT_FRAME) {
2968  for (i = 0; i < 2; i++) {
2969  int j;
2970  for (j = 0; j < 2; j++) {
2971  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2972  1, j, j ^ i,
2973  ref_picture, pix_op,
2974  s->mv[dir][2 * i + j][0],
2975  s->mv[dir][2 * i + j][1],
2976  block_s, mb_y);
2977  }
2979  }
2980  } else {
2981  for (i = 0; i < 2; i++) {
2982  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2983  0, 0, s->picture_structure != i + 1,
2984  ref_picture, pix_op,
2985  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2986  2 * block_s, mb_y >> 1);
2987 
2988  // after put we make avg of the same block
2990 
2991  // opposite parity is always in the same
2992  // frame if this is second field
2993  if (!s->first_field) {
2994  ref_picture = s->current_picture_ptr->f->data;
2995  }
2996  }
2997  }
2998  break;
2999  default:
3000  av_assert2(0);
3001  }
3002 }
3003 
/**
 * find the lowest MB row referenced in the MVs
 * (used by frame-threading to know how far the reference must be decoded)
 * NOTE(review): signature line (doc 3007) was dropped by the extraction;
 * reconstructed.
 */
static int lowest_referenced_row(MpegEncContext *s, int dir)
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    /* only plain frame pictures without GMC are analysed */
    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    for (i = 0; i < mvs; i++) {
        my     = s->mv[dir][i][1] << qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* convert the largest vertical displacement (quarter-pel after the
     * shift) into a macroblock-row offset, rounding up */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height - 1);
unhandled:
    /* conservative fallback: the whole reference frame may be needed */
    return s->mb_height - 1;
}
3041 
3042 /* put block[] to dest[] */
3043 static inline void put_dct(MpegEncContext *s,
3044  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
3045 {
3046  s->dct_unquantize_intra(s, block, i, qscale);
3047  s->idsp.idct_put(dest, line_size, block);
3048 }
3049 
3050 /* add block[] to dest[] */
3051 static inline void add_dct(MpegEncContext *s,
3052  int16_t *block, int i, uint8_t *dest, int line_size)
3053 {
3054  if (s->block_last_index[i] >= 0) {
3055  s->idsp.idct_add(dest, line_size, block);
3056  }
3057 }
3058 
3059 static inline void add_dequant_dct(MpegEncContext *s,
3060  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
3061 {
3062  if (s->block_last_index[i] >= 0) {
3063  s->dct_unquantize_inter(s, block, i, qscale);
3064 
3065  s->idsp.idct_add(dest, line_size, block);
3066  }
3067 }
3068 
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 * NOTE(review): signature line (doc 3072) was dropped by the extraction;
 * reconstructed.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy   = s->block_index[0];

    /* reset the luma DC predictors of the four 8x8 blocks of this MB */
    s->dc_val[0][xy]            =
    s->dc_val[0][xy + 1]        =
    s->dc_val[0][xy + wrap]     =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy],        0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version >= 3) {
        s->coded_block[xy]            =
        s->coded_block[xy + 1]        =
        s->coded_block[xy + wrap]     =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy   = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy] = 0;
}
3101 
3102 /* generic function called after a macroblock has been parsed by the
3103  decoder or after it has been encoded by the encoder.
3104 
3105  Important variables used:
3106  s->mb_intra : true if intra macroblock
3107  s->mv_dir : motion vector direction
3108  s->mv_type : motion vector type
3109  s->mv : motion vector
3110  s->interlaced_dct : true if interlaced dct used (mpeg2)
3111  */
/* Generic per-macroblock reconstruction (dequant/IDCT of the residual plus
 * motion compensation), shared by the MPEG-1/2/4-family decoders and the
 * encoder reconstruction path; see the comment block above for the context
 * fields it reads.  NOTE(review): this Doxygen extraction dropped several
 * source lines (gaps in the embedded numbering are flagged below); do not
 * treat this dump as compilable. */
3112 static av_always_inline
/* NOTE(review): doc line 3113 (first signature line with the function name
 * and the block[12][64] parameter) is missing from this extraction. */
3114  int lowres_flag, int is_mpeg12)
3115 {
3116  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
3117 
3118  if (CONFIG_XVMC &&
3119  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
3120  s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
3121  return;
3122  }
3123 
3124  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
3125  /* print DCT coefficients */
3126  int i,j;
3127  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
3128  for(i=0; i<6; i++){
3129  for(j=0; j<64; j++){
3130  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
3131  block[i][s->idsp.idct_permutation[j]]);
3132  }
3133  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3134  }
3135  }
3136 
3137  s->current_picture.qscale_table[mb_xy] = s->qscale;
3138 
3139  /* update DC predictors for P macroblocks */
3140  if (!s->mb_intra) {
3141  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
3142  if(s->mbintra_table[mb_xy])
/* NOTE(review): doc line 3143 (the call made when the previous MB here was
 * intra, presumably the intra-table cleanup) is missing from this dump. */
3144  } else {
3145  s->last_dc[0] =
3146  s->last_dc[1] =
3147  s->last_dc[2] = 128 << s->intra_dc_precision;
3148  }
3149  }
3150  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
3151  s->mbintra_table[mb_xy]=1;
3152 
3153  if ( (s->flags&CODEC_FLAG_PSNR)
/* NOTE(review): doc line 3154 (a middle clause of this condition) is
 * missing from this dump. */
3155  || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
3156  uint8_t *dest_y, *dest_cb, *dest_cr;
3157  int dct_linesize, dct_offset;
3158  op_pixels_func (*op_pix)[4];
3159  qpel_mc_func (*op_qpix)[16];
3160  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
3161  const int uvlinesize = s->current_picture.f->linesize[1];
3162  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
3163  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3164 
3165  /* avoid copy if macroblock skipped in last frame too */
3166  /* skip only during decoding as we might trash the buffers during encoding a bit */
3167  if(!s->encoding){
3168  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
3169 
3170  if (s->mb_skipped) {
3171  s->mb_skipped= 0;
/* NOTE(review): doc line 3172 (likely an assertion on the picture type)
 * is missing from this dump. */
3173  *mbskip_ptr = 1;
3174  } else if(!s->current_picture.reference) {
3175  *mbskip_ptr = 1;
3176  } else{
3177  *mbskip_ptr = 0; /* not skipped */
3178  }
3179  }
3180 
3181  dct_linesize = linesize << s->interlaced_dct;
3182  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3183 
3184  if(readable){
3185  dest_y= s->dest[0];
3186  dest_cb= s->dest[1];
3187  dest_cr= s->dest[2];
3188  }else{
3189  dest_y = s->b_scratchpad;
3190  dest_cb= s->b_scratchpad+16*linesize;
3191  dest_cr= s->b_scratchpad+32*linesize;
3192  }
3193 
3194  if (!s->mb_intra) {
3195  /* motion handling */
3196  /* decoding or more than one mb_type (MC was already done otherwise) */
3197  if(!s->encoding){
3198 
/* NOTE(review): doc lines 3199, 3201-3202 and 3206-3207 are missing; this
 * region appears to wait on reference-frame decoding progress for frame
 * threading — confirm against upstream before relying on it. */
3200  if (s->mv_dir & MV_DIR_FORWARD) {
3203  0);
3204  }
3205  if (s->mv_dir & MV_DIR_BACKWARD) {
3208  0);
3209  }
3210  }
3211 
3212  if(lowres_flag){
/* NOTE(review): doc line 3213 (the initial lowres op_pix assignment) is
 * missing from this dump. */
3214 
3215  if (s->mv_dir & MV_DIR_FORWARD) {
3216  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
/* NOTE(review): doc line 3217 (switching op_pix to the averaging variant
 * for bidirectional prediction) is missing from this dump. */
3218  }
3219  if (s->mv_dir & MV_DIR_BACKWARD) {
3220  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
3221  }
3222  }else{
3223  op_qpix = s->me.qpel_put;
3224  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
3225  op_pix = s->hdsp.put_pixels_tab;
3226  }else{
3227  op_pix = s->hdsp.put_no_rnd_pixels_tab;
3228  }
3229  if (s->mv_dir & MV_DIR_FORWARD) {
3230  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
3231  op_pix = s->hdsp.avg_pixels_tab;
3232  op_qpix= s->me.qpel_avg;
3233  }
3234  if (s->mv_dir & MV_DIR_BACKWARD) {
3235  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
3236  }
3237  }
3238  }
3239 
3240  /* skip dequant / idct if we are really late ;) */
3241  if(s->avctx->skip_idct){
/* NOTE(review): doc lines 3242-3243 (the skip_idct conditions for
 * non-reference / non-key pictures) are missing from this dump. */
3244  || s->avctx->skip_idct >= AVDISCARD_ALL)
3245  goto skip_idct;
3246  }
3247 
3248  /* add dct residue */
/* NOTE(review): doc line 3249 (the opening condition selecting the
 * dequant-and-add path by codec) is missing from this dump. */
3250  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
3251  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3252  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3253  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3254  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3255 
3256  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3257  if (s->chroma_y_shift){
3258  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3259  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3260  }else{
3261  dct_linesize >>= 1;
3262  dct_offset >>=1;
3263  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3264  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3265  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3266  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3267  }
3268  }
3269  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
3270  add_dct(s, block[0], 0, dest_y , dct_linesize);
3271  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3272  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3273  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3274 
3275  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3276  if(s->chroma_y_shift){//Chroma420
3277  add_dct(s, block[4], 4, dest_cb, uvlinesize);
3278  add_dct(s, block[5], 5, dest_cr, uvlinesize);
3279  }else{
3280  //chroma422
3281  dct_linesize = uvlinesize << s->interlaced_dct;
3282  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3283 
3284  add_dct(s, block[4], 4, dest_cb, dct_linesize);
3285  add_dct(s, block[5], 5, dest_cr, dct_linesize);
3286  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3287  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3288  if(!s->chroma_x_shift){//Chroma444
3289  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3290  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3291  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3292  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
3293  }
3294  }
3295  }//fi gray
3296  }
/* NOTE(review): doc line 3297 (the else-if guard for the WMV2 branch) is
 * missing from this dump. */
3298  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
3299  }
3300  } else {
3301  /* dct only in intra block */
/* NOTE(review): doc line 3302 (the condition selecting dequant+put versus
 * direct idct_put for intra blocks) is missing from this dump. */
3303  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
3304  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3305  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3306  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3307 
3308  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3309  if(s->chroma_y_shift){
3310  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
3311  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
3312  }else{
3313  dct_offset >>=1;
3314  dct_linesize >>=1;
3315  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
3316  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
3317  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
3318  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
3319  }
3320  }
3321  }else{
3322  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
3323  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3324  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3325  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3326 
3327  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
3328  if(s->chroma_y_shift){
3329  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
3330  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
3331  }else{
3332 
3333  dct_linesize = uvlinesize << s->interlaced_dct;
3334  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3335 
3336  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
3337  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
3338  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3339  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3340  if(!s->chroma_x_shift){//Chroma444
3341  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3342  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3343  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3344  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
3345  }
3346  }
3347  }//gray
3348  }
3349  }
3350 skip_idct:
/* copy from the scratchpad to the real destination when the MB was
 * reconstructed off-frame (non-readable case) */
3351  if(!readable){
3352  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
3353  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
3354  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
3355  }
3356  }
3357 }
3358 
3359 void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
3360 {
3361 #if !CONFIG_SMALL
3362  if(s->out_format == FMT_MPEG1) {
3363  if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 1);
3364  else mpv_decode_mb_internal(s, block, 0, 1);
3365  } else
3366 #endif
3367  if(s->avctx->lowres) mpv_decode_mb_internal(s, block, 1, 0);
3368  else mpv_decode_mb_internal(s, block, 0, 0);
3369 }
3370 
3372 {
3375  s->first_field, s->low_delay);
3376 }
3377 
/*
 * Initialize s->block_index[] (positions of the current macroblock's 8x8
 * blocks inside the edge-padded block-based arrays) and s->dest[]
 * (destination pointers into the current picture planes) for the macroblock
 * at (s->mb_x, s->mb_y).
 *
 * NOTE(review): this chunk is a doxygen extraction of mpegvideo.c. The source
 * line that originally preceded the lone "{" below (the condition guarding
 * the row-offset block, original line 3395) was lost during extraction, so
 * the code as shown here is syntactically incomplete — recover it from the
 * original file before editing.
 */
 3378 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
 3379  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
 3380  const int uvlinesize = s->current_picture.f->linesize[1];
 /* mb_size is log2 of the macroblock edge: 16 pixels at full resolution,
  * halved per lowres step. */
 3381  const int mb_size= 4 - s->avctx->lowres;
 3382 
 /* Luma block indices: two 8x8 blocks per row over b8_stride; the -2/-1
  * offsets appear to point at the left-edge columns — confirm against the
  * b8_stride allocation before relying on this. */
 3383  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
 3384  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
 3385  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
 3386  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
 /* Chroma block indices: offset past the luma region (b8_stride*mb_height*2)
  * of the shared index space — presumably the Cb then Cr areas; verify. */
 3387  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
 3388  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
 3389  //block_index is not used by mpeg2, so it is not affected by chroma_format
 3390 
 /* Horizontal part of the destination pointers; chroma shifted right by the
  * chroma subsampling factor. */
 3391  s->dest[0] = s->current_picture.f->data[0] + ((s->mb_x - 1) << mb_size);
 3392  s->dest[1] = s->current_picture.f->data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
 3393  s->dest[2] = s->current_picture.f->data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
 3394 
 /* NOTE(review): guard condition for this block was lost in extraction. */
 3396  {
 3397  if(s->picture_structure==PICT_FRAME){
 /* Frame picture: advance by whole macroblock rows. */
 3398  s->dest[0] += s->mb_y * linesize << mb_size;
 3399  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
 3400  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
 3401  }else{
 /* Field picture: mb_y is halved — presumably it counts rows across both
  * fields here; confirm against the field-picture callers. */
 3402  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
 3403  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
 3404  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
 3406  }
 3407  }
 3408 }
3409 
/**
 * Permute an 8x8 block of coefficients in place.
 * @param block       the coefficients; rearranged according to permutation
 * @param permutation maps each raster position to its permuted position
 * @param scantable   scan order used only to limit the work to the nonzero
 *                    coefficients — the block is NOT converted to (or from)
 *                    scantable order
 * @param last        index, in scantable order, of the last nonzero
 *                    coefficient; the block is left untouched when last <= 0
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    if (last <= 0)
        return;

    /* Pass 1: lift every coefficient up to "last" out of the block,
     * zeroing its original slot. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* Pass 2: drop each saved coefficient into its permuted slot. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = saved[pos];
    }
}
3438 
/*
 * Body of ff_mpeg_flush(AVCodecContext *avctx) — resets the decoder's
 * picture pool and parser state (used on flush/seek).
 *
 * NOTE(review): this is a doxygen extraction; the signature line (original
 * line 3439) and several body lines (3448, 3450-3452, 3458, 3460) were lost,
 * so the function is shown here incompletely — recover the missing lines
 * from the original file before editing.
 */
 3440  int i;
 3441  MpegEncContext *s = avctx->priv_data;
 3442 
 /* Nothing to do if the context or its picture array was never allocated. */
 3443  if (!s || !s->picture)
 3444  return;
 3445 
 /* Drop every reference held in the picture pool. */
 3446  for (i = 0; i < MAX_PICTURE_COUNT; i++)
 3447  ff_mpeg_unref_picture(s, &s->picture[i]);
 3449 
 3453 
 /* Reset macroblock position and GOP state. */
 3454  s->mb_x= s->mb_y= 0;
 3455  s->closed_gop= 0;
 3456 
 /* Reset the bitstream parser so stale partial frames are discarded. */
 3457  s->parse_context.state= -1;
 3459  s->parse_context.overread= 0;
 3461  s->parse_context.index= 0;
 3462  s->parse_context.last_index= 0;
 3463  s->bitstream_buffer_size=0;
 3464  s->pp_time=0;
 3465 }
3466 
3467 /**
3468  * set qscale and update qscale dependent variables.
3469  */
/*
 * Set s->qscale and update the qscale-dependent derived values.
 *
 * NOTE(review): this is a doxygen extraction; one assignment (original line
 * 3481, between the y_dc_scale line and the closing brace) was lost — the
 * chroma DC scale update presumably lives there; recover it from the
 * original file before editing.
 */
 3470 void ff_set_qscale(MpegEncContext * s, int qscale)
 3471 {
 /* Clamp qscale to the legal range [1, 31]. */
 3472  if (qscale < 1)
 3473  qscale = 1;
 3474  else if (qscale > 31)
 3475  qscale = 31;
 3476 
 3477  s->qscale = qscale;
 /* Chroma qscale goes through a lookup table, it is not simply the luma
  * qscale. */
 3478  s->chroma_qscale= s->chroma_qscale_table[qscale];
 3479 
 /* Luma DC scale is derived from the (clamped) qscale via its table. */
 3480  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
 3482 }
3483 
3485 {
3488 }
int last_time_base
Definition: mpegvideo.h:520
int bitstream_buffer_size
Definition: mpegvideo.h:548
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free
Definition: mpegvideo.h:162
int last
number of values for last = 0
Definition: rl.h:40
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2575
IDCTDSPContext idsp
Definition: mpegvideo.h:367
int ff_mpeg_ref_picture(MpegEncContext *s, Picture *dst, Picture *src)
Definition: mpegvideo.c:776
#define NULL
Definition: coverity.c:32
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:823
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
Definition: avcodec.h:1248
int16_t(* b_bidir_back_mv_table_base)[2]
Definition: mpegvideo.h:381
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:43
int table_size
Definition: get_bits.h:66
discard all frames except keyframes
Definition: avcodec.h:666
int8_t * ref_index[2]
Definition: mpegvideo.h:120
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:3378
float v
int picture_number
Definition: mpegvideo.h:262
const char * s
Definition: avisynth_c.h:669
#define MAX_PICTURE_COUNT
Definition: mpegvideo.h:72
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:126
ScanTable intra_v_scantable
Definition: mpegvideo.h:225
AVBufferRef * mb_var_buf
Definition: mpegvideo.h:122
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
static int shift(int a, int b)
Definition: sonic.c:82
#define CONFIG_WMV2_ENCODER
Definition: config.h:1284
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:649
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
This structure describes decoded (raw) audio or video data.
Definition: frame.h:163
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
Definition: internal.h:156
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Allocate a Picture.
Definition: mpegvideo.c:652
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
Definition: mpegvideo.h:385
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegvideo.h:337
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2585
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:288
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:404
#define MAKE_WRITABLE(table)
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:323
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegvideo.h:132
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1422
#define ARCH_PPC
Definition: config.h:29
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
Definition: mpegvideo.c:2081
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define IS_GMC(a)
Definition: mpegutils.h:81
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:181
#define CONFIG_XVMC
Definition: config.h:465
AVFrame * f
Definition: thread.h:36
int16_t src_x
Absolute source position.
Definition: motion_vector.h:38
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:2805
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideo.c:111
uint8_t * coded_block_base
Definition: mpegvideo.h:326
else temp
Definition: vf_mcdeint.c:257
static int update_picture_tables(Picture *dst, Picture *src)
Definition: mpegvideo.c:731
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:433
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:289
uint16_t * mb_var
Table for MB variances.
Definition: mpegvideo.h:123
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block.
Definition: mpegvideo.c:3418
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:329
MJPEG encoder.
av_cold void ff_init_vlc_rl(RLTable *rl, unsigned static_size)
Definition: mpegvideo.c:1711
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:267
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
Definition: h264chroma.h:27
void * opaque
for some private data of the user
Definition: frame.h:338
#define me
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:375
static void gray_frame(AVFrame *frame)
Definition: mpegvideo.c:1824
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:568
static const uint8_t mpeg2_dc_scale_table3[128]
Definition: mpegvideo.c:92
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
Definition: mpegvideo.h:146
#define HAVE_INTRINSICS_NEON
Definition: config.h:210
uint8_t * bitstream_buffer
Definition: mpegvideo.h:547
enum AVCodecID codec_id
Definition: mpegvideo.h:244
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: diracdec.c:74
void(* clear_blocks)(int16_t *blocks)
Definition: blockdsp.h:36
int field_picture
whether or not the picture was encoded in separate fields
Definition: mpegvideo.h:140
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:49
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1442
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
Definition: mpegvideo.h:391
int16_t(* p_mv_table_base)[2]
Definition: mpegvideo.h:377
static int make_tables_writable(Picture *pic)
Definition: mpegvideo.c:623
uint8_t raster_end[64]
Definition: idctdsp.h:32
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
#define VLC_TYPE
Definition: get_bits.h:61
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
uint32_t * score_map
map to store the scores
Definition: mpegvideo.h:168
mpegvideo header.
#define FF_ARRAY_ELEMS(a)
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
#define FF_DEBUG_VIS_MV_B_BACK
Definition: avcodec.h:2602
discard all
Definition: avcodec.h:667
uint8_t permutated[64]
Definition: idctdsp.h:31
const int8_t * table_level
Definition: rl.h:43
uint8_t run
Definition: svq3.c:149
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:873
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2725
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2586
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
Definition: mpegvideo.h:543
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:268
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:30
int frame_start_found
Definition: parser.h:34
int qscale
QP.
Definition: mpegvideo.h:341
RLTable.
Definition: rl.h:38
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:219
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
Definition: mpegvideo.h:387
int chroma_x_shift
Definition: mpegvideo.h:606
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:246
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:643
int field_select[2][2]
Definition: mpegvideo.h:412
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:647
int block_wrap[6]
Definition: mpegvideo.h:429
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:162
Macro definitions for various function/variable attributes.
#define FFALIGN(x, a)
Definition: common.h:86
int16_t(* b_back_mv_table_base)[2]
Definition: mpegvideo.h:379
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:893
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:3072
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:645
#define COLOR(theta, r)
#define FF_DEBUG_QP
Definition: avcodec.h:2568
int b_frame_score
Definition: mpegvideo.h:145
int av_codec_is_encoder(const AVCodec *codec)
Definition: utils.c:187
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegvideo.h:128
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:744
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2642
#define USES_LIST(a, list)
Definition: mpegutils.h:95
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:3371
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int8_t * max_run[2]
encoding & decoding
Definition: rl.h:46
int context_reinit
Definition: mpegvideo.h:677
int16_t * dc_val_base
Definition: mpegvideo.h:321
if()
Definition: avfilter.c:975
uint8_t
#define av_cold
Definition: attributes.h:74
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
enum OutputFormat out_format
output format
Definition: mpegvideo.h:236
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:86
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1533
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Definition: mpegvideo.h:335
#define FF_DEBUG_NOMC
Definition: avcodec.h:2590
Multithreading support functions.
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Definition: avcodec.h:832
qpel_mc_func(* qpel_put)[16]
Definition: mpegvideo.h:200
int16_t dst_x
Absolute destination position.
Definition: motion_vector.h:42
#define emms_c()
Definition: internal.h:50
static void release_unused_pictures(MpegEncContext *s)
Definition: mpegvideo.c:1755
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
Definition: mpegvideo.h:419
int interlaced_dct
Definition: mpegvideo.h:611
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:3359
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:315
int intra_dc_precision
Definition: mpegvideo.h:592
static AVFrame * frame
static int pic_is_unused(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:1766
quarterpel DSP functions
void ff_mpv_common_init_ppc(MpegEncContext *s)
static int alloc_picture_tables(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:581
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:388
float * cplx_tab
Definition: mpegvideo.h:673
int32_t source
Where the current macroblock comes from; negative value when it comes from the past, positive value when it comes from the future.
Definition: motion_vector.h:30
int8_t * max_level[2]
encoding & decoding
Definition: rl.h:45
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:3660
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:84
void(* decode_mb)(struct MpegEncContext *s)
Called for every Macroblock in a slice.
Definition: avcodec.h:3383
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:524
AVBufferRef * mb_type_buf
Definition: mpegvideo.h:113
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
Definition: mpegvideo.h:339
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: mpegvideo.c:340
int flags2
AVCodecContext.flags2.
Definition: mpegvideo.h:248
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:359
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:757
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:427
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:264
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2735
static av_always_inline void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:3113
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
Definition: mpegvideo.h:254
high precision timer, useful to profile code
int16_t(*[2][2] p_field_mv_table_base)[2]
Definition: mpegvideo.h:383
#define MAX_LEVEL
Definition: rl.h:35
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:821
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:3470
#define ROUNDED_DIV(a, b)
Definition: common.h:55
AVBufferRef * mb_mean_buf
Definition: mpegvideo.h:131
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:369
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:234
ThreadFrame tf
Definition: mpegvideo.h:105
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:322
enum AVCodecID id
Definition: avcodec.h:3187
int h263_plus
h263 plus headers
Definition: mpegvideo.h:241
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:291
unsigned int buffer_size
Definition: parser.h:32
int width
width and height of the video frame
Definition: frame.h:212
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:175
uint8_t * mbskip_table
Definition: mpegvideo.h:117
int last_dc[3]
last DC values for MPEG1
Definition: mpegvideo.h:320
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:3051
static int ff_h263_round_chroma(int x)
Definition: mpegvideo.h:844
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:761
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:330
#define ARCH_X86
Definition: config.h:38
int chroma_y_shift
Definition: mpegvideo.h:607
static int find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1777
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:537
#define AVERROR(e)
Definition: error.h:43
int frame_skip_threshold
frame skip threshold
Definition: avcodec.h:2406
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
#define CODEC_FLAG2_EXPORT_MVS
Export motion vectors through frame side data.
Definition: avcodec.h:771
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2057
static const struct endianess table[]
ERContext er
Definition: mpegvideo.h:679
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2770
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:356
uint8_t w
Width and height of the block.
Definition: motion_vector.h:34
int reference
Definition: mpegvideo.h:148
const char * r
Definition: vf_curves.c:107
#define FF_DEBUG_VIS_MV_B_FOR
Definition: avcodec.h:2601
int capabilities
Codec capabilities.
Definition: avcodec.h:3192
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:196
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegvideo.h:336
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:218
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:641
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:635
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1333
#define wrap(func)
Definition: neontest.h:62
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:3043
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
Definition: parser.h:36
#define PICT_TOP_FIELD
Definition: mpegutils.h:33
GLsizei GLsizei * length
Definition: opengl_enc.c:115
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:133
#define IS_SKIP(a)
Definition: mpegutils.h:77
#define FF_DEBUG_SKIP
Definition: avcodec.h:2576
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:533
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:426
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw a line from (ex, ey) -> (sx, sy).
Definition: mpegvideo.c:2110
int low_delay
no reordering needed / has no b-frames
Definition: mpegvideo.h:538
void ff_mpeg_unref_picture(MpegEncContext *s, Picture *pic)
Deallocate a picture.
Definition: mpegvideo.c:709
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:394
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
GLsizei count
Definition: opengl_enc.c:109
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1609
#define FFMAX(a, b)
Definition: common.h:79
Libavcodec external API header.
int8_t len
Definition: get_bits.h:71
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:447
uint8_t * mbintra_table
int * mb_index2xy
Definition: get_bits.h:63
static const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideo.c:50
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:3439
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:261
int * lambda_table
Definition: mpegvideo.h:345
uint8_t * error_status_table
AVBufferRef * hwaccel_priv_buf
Definition: mpegvideo.h:134
common internal API header
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:628
#define MAX_THREADS
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color, int tail, int direction)
Draw an arrow from (ex, ey) -> (sx, sy).
Definition: mpegvideo.c:2168
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideo.c:122
int n
number of entries of table_vlc minus 1
Definition: rl.h:39
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:241
int err_recognition
Definition: mpegvideo.h:495
AVBufferRef * motion_val_buf[2]
Definition: mpegvideo.h:110
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1476
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:2609
int progressive_frame
Definition: mpegvideo.h:609
#define IS_16X8(a)
Definition: mpegutils.h:83
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:234
#define UPDATE_PICTURE(pic)
int top_field_first
Definition: mpegvideo.h:594
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2610
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
uint8_t * er_temp_buffer
int overread
the number of bytes which where irreversibly read from the next frame
Definition: parser.h:35
#define FFMIN(a, b)
Definition: common.h:81
int last_index
Definition: parser.h:31
float y
#define IS_DIRECT(a)
Definition: mpegutils.h:80
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
Definition: mpegvideo.h:494
#define ARCH_ARM
Definition: config.h:19
const uint16_t(* table_vlc)[2]
Definition: rl.h:41
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:249
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:309
ret
Definition: avfilter.c:974
int width
picture width / height.
Definition: avcodec.h:1412
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
Definition: mpegvideo.h:331
int16_t(*[2] motion_val)[2]
Definition: mpegvideo.h:111
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:319
Picture.
Definition: mpegvideo.h:103
#define FF_CEIL_RSHIFT(a, b)
Definition: common.h:57
int alternate_scan
Definition: mpegvideo.h:598
unsigned int allocated_bitstream_buffer_size
Definition: mpegvideo.h:549
void * hwaccel_picture_private
hardware accelerator private data
Definition: mpegvideo.h:138
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
Definition: mpegvideo.h:328
int32_t
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
Definition: frame.h:96
const int8_t * table_run
Definition: rl.h:42
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:68
int16_t(*[2][2][2] b_field_mv_table_base)[2]
Definition: mpegvideo.h:384
int16_t(* b_forw_mv_table_base)[2]
Definition: mpegvideo.h:378
#define AV_RL32
Definition: intreadwrite.h:146
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:625
#define CONFIG_GRAY
Definition: config.h:453
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:218
MotionEstContext me
Definition: mpegvideo.h:417
float u
int n
Definition: avisynth_c.h:589
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:94
int mb_decision
macroblock decision mode
Definition: avcodec.h:1775
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:77
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:333
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: imgconvert.c:43
#define ME_MAP_SIZE
Definition: mpegvideo.h:76
#define FF_DEBUG_MB_TYPE
Definition: avcodec.h:2567
#define INIT_VLC_USE_NEW_STATIC
Definition: get_bits.h:474
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:945
RL_VLC_ELEM * rl_vlc[32]
decoding only
Definition: rl.h:47
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
Definition: mpegvideo.c:1113
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2751
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:428
#define IS_INTRA16x16(a)
Definition: mpegutils.h:72
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:432
int first_field
is 1 for the first field of a field picture 0 otherwise
Definition: mpegvideo.h:612
int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:3007
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static const int8_t mv[256][2]
Definition: 4xm.c:77
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:224
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:70
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:401
int frame_skip_factor
frame skip factor
Definition: avcodec.h:2413
static void clear_context(MpegEncContext *s)
Definition: mpegvideo.c:1291
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2.c:81
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegvideo.h:126
AVBufferRef * qscale_table_buf
Definition: mpegvideo.h:107
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:398
int16_t(* b_bidir_forw_mv_table_base)[2]
Definition: mpegvideo.h:380
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideo.c:104
int coded_picture_number
picture number in bitstream order
Definition: frame.h:266
uint16_t inter_matrix[64]
Definition: mpegvideo.h:437
#define IS_INTERLACED(a)
Definition: mpegutils.h:79
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegvideo.h:129
uint8_t * buffer
Definition: parser.h:29
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:290
AVS_Value src
Definition: avisynth_c.h:524
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:717
void ff_free_picture_tables(Picture *pic)
Definition: mpegvideo.c:561
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2763
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:269
enum AVCodecID codec_id
Definition: avcodec.h:1256
BlockDSPContext bdsp
Definition: mpegvideo.h:363
static av_const unsigned int ff_sqrt(unsigned int a)
Definition: mathops.h:214
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:58
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
Definition: avcodec.h:2932
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:191
int debug
debug
Definition: avcodec.h:2563
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
Definition: avcodec.h:1239
ScanTable intra_scantable
Definition: mpegvideo.h:223
uint8_t * data
The data buffer.
Definition: buffer.h:89
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
Definition: mpegvideo.h:327
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:232
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1271
uint8_t * data
Definition: frame.h:129
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:191
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideo.c:56
#define init_vlc(vlc, nb_bits, nb_codes,bits, bits_wrap, bits_size,codes, codes_wrap, codes_size,flags)
Definition: get_bits.h:457
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:403
void * buf
Definition: avisynth_c.h:595
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:2602
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:2226
GLint GLenum type
Definition: opengl_enc.c:105
uint32_t state
contains the last few bytes in MSB order
Definition: parser.h:33
Picture * picture
main picture buffer
Definition: mpegvideo.h:271
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
int progressive_sequence
Definition: mpegvideo.h:586
BYTE int const BYTE int int int height
Definition: avisynth_c.h:714
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2762
int coded_height
Definition: avcodec.h:1422
#define IS_16X16(a)
Definition: mpegutils.h:82
ScanTable intra_h_scantable
Definition: mpegvideo.h:224
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:80
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced b-frame encoding.
Definition: mpegvideo.h:392
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
Definition: mpegvideo.h:334
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:564
int closed_gop
MPEG1/2 GOP is closed.
Definition: mpegvideo.h:348
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideo.h:143
#define UPDATE_TABLE(table)
unsigned int avpriv_toupper4(unsigned int x)
Definition: utils.c:3652
struct AVFrame * f
Definition: mpegvideo.h:104
#define IS_8X16(a)
Definition: mpegutils.h:84
uint8_t * index_run[2]
encoding only
Definition: rl.h:44
int context_initialized
Definition: mpegvideo.h:259
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:117
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:270
#define s1
Definition: regdef.h:38
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1844
static int add_mb(AVMotionVector *mb, uint32_t mb_type, int dst_x, int dst_y, int src_x, int src_y, int direction)
Definition: mpegvideo.c:2207
int f_code
forward MV resolution
Definition: mpegvideo.h:375
#define COPY(a)
AVCodecContext * avctx
#define MV_DIR_FORWARD
Definition: mpegvideo.h:397
int max_b_frames
max number of b-frames for encoding
Definition: mpegvideo.h:249
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:349
int size
Size of data in bytes.
Definition: buffer.h:93
int h263_pred
use mpeg4/h263 ac/dc predictions
Definition: mpegvideo.h:237
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
Definition: mpegvideo.h:389
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:377
#define EDGE_WIDTH
Definition: mpegvideo.h:82
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:420
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:1171
#define IS_PCM(a)
Definition: mpegutils.h:73
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:393
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
Definition: mpegvideo.h:390
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:174
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:324
uint8_t level
Definition: svq3.c:150
qpel_mc_func(* qpel_avg)[16]
Definition: mpegvideo.h:201
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:411
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
Definition: mpegvideo.h:386
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:266
int noise_reduction
noise reduction strength
Definition: avcodec.h:1807
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
Definition: h264chroma.h:24
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:280
MpegEncContext.
Definition: mpegvideo.h:213
uint8_t run
Definition: get_bits.h:72
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:318
int8_t * qscale_table
Definition: mpegvideo.h:108
#define MAX_RUN
Definition: rl.h:34
struct AVCodecContext * avctx
Definition: mpegvideo.h:230
A reference to a data buffer.
Definition: buffer.h:81
discard all non reference
Definition: avcodec.h:663
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
static void exchange_uv(MpegEncContext *s)
Definition: mpegvideo.c:814
MpegVideoDSPContext mdsp
Definition: mpegvideo.h:369
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:513
int(* dct_error_sum)[64]
Definition: mpegvideo.h:466
uint64_t flags
Extra flag information.
Definition: motion_vector.h:47
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1778
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:265
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Definition: mpegvideo.c:1118
AVBufferRef * mbskip_table_buf
Definition: mpegvideo.h:116
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:736
#define ARCH_ALPHA
Definition: config.h:18
uint8_t * dest[3]
Definition: mpegvideo.h:430
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:129
int shared
Definition: mpegvideo.h:149
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:382
static double c[64]
int last_pict_type
Definition: mpegvideo.h:351
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:314
int16_t * dc_val[3]
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:297
uint8_t * obmc_scratchpad
Definition: mpegvideo.h:338
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
Allocate a frame buffer.
Definition: mpegvideo.c:482
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:317
Bi-dir predicted.
Definition: avutil.h:269
int index
Definition: parser.h:30
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2507
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (h263)
Definition: mpegvideo.h:325
uint32_t * map
map to avoid duplicate evaluations
Definition: mpegvideo.h:167
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:920
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:637
static int lowres
Definition: ffplay.c:323
H264ChromaContext h264chroma
Definition: mpegvideo.h:365
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:628
#define IS_INTRA(x, y)
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
Definition: h264chroma.h:28
int slices
Number of slices.
Definition: avcodec.h:1974
void * priv_data
Definition: avcodec.h:1281
#define PICT_FRAME
Definition: mpegutils.h:35
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:1372
#define IS_INTRA4x4(a)
Definition: mpegutils.h:71
int picture_structure
Definition: mpegvideo.h:590
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
VideoDSPContext vdsp
Definition: mpegvideo.h:373
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:241
#define IS_8X8(a)
Definition: mpegutils.h:85
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:364
int len
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:2071
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:405
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3372
av_cold void ff_init_rl(RLTable *rl, uint8_t static_store[2][2 *MAX_RUN+MAX_LEVEL+3])
Definition: mpegvideo.c:1659
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:627
ParseContext parse_context
Definition: mpegvideo.h:497
VLC_TYPE(* table)[2]
code, bits
Definition: get_bits.h:65
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:3059
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:303
AVBufferRef * mc_mb_var_buf
Definition: mpegvideo.h:125
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:229
static const uint8_t mpeg2_dc_scale_table1[128]
Definition: mpegvideo.c:68
#define IS_ACPRED(a)
Definition: mpegutils.h:90
#define CONFIG_WMV2_DECODER
Definition: config.h:770
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:2668
int16_t level
Definition: get_bits.h:70
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1340
#define HAVE_THREADS
Definition: config.h:340
static int init_er(MpegEncContext *s)
Definition: mpegvideo.c:1132
static const uint8_t mpeg2_dc_scale_table2[128]
Definition: mpegvideo.c:80
int chroma_qscale
chroma QP
Definition: mpegvideo.h:342
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:639
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2014
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:1090
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
Definition: mpegvideo.c:1484
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:2621
int height
Definition: frame.h:212
int flags
AVCodecContext.flags (HQ, MV4, ...)
Definition: mpegvideo.h:247
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:435
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegvideo.h:114
#define av_freep(p)
void INT64 start
Definition: avisynth_c.h:595
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
Definition: mpegvideo.h:253
ScanTable inter_scantable
if inter == intra then intra should be used to reduce tha cache usage
Definition: mpegvideo.h:222
#define av_always_inline
Definition: attributes.h:37
uint8_t * temp
Definition: mpegvideo.h:165
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:169
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
Definition: common.h:84
#define stride
int debug_mv
debug Code outside libavcodec should access this field using AVOptions
Definition: avcodec.h:2599
int ff_find_unused_picture(MpegEncContext *s, int shared)
Definition: mpegvideo.c:1810
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
Definition: mpegvideo.h:402
#define FF_DEBUG_VIS_MV_P_FOR
Definition: avcodec.h:2600
int16_t(* b_direct_mv_table_base)[2]
Definition: mpegvideo.h:382
int b_code
backward MV resolution for B Frames (mpeg4)
Definition: mpegvideo.h:376
float * bits_tab
Definition: mpegvideo.h:673
uint8_t * mbskip_table
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideo.h:142
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo.c:3484
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:967
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:2873
static int frame_size_alloc(MpegEncContext *s, int linesize)
Definition: mpegvideo.c:445
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:138
Predicted.
Definition: avutil.h:268
AVBufferRef * ref_index_buf[2]
Definition: mpegvideo.h:119
HpelDSPContext hdsp
Definition: mpegvideo.h:366
static int width
static int16_t block[64]
Definition: dct-test.c:110