FFmpeg  4.3
mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/motion_vector.h"
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "h264chroma.h"
38 #include "idctdsp.h"
39 #include "internal.h"
40 #include "mathops.h"
41 #include "mpeg_er.h"
42 #include "mpegutils.h"
43 #include "mpegvideo.h"
44 #include "mpegvideodata.h"
45 #include "mjpegenc.h"
46 #include "msmpeg4.h"
47 #include "qpeldsp.h"
48 #include "thread.h"
49 #include "wmv2.h"
50 #include <limits.h>
51 
52 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
53  int16_t *block, int n, int qscale)
54 {
55  int i, level, nCoeffs;
56  const uint16_t *quant_matrix;
57 
58  nCoeffs= s->block_last_index[n];
59 
60  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
61  /* XXX: only MPEG-1 */
62  quant_matrix = s->intra_matrix;
63  for(i=1;i<=nCoeffs;i++) {
64  int j= s->intra_scantable.permutated[i];
65  level = block[j];
66  if (level) {
67  if (level < 0) {
68  level = -level;
69  level = (int)(level * qscale * quant_matrix[j]) >> 3;
70  level = (level - 1) | 1;
71  level = -level;
72  } else {
73  level = (int)(level * qscale * quant_matrix[j]) >> 3;
74  level = (level - 1) | 1;
75  }
76  block[j] = level;
77  }
78  }
79 }
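/* Editor's note (illustrative, not part of the original file): a worked
 * example of the MPEG-1 intra reconstruction above. For an AC coefficient
 * with level = 10, qscale = 8 and quant_matrix[j] = 16:
 *
 *     level = (10 * 8 * 16) >> 3;    // 1280 >> 3 = 160
 *     level = (160 - 1) | 1;         // 159, forced odd
 *
 * Forcing each result odd is MPEG-1's mismatch control: it keeps slightly
 * different encoder and decoder IDCT implementations from drifting apart.
 * The sign is split off first so the rounding stays symmetric around zero. */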
80 
81 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
82  int16_t *block, int n, int qscale)
83 {
84  int i, level, nCoeffs;
85  const uint16_t *quant_matrix;
86 
87  nCoeffs= s->block_last_index[n];
88 
89  quant_matrix = s->inter_matrix;
90  for(i=0; i<=nCoeffs; i++) {
91  int j= s->intra_scantable.permutated[i];
92  level = block[j];
93  if (level) {
94  if (level < 0) {
95  level = -level;
96  level = (((level << 1) + 1) * qscale *
97  ((int) (quant_matrix[j]))) >> 4;
98  level = (level - 1) | 1;
99  level = -level;
100  } else {
101  level = (((level << 1) + 1) * qscale *
102  ((int) (quant_matrix[j]))) >> 4;
103  level = (level - 1) | 1;
104  }
105  block[j] = level;
106  }
107  }
108 }
109 
110 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
111  int16_t *block, int n, int qscale)
112 {
113  int i, level, nCoeffs;
114  const uint16_t *quant_matrix;
115 
116  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
117  else qscale <<= 1;
118 
119  if(s->alternate_scan) nCoeffs= 63;
120  else nCoeffs= s->block_last_index[n];
121 
122  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
123  quant_matrix = s->intra_matrix;
124  for(i=1;i<=nCoeffs;i++) {
125  int j= s->intra_scantable.permutated[i];
126  level = block[j];
127  if (level) {
128  if (level < 0) {
129  level = -level;
130  level = (int)(level * qscale * quant_matrix[j]) >> 4;
131  level = -level;
132  } else {
133  level = (int)(level * qscale * quant_matrix[j]) >> 4;
134  }
135  block[j] = level;
136  }
137  }
138 }
139 
140 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
141  int16_t *block, int n, int qscale)
142 {
143  int i, level, nCoeffs;
144  const uint16_t *quant_matrix;
145  int sum=-1;
146 
147  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
148  else qscale <<= 1;
149 
150  if(s->alternate_scan) nCoeffs= 63;
151  else nCoeffs= s->block_last_index[n];
152 
153  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
154  sum += block[0];
155  quant_matrix = s->intra_matrix;
156  for(i=1;i<=nCoeffs;i++) {
157  int j= s->intra_scantable.permutated[i];
158  level = block[j];
159  if (level) {
160  if (level < 0) {
161  level = -level;
162  level = (int)(level * qscale * quant_matrix[j]) >> 4;
163  level = -level;
164  } else {
165  level = (int)(level * qscale * quant_matrix[j]) >> 4;
166  }
167  block[j] = level;
168  sum+=level;
169  }
170  }
171  block[63]^=sum&1;
172 }
173 
174 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
175  int16_t *block, int n, int qscale)
176 {
177  int i, level, nCoeffs;
178  const uint16_t *quant_matrix;
179  int sum=-1;
180 
181  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
182  else qscale <<= 1;
183 
184  if(s->alternate_scan) nCoeffs= 63;
185  else nCoeffs= s->block_last_index[n];
186 
187  quant_matrix = s->inter_matrix;
188  for(i=0; i<=nCoeffs; i++) {
189  int j= s->intra_scantable.permutated[i];
190  level = block[j];
191  if (level) {
192  if (level < 0) {
193  level = -level;
194  level = (((level << 1) + 1) * qscale *
195  ((int) (quant_matrix[j]))) >> 5;
196  level = -level;
197  } else {
198  level = (((level << 1) + 1) * qscale *
199  ((int) (quant_matrix[j]))) >> 5;
200  }
201  block[j] = level;
202  sum+=level;
203  }
204  }
205  block[63]^=sum&1;
206 }
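/* Editor's note (illustrative): MPEG-2 replaces MPEG-1's per-coefficient
 * oddification with a single parity correction per block. sum starts at -1
 * and accumulates every output level, so (sum & 1) is 1 exactly when the
 * level total is even; XORing that into block[63] flips the LSB of the last
 * coefficient, making the overall coefficient sum odd as the spec's
 * mismatch control requires. E.g. levels summing to 20 give sum = 19,
 * sum & 1 = 1, and block[63]'s lowest bit is toggled. */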
207 
208 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
209  int16_t *block, int n, int qscale)
210 {
211  int i, level, qmul, qadd;
212  int nCoeffs;
213 
214  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
215 
216  qmul = qscale << 1;
217 
218  if (!s->h263_aic) {
219  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
220  qadd = (qscale - 1) | 1;
221  }else{
222  qadd = 0;
223  }
224  if(s->ac_pred)
225  nCoeffs=63;
226  else
227  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
228 
229  for(i=1; i<=nCoeffs; i++) {
230  level = block[i];
231  if (level) {
232  if (level < 0) {
233  level = level * qmul - qadd;
234  } else {
235  level = level * qmul + qadd;
236  }
237  block[i] = level;
238  }
239  }
240 }
241 
242 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
243  int16_t *block, int n, int qscale)
244 {
245  int i, level, qmul, qadd;
246  int nCoeffs;
247 
248  av_assert2(s->block_last_index[n]>=0);
249 
250  qadd = (qscale - 1) | 1;
251  qmul = qscale << 1;
252 
253  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
254 
255  for(i=0; i<=nCoeffs; i++) {
256  level = block[i];
257  if (level) {
258  if (level < 0) {
259  level = level * qmul - qadd;
260  } else {
261  level = level * qmul + qadd;
262  }
263  block[i] = level;
264  }
265  }
266 }
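/* Editor's note (illustrative, not part of the original file): the H.263
 * reconstruction above is |level'| = qmul * |level| + qadd with
 * qmul = 2 * qscale and qadd = (qscale - 1) | 1 (forced odd). A worked
 * example for qscale = 6:
 *
 *     qmul = 12; qadd = 5;
 *     level =  3   ->   3 * 12 + 5 =  41
 *     level = -3   ->  -3 * 12 - 5 = -41
 */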
267 
268 
269 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
270 {
271  while(h--)
272  memset(dst + h*linesize, 128, 16);
273 }
274 
275 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
276 {
277  while(h--)
278  memset(dst + h*linesize, 128, 8);
279 }
280 
281 /* init common dct for both encoder and decoder */
282 static av_cold int dct_init(MpegEncContext *s)
283 {
284  ff_blockdsp_init(&s->bdsp, s->avctx);
285  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
286  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
287  ff_mpegvideodsp_init(&s->mdsp);
288  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
289 
290  if (s->avctx->debug & FF_DEBUG_NOMC) {
291  int i;
292  for (i=0; i<4; i++) {
293  s->hdsp.avg_pixels_tab[0][i] = gray16;
294  s->hdsp.put_pixels_tab[0][i] = gray16;
295  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
296 
297  s->hdsp.avg_pixels_tab[1][i] = gray8;
298  s->hdsp.put_pixels_tab[1][i] = gray8;
299  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
300  }
301  }
302 
303  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
304  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
305  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
306  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
307  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
308  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
309  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
310  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
311 
312  if (HAVE_INTRINSICS_NEON)
313  ff_mpv_common_init_neon(s);
314 
315  if (ARCH_ALPHA)
316  ff_mpv_common_init_axp(s);
317  if (ARCH_ARM)
318  ff_mpv_common_init_arm(s);
319  if (ARCH_PPC)
320  ff_mpv_common_init_ppc(s);
321  if (ARCH_X86)
322  ff_mpv_common_init_x86(s);
323  if (ARCH_MIPS)
324  ff_mpv_common_init_mips(s);
325 
326  return 0;
327 }
328 
329 av_cold void ff_mpv_idct_init(MpegEncContext *s)
330 {
331  if (s->codec_id == AV_CODEC_ID_MPEG4)
332  s->idsp.mpeg4_studio_profile = s->studio_profile;
333  ff_idctdsp_init(&s->idsp, s->avctx);
334 
335  /* load & permute scantables
336  * note: only wmv uses different ones
337  */
338  if (s->alternate_scan) {
339  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
340  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
341  } else {
342  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
343  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
344  }
345  ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
346  ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
347 }
348 
349 static int alloc_picture(MpegEncContext *s, Picture *pic)
350 {
351  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
352  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
353  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
354  &s->linesize, &s->uvlinesize);
355 }
356 
357 static int init_duplicate_context(MpegEncContext *s)
358 {
359  int y_size = s->b8_stride * (2 * s->mb_height + 1);
360  int c_size = s->mb_stride * (s->mb_height + 1);
361  int yc_size = y_size + 2 * c_size;
362  int i;
363 
364  if (s->mb_height & 1)
365  yc_size += 2*s->b8_stride + 2*s->mb_stride;
366 
367  s->sc.edge_emu_buffer =
368  s->me.scratchpad =
369  s->me.temp =
370  s->sc.rd_scratchpad =
371  s->sc.b_scratchpad =
372  s->sc.obmc_scratchpad = NULL;
373 
374  if (s->encoding) {
375  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
376  ME_MAP_SIZE * sizeof(uint32_t), fail)
377  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
378  ME_MAP_SIZE * sizeof(uint32_t), fail)
379  if (s->noise_reduction) {
380  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
381  2 * 64 * sizeof(int), fail)
382  }
383  }
384  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
385  s->block = s->blocks[0];
386 
387  for (i = 0; i < 12; i++) {
388  s->pblocks[i] = &s->block[i];
389  }
390 
391  FF_ALLOCZ_OR_GOTO(s->avctx, s->block32, sizeof(*s->block32), fail)
392  s->dpcm_direction = 0;
393  FF_ALLOCZ_OR_GOTO(s->avctx, s->dpcm_macroblock, sizeof(*s->dpcm_macroblock), fail)
394 
395  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
396  // exchange uv
397  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
398  }
399 
400  if (s->out_format == FMT_H263) {
401  /* ac values */
402  FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
403  yc_size * sizeof(int16_t) * 16, fail);
404  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
405  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
406  s->ac_val[2] = s->ac_val[1] + c_size;
407  }
408 
409  return 0;
410 fail:
411  return AVERROR(ENOMEM); // free() through ff_mpv_common_end()
412 }
413 
414 static void free_duplicate_context(MpegEncContext *s)
415 {
416  if (!s)
417  return;
418 
419  av_freep(&s->sc.edge_emu_buffer);
420  av_freep(&s->me.scratchpad);
421  s->me.temp =
422  s->sc.rd_scratchpad =
423  s->sc.b_scratchpad =
424  s->sc.obmc_scratchpad = NULL;
425 
426  av_freep(&s->dct_error_sum);
427  av_freep(&s->me.map);
428  av_freep(&s->me.score_map);
429  av_freep(&s->blocks);
430  av_freep(&s->block32);
431  av_freep(&s->dpcm_macroblock);
432  av_freep(&s->ac_val_base);
433  s->block = NULL;
434 }
435 
436 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
437 {
438 #define COPY(a) bak->a = src->a
439  COPY(sc.edge_emu_buffer);
440  COPY(me.scratchpad);
441  COPY(me.temp);
442  COPY(sc.rd_scratchpad);
443  COPY(sc.b_scratchpad);
444  COPY(sc.obmc_scratchpad);
445  COPY(me.map);
446  COPY(me.score_map);
447  COPY(blocks);
448  COPY(block);
449  COPY(block32);
450  COPY(dpcm_macroblock);
451  COPY(dpcm_direction);
452  COPY(start_mb_y);
453  COPY(end_mb_y);
454  COPY(me.map_generation);
455  COPY(pb);
456  COPY(dct_error_sum);
457  COPY(dct_count[0]);
458  COPY(dct_count[1]);
459  COPY(ac_val_base);
460  COPY(ac_val[0]);
461  COPY(ac_val[1]);
462  COPY(ac_val[2]);
463 #undef COPY
464 }
465 
466 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
467 {
468  MpegEncContext bak;
469  int i, ret;
470  // FIXME copy only needed parts
471  backup_duplicate_context(&bak, dst);
472  memcpy(dst, src, sizeof(MpegEncContext));
473  backup_duplicate_context(dst, &bak);
474  for (i = 0; i < 12; i++) {
475  dst->pblocks[i] = &dst->block[i];
476  }
477  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
478  // exchange uv
479  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
480  }
481  if (!dst->sc.edge_emu_buffer &&
482  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
483  &dst->sc, dst->linesize)) < 0) {
484  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
485  "scratch buffers.\n");
486  return ret;
487  }
488  return 0;
489 }
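/* Editor's note: the backup/memcpy/restore sequence above is a generic
 * trick for bulk-copying a context while keeping the destination's own
 * allocations. A minimal sketch of the pattern (hypothetical Ctx type and
 * save_pointers() helper, not an FFmpeg API):
 *
 *     Ctx bak;
 *     save_pointers(&bak, dst);         // stash dst's buffer pointers
 *     memcpy(dst, src, sizeof(*dst));   // copy every field from src
 *     save_pointers(dst, &bak);         // put dst's pointers back
 *
 * Anything that is not plain data still has to be patched up afterwards,
 * which is what the pblocks[] loop and the ff_mpeg_framesize_alloc() call
 * after the memcpy do. */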
490 
491 int ff_mpeg_update_thread_context(AVCodecContext *dst,
492  const AVCodecContext *src)
493 {
494  int i, ret;
495  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
496 
497  if (dst == src)
498  return 0;
499 
500  av_assert0(s != s1);
501 
502  // FIXME can parameters change on I-frames?
503  // in that case dst may need a reinit
504  if (!s->context_initialized) {
505  int err;
506  memcpy(s, s1, sizeof(MpegEncContext));
507 
508  s->avctx = dst;
509  s->bitstream_buffer = NULL;
510  s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
511 
512  if (s1->context_initialized){
513 // s->picture_range_start += MAX_PICTURE_COUNT;
514 // s->picture_range_end += MAX_PICTURE_COUNT;
515  ff_mpv_idct_init(s);
516  if((err = ff_mpv_common_init(s)) < 0){
517  memset(s, 0, sizeof(MpegEncContext));
518  s->avctx = dst;
519  return err;
520  }
521  }
522  }
523 
524  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
525  s->context_reinit = 0;
526  s->height = s1->height;
527  s->width = s1->width;
528  if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
529  return ret;
530  }
531 
532  s->avctx->coded_height = s1->avctx->coded_height;
533  s->avctx->coded_width = s1->avctx->coded_width;
534  s->avctx->width = s1->avctx->width;
535  s->avctx->height = s1->avctx->height;
536 
537  s->quarter_sample = s1->quarter_sample;
538 
539  s->coded_picture_number = s1->coded_picture_number;
540  s->picture_number = s1->picture_number;
541 
542  av_assert0(!s->picture || s->picture != s1->picture);
543  if(s->picture)
544  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
545  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
546  if (s1->picture && s1->picture[i].f->buf[0] &&
547  (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
548  return ret;
549  }
550 
551 #define UPDATE_PICTURE(pic)\
552 do {\
553  ff_mpeg_unref_picture(s->avctx, &s->pic);\
554  if (s1->pic.f && s1->pic.f->buf[0])\
555  ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
556  else\
557  ret = ff_update_picture_tables(&s->pic, &s1->pic);\
558  if (ret < 0)\
559  return ret;\
560 } while (0)
561 
562  UPDATE_PICTURE(current_picture);
563  UPDATE_PICTURE(last_picture);
564  UPDATE_PICTURE(next_picture);
565 
566 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
567  ((pic && pic >= old_ctx->picture && \
568  pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
569  &new_ctx->picture[pic - old_ctx->picture] : NULL)
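/* Editor's note (illustrative): REBASE_PICTURE translates a Picture pointer
 * from the source thread's picture[] array to the same slot of the
 * destination's array using index arithmetic. E.g. if
 * s1->last_picture_ptr == &s1->picture[3], the macro yields &s->picture[3];
 * NULL, or a pointer that does not point into s1->picture[], maps to NULL. */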
570 
571  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
572  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
573  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
574 
575  // Error/bug resilience
576  s->next_p_frame_damaged = s1->next_p_frame_damaged;
577  s->workaround_bugs = s1->workaround_bugs;
578  s->padding_bug_score = s1->padding_bug_score;
579 
580  // MPEG-4 timing info
581  memcpy(&s->last_time_base, &s1->last_time_base,
582  (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
583  (char *) &s1->last_time_base);
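/* Editor's note: the memcpy above copies every MpegEncContext member
 * declared between last_time_base and pb_field_time (inclusive); the byte
 * count is derived from member addresses, so it silently depends on the
 * field order in mpegvideo.h. The general pattern, with hypothetical
 * member names first/last, is:
 *
 *     memcpy(&dst->first, &src->first,
 *            (char *) &src->last + sizeof(src->last) - (char *) &src->first);
 */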
584 
585  // B-frame info
586  s->max_b_frames = s1->max_b_frames;
587  s->low_delay = s1->low_delay;
588  s->droppable = s1->droppable;
589 
590  // DivX handling (doesn't work)
591  s->divx_packed = s1->divx_packed;
592 
593  if (s1->bitstream_buffer) {
594  if (s1->bitstream_buffer_size +
595  AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
596  av_fast_malloc(&s->bitstream_buffer,
597  &s->allocated_bitstream_buffer_size,
598  s1->allocated_bitstream_buffer_size);
599  if (!s->bitstream_buffer) {
600  s->bitstream_buffer_size = 0;
601  return AVERROR(ENOMEM);
602  }
603  }
604  s->bitstream_buffer_size = s1->bitstream_buffer_size;
605  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
606  s1->bitstream_buffer_size);
607  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
608  AV_INPUT_BUFFER_PADDING_SIZE);
609  }
610 
611  // linesize-dependent scratch buffer allocation
612  if (!s->sc.edge_emu_buffer)
613  if (s1->linesize) {
614  if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
615  &s->sc, s1->linesize) < 0) {
616  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
617  "scratch buffers.\n");
618  return AVERROR(ENOMEM);
619  }
620  } else {
621  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
622  "be allocated due to unknown size.\n");
623  }
624 
625  // MPEG-2/interlacing info
626  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
627  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
628 
629  if (!s1->first_field) {
630  s->last_pict_type = s1->pict_type;
631  if (s1->current_picture_ptr)
632  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
633  }
634 
635  return 0;
636 }
637 
638 /**
639  * Set the given MpegEncContext to common defaults
640  * (same for encoding and decoding).
641  * The changed fields will not depend upon the
642  * prior state of the MpegEncContext.
643  */
644 void ff_mpv_common_defaults(MpegEncContext *s)
645 {
646  s->y_dc_scale_table =
647  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
648  s->chroma_qscale_table = ff_default_chroma_qscale_table;
649  s->progressive_frame = 1;
650  s->progressive_sequence = 1;
651  s->picture_structure = PICT_FRAME;
652 
653  s->coded_picture_number = 0;
654  s->picture_number = 0;
655 
656  s->f_code = 1;
657  s->b_code = 1;
658 
659  s->slice_context_count = 1;
660 }
661 
662 /**
663  * Set the given MpegEncContext to defaults for decoding.
664  * The changed fields will not depend upon
665  * the prior state of the MpegEncContext.
666  */
667 void ff_mpv_decode_defaults(MpegEncContext *s)
668 {
669  ff_mpv_common_defaults(s);
670 }
671 
672 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
673 {
674  s->avctx = avctx;
675  s->width = avctx->coded_width;
676  s->height = avctx->coded_height;
677  s->codec_id = avctx->codec->id;
678  s->workaround_bugs = avctx->workaround_bugs;
679 
680  /* convert fourcc to upper case */
681  s->codec_tag = avpriv_toupper4(avctx->codec_tag);
682 }
683 
684 /**
685  * Initialize and allocate MpegEncContext fields dependent on the resolution.
686  */
687 static int init_context_frame(MpegEncContext *s)
688 {
689  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
690 
691  s->mb_width = (s->width + 15) / 16;
692  s->mb_stride = s->mb_width + 1;
693  s->b8_stride = s->mb_width * 2 + 1;
694  mb_array_size = s->mb_height * s->mb_stride;
695  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
696 
697  /* set default edge pos, will be overridden
698  * in decode_header if needed */
699  s->h_edge_pos = s->mb_width * 16;
700  s->v_edge_pos = s->mb_height * 16;
701 
702  s->mb_num = s->mb_width * s->mb_height;
703 
704  s->block_wrap[0] =
705  s->block_wrap[1] =
706  s->block_wrap[2] =
707  s->block_wrap[3] = s->b8_stride;
708  s->block_wrap[4] =
709  s->block_wrap[5] = s->mb_stride;
710 
711  y_size = s->b8_stride * (2 * s->mb_height + 1);
712  c_size = s->mb_stride * (s->mb_height + 1);
713  yc_size = y_size + 2 * c_size;
714 
715  if (s->mb_height & 1)
716  yc_size += 2*s->b8_stride + 2*s->mb_stride;
717 
718  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
719  fail); // error resilience code looks cleaner with this
720  for (y = 0; y < s->mb_height; y++)
721  for (x = 0; x < s->mb_width; x++)
722  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
723 
724  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
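/* Editor's note (illustrative): mb_index2xy maps a raster macroblock index
 * (stride mb_width) to the internally padded layout (stride
 * mb_stride = mb_width + 1). With mb_width = 4 and mb_stride = 5, raster
 * index 7 (x = 3, y = 1) maps to 3 + 1 * 5 = 8. */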
725 
726  if (s->encoding) {
727  /* Allocate MV tables */
728  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
729  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
730  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
731  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
732  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
733  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
734  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
735  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
736  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
737  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
738  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
739  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
740 
741  /* Allocate MB type table */
742  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
743 
744  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
745 
746  FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
747  mb_array_size * sizeof(float), fail);
748  FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
749  mb_array_size * sizeof(float), fail);
750 
751  }
752 
753  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
754  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
755  /* interlaced direct mode decoding tables */
756  for (i = 0; i < 2; i++) {
757  int j, k;
758  for (j = 0; j < 2; j++) {
759  for (k = 0; k < 2; k++) {
760  FF_ALLOCZ_OR_GOTO(s->avctx,
761  s->b_field_mv_table_base[i][j][k],
762  mv_table_size * 2 * sizeof(int16_t),
763  fail);
764  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
765  s->mb_stride + 1;
766  }
767  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
768  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
769  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
770  }
771  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
772  }
773  }
774  if (s->out_format == FMT_H263) {
775  /* cbp values */
776  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
777  s->coded_block = s->coded_block_base + s->b8_stride + 1;
778 
779  /* cbp, ac_pred, pred_dir */
780  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
781  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
782  }
783 
784  if (s->h263_pred || s->h263_plus || !s->encoding) {
785  /* dc values */
786  // MN: we need these for error resilience of intra-frames
787  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
788  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
789  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
790  s->dc_val[2] = s->dc_val[1] + c_size;
791  for (i = 0; i < yc_size; i++)
792  s->dc_val_base[i] = 1024;
793  }
794 
795  /* which mb is an intra block */
796  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
797  memset(s->mbintra_table, 1, mb_array_size);
798 
799  /* init macroblock skip table */
800  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
801  // Note the + 1 is for a quicker MPEG-4 slice_end detection
802 
803  return ff_mpeg_er_init(s);
804 fail:
805  return AVERROR(ENOMEM);
806 }
807 
808 static void clear_context(MpegEncContext *s)
809 {
810  int i, j, k;
811 
812  memset(&s->next_picture, 0, sizeof(s->next_picture));
813  memset(&s->last_picture, 0, sizeof(s->last_picture));
814  memset(&s->current_picture, 0, sizeof(s->current_picture));
815  memset(&s->new_picture, 0, sizeof(s->new_picture));
816 
817  memset(s->thread_context, 0, sizeof(s->thread_context));
818 
819  s->me.map = NULL;
820  s->me.score_map = NULL;
821  s->dct_error_sum = NULL;
822  s->block = NULL;
823  s->blocks = NULL;
824  s->block32 = NULL;
825  memset(s->pblocks, 0, sizeof(s->pblocks));
826  s->dpcm_direction = 0;
827  s->dpcm_macroblock = NULL;
828  s->ac_val_base = NULL;
829  s->ac_val[0] =
830  s->ac_val[1] =
831  s->ac_val[2] =NULL;
832  s->sc.edge_emu_buffer = NULL;
833  s->me.scratchpad = NULL;
834  s->me.temp =
835  s->sc.rd_scratchpad =
836  s->sc.b_scratchpad =
837  s->sc.obmc_scratchpad = NULL;
838 
839 
840  s->bitstream_buffer = NULL;
841  s->allocated_bitstream_buffer_size = 0;
842  s->picture = NULL;
843  s->mb_type = NULL;
844  s->p_mv_table_base = NULL;
845  s->b_forw_mv_table_base = NULL;
846  s->b_back_mv_table_base = NULL;
847  s->b_bidir_forw_mv_table_base = NULL;
848  s->b_bidir_back_mv_table_base = NULL;
849  s->b_direct_mv_table_base = NULL;
850  s->p_mv_table = NULL;
851  s->b_forw_mv_table = NULL;
852  s->b_back_mv_table = NULL;
853  s->b_bidir_forw_mv_table = NULL;
854  s->b_bidir_back_mv_table = NULL;
855  s->b_direct_mv_table = NULL;
856  for (i = 0; i < 2; i++) {
857  for (j = 0; j < 2; j++) {
858  for (k = 0; k < 2; k++) {
859  s->b_field_mv_table_base[i][j][k] = NULL;
860  s->b_field_mv_table[i][j][k] = NULL;
861  }
862  s->b_field_select_table[i][j] = NULL;
863  s->p_field_mv_table_base[i][j] = NULL;
864  s->p_field_mv_table[i][j] = NULL;
865  }
866  s->p_field_select_table[i] = NULL;
867  }
868 
869  s->dc_val_base = NULL;
870  s->coded_block_base = NULL;
871  s->mbintra_table = NULL;
872  s->cbp_table = NULL;
873  s->pred_dir_table = NULL;
874 
875  s->mbskip_table = NULL;
876 
877  s->er.error_status_table = NULL;
878  s->er.er_temp_buffer = NULL;
879  s->mb_index2xy = NULL;
880  s->lambda_table = NULL;
881 
882  s->cplx_tab = NULL;
883  s->bits_tab = NULL;
884 }
885 
886 /**
887  * Initialize the common structure for both encoder and decoder.
888  * This assumes that some variables like width/height are already set.
889  */
890 av_cold int ff_mpv_common_init(MpegEncContext *s)
891 {
892  int i, ret;
893  int nb_slices = (HAVE_THREADS &&
894  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
895  s->avctx->thread_count : 1;
896 
897  clear_context(s);
898 
899  if (s->encoding && s->avctx->slices)
900  nb_slices = s->avctx->slices;
901 
902  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
903  s->mb_height = (s->height + 31) / 32 * 2;
904  else
905  s->mb_height = (s->height + 15) / 16;
906 
907  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
908  av_log(s->avctx, AV_LOG_ERROR,
909  "decoding to AV_PIX_FMT_NONE is not supported.\n");
910  return AVERROR(EINVAL);
911  }
912 
913  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
914  int max_slices;
915  if (s->mb_height)
916  max_slices = FFMIN(MAX_THREADS, s->mb_height);
917  else
918  max_slices = MAX_THREADS;
919  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
920  " reducing to %d\n", nb_slices, max_slices);
921  nb_slices = max_slices;
922  }
923 
924  if ((s->width || s->height) &&
925  av_image_check_size(s->width, s->height, 0, s->avctx))
926  return AVERROR(EINVAL);
927 
928  dct_init(s);
929 
930  /* set chroma shifts */
931  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
932  &s->chroma_x_shift,
933  &s->chroma_y_shift);
934  if (ret)
935  return ret;
936 
937  FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
938  MAX_PICTURE_COUNT * sizeof(Picture), fail_nomem);
939  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
940  s->picture[i].f = av_frame_alloc();
941  if (!s->picture[i].f)
942  goto fail_nomem;
943  }
944  s->next_picture.f = av_frame_alloc();
945  if (!s->next_picture.f)
946  goto fail_nomem;
947  s->last_picture.f = av_frame_alloc();
948  if (!s->last_picture.f)
949  goto fail_nomem;
950  s->current_picture.f = av_frame_alloc();
951  if (!s->current_picture.f)
952  goto fail_nomem;
953  s->new_picture.f = av_frame_alloc();
954  if (!s->new_picture.f)
955  goto fail_nomem;
956 
957  if ((ret = init_context_frame(s)))
958  goto fail_nomem;
959 
960  s->parse_context.state = -1;
961 
962  s->context_initialized = 1;
963  memset(s->thread_context, 0, sizeof(s->thread_context));
964  s->thread_context[0] = s;
965 
966 // if (s->width && s->height) {
967  if (nb_slices > 1) {
968  for (i = 0; i < nb_slices; i++) {
969  if (i) {
970  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
971  if (!s->thread_context[i])
972  goto fail_nomem;
973  }
974  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
975  goto fail;
976  s->thread_context[i]->start_mb_y =
977  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
978  s->thread_context[i]->end_mb_y =
979  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
980  }
981  } else {
982  if ((ret = init_duplicate_context(s)) < 0)
983  goto fail;
984  s->start_mb_y = 0;
985  s->end_mb_y = s->mb_height;
986  }
987  s->slice_context_count = nb_slices;
988 // }
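/* Editor's note (illustrative): the start_mb_y/end_mb_y formula gives a
 * balanced, rounded partition of macroblock rows across slice threads. For
 * mb_height = 30 and nb_slices = 4 the boundaries (30 * i + 2) / 4 come out
 * as 0, 8, 15, 23, 30, i.e. the four threads process 8, 7, 8 and 7 rows. */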
989 
990  return 0;
991  fail_nomem:
992  ret = AVERROR(ENOMEM);
993  fail:
994  ff_mpv_common_end(s);
995  return ret;
996 }
997 
998 /**
999  * Frees and resets MpegEncContext fields depending on the resolution.
1000  * It is used during resolution changes to avoid a full reinitialization of the
1001  * codec.
1002  */
1003 static void free_context_frame(MpegEncContext *s)
1004 {
1005  int i, j, k;
1006 
1007  av_freep(&s->mb_type);
1008  av_freep(&s->p_mv_table_base);
1009  av_freep(&s->b_forw_mv_table_base);
1010  av_freep(&s->b_back_mv_table_base);
1011  av_freep(&s->b_bidir_forw_mv_table_base);
1012  av_freep(&s->b_bidir_back_mv_table_base);
1013  av_freep(&s->b_direct_mv_table_base);
1014  s->p_mv_table = NULL;
1015  s->b_forw_mv_table = NULL;
1016  s->b_back_mv_table = NULL;
1017  s->b_bidir_forw_mv_table = NULL;
1018  s->b_bidir_back_mv_table = NULL;
1019  s->b_direct_mv_table = NULL;
1020  for (i = 0; i < 2; i++) {
1021  for (j = 0; j < 2; j++) {
1022  for (k = 0; k < 2; k++) {
1023  av_freep(&s->b_field_mv_table_base[i][j][k]);
1024  s->b_field_mv_table[i][j][k] = NULL;
1025  }
1026  av_freep(&s->b_field_select_table[i][j]);
1027  av_freep(&s->p_field_mv_table_base[i][j]);
1028  s->p_field_mv_table[i][j] = NULL;
1029  }
1030  av_freep(&s->p_field_select_table[i]);
1031  }
1032 
1033  av_freep(&s->dc_val_base);
1034  av_freep(&s->coded_block_base);
1035  av_freep(&s->mbintra_table);
1036  av_freep(&s->cbp_table);
1037  av_freep(&s->pred_dir_table);
1038 
1039  av_freep(&s->mbskip_table);
1040 
1041  av_freep(&s->er.error_status_table);
1042  av_freep(&s->er.er_temp_buffer);
1043  av_freep(&s->mb_index2xy);
1044  av_freep(&s->lambda_table);
1045 
1046  av_freep(&s->cplx_tab);
1047  av_freep(&s->bits_tab);
1048 
1049  s->linesize = s->uvlinesize = 0;
1050 }
1051 
1052 int ff_mpv_common_frame_size_change(MpegEncContext *s)
1053 {
1054  int i, err = 0;
1055 
1056  if (!s->context_initialized)
1057  return AVERROR(EINVAL);
1058 
1059  if (s->slice_context_count > 1) {
1060  for (i = 0; i < s->slice_context_count; i++) {
1061  free_duplicate_context(s->thread_context[i]);
1062  }
1063  for (i = 1; i < s->slice_context_count; i++) {
1064  av_freep(&s->thread_context[i]);
1065  }
1066  } else
1067  free_duplicate_context(s);
1068 
1069  free_context_frame(s);
1070 
1071  if (s->picture)
1072  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1073  s->picture[i].needs_realloc = 1;
1074  }
1075 
1076  s->last_picture_ptr =
1077  s->next_picture_ptr =
1078  s->current_picture_ptr = NULL;
1079 
1080  // init
1081  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
1082  s->mb_height = (s->height + 31) / 32 * 2;
1083  else
1084  s->mb_height = (s->height + 15) / 16;
1085 
1086  if ((s->width || s->height) &&
1087  (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
1088  goto fail;
1089 
1090  if ((err = init_context_frame(s)))
1091  goto fail;
1092 
1093  memset(s->thread_context, 0, sizeof(s->thread_context));
1094  s->thread_context[0] = s;
1095 
1096  if (s->width && s->height) {
1097  int nb_slices = s->slice_context_count;
1098  if (nb_slices > 1) {
1099  for (i = 0; i < nb_slices; i++) {
1100  if (i) {
1101  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
1102  if (!s->thread_context[i]) {
1103  err = AVERROR(ENOMEM);
1104  goto fail;
1105  }
1106  }
1107  if ((err = init_duplicate_context(s->thread_context[i])) < 0)
1108  goto fail;
1109  s->thread_context[i]->start_mb_y =
1110  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1111  s->thread_context[i]->end_mb_y =
1112  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1113  }
1114  } else {
1115  err = init_duplicate_context(s);
1116  if (err < 0)
1117  goto fail;
1118  s->start_mb_y = 0;
1119  s->end_mb_y = s->mb_height;
1120  }
1121  s->slice_context_count = nb_slices;
1122  }
1123 
1124  return 0;
1125  fail:
1126  ff_mpv_common_end(s);
1127  return err;
1128 }
1129 
1130 /* init common structure for both encoder and decoder */
1131 void ff_mpv_common_end(MpegEncContext *s)
1132 {
1133  int i;
1134 
1135  if (!s)
1136  return ;
1137 
1138  if (s->slice_context_count > 1) {
1139  for (i = 0; i < s->slice_context_count; i++) {
1140  free_duplicate_context(s->thread_context[i]);
1141  }
1142  for (i = 1; i < s->slice_context_count; i++) {
1143  av_freep(&s->thread_context[i]);
1144  }
1145  s->slice_context_count = 1;
1146  } else free_duplicate_context(s);
1147 
1148  av_freep(&s->parse_context.buffer);
1149  s->parse_context.buffer_size = 0;
1150 
1151  av_freep(&s->bitstream_buffer);
1152  s->allocated_bitstream_buffer_size = 0;
1153 
1154  if (s->picture) {
1155  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1156  ff_free_picture_tables(&s->picture[i]);
1157  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1158  av_frame_free(&s->picture[i].f);
1159  }
1160  }
1161  av_freep(&s->picture);
1162  ff_free_picture_tables(&s->last_picture);
1163  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1164  av_frame_free(&s->last_picture.f);
1165  ff_free_picture_tables(&s->current_picture);
1166  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1167  av_frame_free(&s->current_picture.f);
1168  ff_free_picture_tables(&s->next_picture);
1169  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1170  av_frame_free(&s->next_picture.f);
1171  ff_free_picture_tables(&s->new_picture);
1172  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1173  av_frame_free(&s->new_picture.f);
1174 
1175  free_context_frame(s);
1176 
1177  s->context_initialized = 0;
1178  s->last_picture_ptr =
1179  s->next_picture_ptr =
1180  s->current_picture_ptr = NULL;
1181  s->linesize = s->uvlinesize = 0;
1182 }
1183 
1184 
1185 static void gray_frame(AVFrame *frame)
1186 {
1187  int i, h_chroma_shift, v_chroma_shift;
1188 
1189  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1190 
1191  for(i=0; i<frame->height; i++)
1192  memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1193  for(i=0; i<AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1194  memset(frame->data[1] + frame->linesize[1]*i,
1195  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
1196  memset(frame->data[2] + frame->linesize[2]*i,
1197  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
1198  }
1199 }
1200 
1201 /**
1202  * Generic function called after decoding
1203  * the header and before a frame is decoded.
1204  */
1205 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1206 {
1207  int i, ret;
1208  Picture *pic;
1209  s->mb_skipped = 0;
1210 
1211  if (!ff_thread_can_start_frame(avctx)) {
1212  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1213  return -1;
1214  }
1215 
1216  /* mark & release old frames */
1217  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1218  s->last_picture_ptr != s->next_picture_ptr &&
1219  s->last_picture_ptr->f->buf[0]) {
1220  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1221  }
1222 
1223  /* release forgotten pictures */
1224  /* if (MPEG-124 / H.263) */
1225  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1226  if (&s->picture[i] != s->last_picture_ptr &&
1227  &s->picture[i] != s->next_picture_ptr &&
1228  s->picture[i].reference && !s->picture[i].needs_realloc) {
1229  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1230  }
1231  }
1232 
1233  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1234  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1235  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1236 
1237  /* release non reference frames */
1238  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1239  if (!s->picture[i].reference)
1240  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
1241  }
1242 
1243  if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
1244  // we already have an unused image
1245  // (maybe it was set before reading the header)
1246  pic = s->current_picture_ptr;
1247  } else {
1248  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1249  if (i < 0) {
1250  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1251  return i;
1252  }
1253  pic = &s->picture[i];
1254  }
1255 
1256  pic->reference = 0;
1257  if (!s->droppable) {
1258  if (s->pict_type != AV_PICTURE_TYPE_B)
1259  pic->reference = 3;
1260  }
1261 
1262  pic->f->coded_picture_number = s->coded_picture_number++;
1263 
1264  if (alloc_picture(s, pic) < 0)
1265  return -1;
1266 
1267  s->current_picture_ptr = pic;
1268  // FIXME use only the vars from current_pic
1269  s->current_picture_ptr->f->top_field_first = s->top_field_first;
1270  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1271  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1272  if (s->picture_structure != PICT_FRAME)
1273  s->current_picture_ptr->f->top_field_first =
1274  (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1275  }
1276  s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
1277  !s->progressive_sequence;
1278  s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1279 
1280  s->current_picture_ptr->f->pict_type = s->pict_type;
1281  // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
1282  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1283  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1284 
1285  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1286  s->current_picture_ptr)) < 0)
1287  return ret;
1288 
1289  if (s->pict_type != AV_PICTURE_TYPE_B) {
1290  s->last_picture_ptr = s->next_picture_ptr;
1291  if (!s->droppable)
1292  s->next_picture_ptr = s->current_picture_ptr;
1293  }
1294  ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1295  s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1296  s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
1297  s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
1298  s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
1299  s->pict_type, s->droppable);
1300 
1301  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
1302  (s->pict_type != AV_PICTURE_TYPE_I)) {
1303  int h_chroma_shift, v_chroma_shift;
1304  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1305  &h_chroma_shift, &v_chroma_shift);
1306  if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
1307  av_log(avctx, AV_LOG_DEBUG,
1308  "allocating dummy last picture for B frame\n");
1309  else if (s->pict_type != AV_PICTURE_TYPE_I)
1310  av_log(avctx, AV_LOG_ERROR,
1311  "warning: first frame is no keyframe\n");
1312 
1313  /* Allocate a dummy frame */
1314  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1315  if (i < 0) {
1316  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1317  return i;
1318  }
1319  s->last_picture_ptr = &s->picture[i];
1320 
1321  s->last_picture_ptr->reference = 3;
1322  s->last_picture_ptr->f->key_frame = 0;
1323  s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1324 
1325  if (alloc_picture(s, s->last_picture_ptr) < 0) {
1326  s->last_picture_ptr = NULL;
1327  return -1;
1328  }
1329 
1330  if (!avctx->hwaccel) {
1331  for(i=0; i<avctx->height; i++)
1332  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
1333  0x80, avctx->width);
1334  if (s->last_picture_ptr->f->data[2]) {
1335  for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
1336  memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
1337  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1338  memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
1339  0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1340  }
1341  }
1342 
1343  if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
1344  for(i=0; i<avctx->height; i++)
1345  memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
1346  }
1347  }
1348 
1349  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1350  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1351  }
1352  if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
1353  s->pict_type == AV_PICTURE_TYPE_B) {
1354  /* Allocate a dummy frame */
1355  i = ff_find_unused_picture(s->avctx, s->picture, 0);
1356  if (i < 0) {
1357  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1358  return i;
1359  }
1360  s->next_picture_ptr = &s->picture[i];
1361 
1362  s->next_picture_ptr->reference = 3;
1363  s->next_picture_ptr->f->key_frame = 0;
1364  s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;
1365 
1366  if (alloc_picture(s, s->next_picture_ptr) < 0) {
1367  s->next_picture_ptr = NULL;
1368  return -1;
1369  }
1370  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1371  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1372  }
1373 
1374 #if 0 // BUFREF-FIXME
1375  memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
1376  memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
1377 #endif
1378  if (s->last_picture_ptr) {
1379  if (s->last_picture_ptr->f->buf[0] &&
1380  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1381  s->last_picture_ptr)) < 0)
1382  return ret;
1383  }
1384  if (s->next_picture_ptr) {
1385  if (s->next_picture_ptr->f->buf[0] &&
1386  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1387  s->next_picture_ptr)) < 0)
1388  return ret;
1389  }
1390 
1391  av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1392  s->last_picture_ptr->f->buf[0]));
1393 
1394  if (s->picture_structure!= PICT_FRAME) {
1395  int i;
1396  for (i = 0; i < 4; i++) {
1397  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1398  s->current_picture.f->data[i] +=
1399  s->current_picture.f->linesize[i];
1400  }
1401  s->current_picture.f->linesize[i] *= 2;
1402  s->last_picture.f->linesize[i] *= 2;
1403  s->next_picture.f->linesize[i] *= 2;
1404  }
1405  }
1406 
1407  /* set dequantizer, we can't do it during init as
1408  * it might change for MPEG-4 and we can't do it in the header
1409  * decode as init is not called for MPEG-4 there yet */
1410  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1411  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1412  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1413  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1414  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1415  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1416  } else {
1417  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1418  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1419  }
1420 
1421  if (s->avctx->debug & FF_DEBUG_NOMC) {
1422  gray_frame(s->current_picture_ptr->f);
1423  }
1424 
1425  return 0;
1426 }
1427 
1428 /* called after a frame has been decoded. */
1429 void ff_mpv_frame_end(MpegEncContext *s)
1430 {
1431  emms_c();
1432 
1433  if (s->current_picture.reference)
1434  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1435 }
1436 
1437 void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
1438 {
1439  ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
1440  p->qscale_table, p->motion_val, &s->low_delay,
1441  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
1442 }
1443 
1444 int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
1445 {
1446  AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
1447  int offset = 2*s->mb_stride + 1;
1448  if(!ref)
1449  return AVERROR(ENOMEM);
1450  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
1451  ref->size -= offset;
1452  ref->data += offset;
1453  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
1454 }
1455 
1456 static inline int hpel_motion_lowres(MpegEncContext *s,
1457  uint8_t *dest, uint8_t *src,
1458  int field_based, int field_select,
1459  int src_x, int src_y,
1460  int width, int height, ptrdiff_t stride,
1461  int h_edge_pos, int v_edge_pos,
1462  int w, int h, h264_chroma_mc_func *pix_op,
1463  int motion_x, int motion_y)
1464 {
1465  const int lowres = s->avctx->lowres;
1466  const int op_index = FFMIN(lowres, 3);
1467  const int s_mask = (2 << lowres) - 1;
1468  int emu = 0;
1469  int sx, sy;
1470 
1471  if (s->quarter_sample) {
1472  motion_x /= 2;
1473  motion_y /= 2;
1474  }
1475 
1476  sx = motion_x & s_mask;
1477  sy = motion_y & s_mask;
1478  src_x += motion_x >> lowres + 1;
1479  src_y += motion_y >> lowres + 1;
1480 
1481  src += src_y * stride + src_x;
1482 
1483  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1484  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1485  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
1486  s->linesize, s->linesize,
1487  w + 1, (h + 1) << field_based,
1488  src_x, src_y << field_based,
1489  h_edge_pos, v_edge_pos);
1490  src = s->sc.edge_emu_buffer;
1491  emu = 1;
1492  }
1493 
1494  sx = (sx << 2) >> lowres;
1495  sy = (sy << 2) >> lowres;
1496  if (field_select)
1497  src += s->linesize;
1498  pix_op[op_index](dest, src, stride, h, sx, sy);
1499  return emu;
1500 }
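/* Editor's note (illustrative): the sub-pixel bookkeeping above at
 * lowres = 1 (half-resolution decoding): s_mask = (2 << 1) - 1 = 3, so
 * motion_x = 7 splits into the fraction sx = 7 & 3 = 3 and an integer step
 * of 7 >> (1 + 1) = 1 (">> lowres + 1" parses as ">> (lowres + 1)"). Before
 * the h264chroma call, which expects eighth-pel fractions, sx is rescaled
 * as (3 << 2) >> 1 = 6. */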
1501 
1502 /* apply one mpeg motion vector to the three components */
1503 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1504  uint8_t *dest_y,
1505  uint8_t *dest_cb,
1506  uint8_t *dest_cr,
1507  int field_based,
1508  int bottom_field,
1509  int field_select,
1510  uint8_t **ref_picture,
1511  h264_chroma_mc_func *pix_op,
1512  int motion_x, int motion_y,
1513  int h, int mb_y)
1514 {
1515  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1516  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
1517  ptrdiff_t uvlinesize, linesize;
1518  const int lowres = s->avctx->lowres;
1519  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
1520  const int block_s = 8>>lowres;
1521  const int s_mask = (2 << lowres) - 1;
1522  const int h_edge_pos = s->h_edge_pos >> lowres;
1523  const int v_edge_pos = s->v_edge_pos >> lowres;
1524  linesize = s->current_picture.f->linesize[0] << field_based;
1525  uvlinesize = s->current_picture.f->linesize[1] << field_based;
1526 
1527  // FIXME obviously not perfect but qpel will not work in lowres anyway
1528  if (s->quarter_sample) {
1529  motion_x /= 2;
1530  motion_y /= 2;
1531  }
1532 
1533  if(field_based){
1534  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
1535  }
1536 
1537  sx = motion_x & s_mask;
1538  sy = motion_y & s_mask;
1539  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1540  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1541 
1542  if (s->out_format == FMT_H263) {
1543  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1544  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1545  uvsrc_x = src_x >> 1;
1546  uvsrc_y = src_y >> 1;
1547  } else if (s->out_format == FMT_H261) {
1548  // chroma MVs are always full-pel in H.261
1549  mx = motion_x / 4;
1550  my = motion_y / 4;
1551  uvsx = (2 * mx) & s_mask;
1552  uvsy = (2 * my) & s_mask;
1553  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1554  uvsrc_y = mb_y * block_s + (my >> lowres);
1555  } else {
1556  if(s->chroma_y_shift){
1557  mx = motion_x / 2;
1558  my = motion_y / 2;
1559  uvsx = mx & s_mask;
1560  uvsy = my & s_mask;
1561  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1562  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1563  } else {
1564  if(s->chroma_x_shift){
1565  //Chroma422
1566  mx = motion_x / 2;
1567  uvsx = mx & s_mask;
1568  uvsy = motion_y & s_mask;
1569  uvsrc_y = src_y;
1570  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1571  } else {
1572  //Chroma444
1573  uvsx = motion_x & s_mask;
1574  uvsy = motion_y & s_mask;
1575  uvsrc_x = src_x;
1576  uvsrc_y = src_y;
1577  }
1578  }
1579  }
1580 
1581  ptr_y = ref_picture[0] + src_y * linesize + src_x;
1582  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1583  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1584 
1585  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
1586  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1587  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
1588  linesize >> field_based, linesize >> field_based,
1589  17, 17 + field_based,
1590  src_x, src_y << field_based, h_edge_pos,
1591  v_edge_pos);
1592  ptr_y = s->sc.edge_emu_buffer;
1593  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1594  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
1595  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
1596  if (s->workaround_bugs & FF_BUG_IEDGE)
1597  vbuf -= s->uvlinesize;
1598  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
1599  uvlinesize >> field_based, uvlinesize >> field_based,
1600  9, 9 + field_based,
1601  uvsrc_x, uvsrc_y << field_based,
1602  h_edge_pos >> 1, v_edge_pos >> 1);
1603  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
1604  uvlinesize >> field_based,uvlinesize >> field_based,
1605  9, 9 + field_based,
1606  uvsrc_x, uvsrc_y << field_based,
1607  h_edge_pos >> 1, v_edge_pos >> 1);
1608  ptr_cb = ubuf;
1609  ptr_cr = vbuf;
1610  }
1611  }
1612 
1613  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
1614  if (bottom_field) {
1615  dest_y += s->linesize;
1616  dest_cb += s->uvlinesize;
1617  dest_cr += s->uvlinesize;
1618  }
1619 
1620  if (field_select) {
1621  ptr_y += s->linesize;
1622  ptr_cb += s->uvlinesize;
1623  ptr_cr += s->uvlinesize;
1624  }
1625 
1626  sx = (sx << 2) >> lowres;
1627  sy = (sy << 2) >> lowres;
1628  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1629 
1630  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1631  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
1632  uvsx = (uvsx << 2) >> lowres;
1633  uvsy = (uvsy << 2) >> lowres;
1634  if (hc) {
1635  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
1636  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
1637  }
1638  }
1639  // FIXME h261 lowres loop filter
1640 }
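/* Editor's note (illustrative): with lowres decoding everything scales by
 * 2^lowres. At lowres = 1, block_s = 8 >> 1 = 4, so a macroblock covers
 * 8x8 luma pixels of the half-size output, and h_edge_pos/v_edge_pos were
 * pre-shifted above to match. */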
1641 
1642 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1643  uint8_t *dest_cb, uint8_t *dest_cr,
1644  uint8_t **ref_picture,
1645  h264_chroma_mc_func * pix_op,
1646  int mx, int my)
1647 {
1648  const int lowres = s->avctx->lowres;
1649  const int op_index = FFMIN(lowres, 3);
1650  const int block_s = 8 >> lowres;
1651  const int s_mask = (2 << lowres) - 1;
1652  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1653  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1654  int emu = 0, src_x, src_y, sx, sy;
1655  ptrdiff_t offset;
1656  uint8_t *ptr;
1657 
1658  if (s->quarter_sample) {
1659  mx /= 2;
1660  my /= 2;
1661  }
1662 
1663  /* In case of 8X8, we construct a single chroma motion vector
1664  with a special rounding */
1665  mx = ff_h263_round_chroma(mx);
1666  my = ff_h263_round_chroma(my);
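/* Editor's note: ff_h263_round_chroma() (a helper in FFmpeg's mpegvideo
 * headers) maps the sum of the four luma vectors to one chroma vector,
 * essentially sum/8 with a table-based rounding bias as H.263 prescribes
 * for 4MV mode; e.g. a luma-MV sum of 7 rounds to 1 rather than 0. */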
1667 
1668  sx = mx & s_mask;
1669  sy = my & s_mask;
1670  src_x = s->mb_x * block_s + (mx >> lowres + 1);
1671  src_y = s->mb_y * block_s + (my >> lowres + 1);
1672 
1673  offset = src_y * s->uvlinesize + src_x;
1674  ptr = ref_picture[1] + offset;
1675  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
1676  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
1677  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1678  s->uvlinesize, s->uvlinesize,
1679  9, 9,
1680  src_x, src_y, h_edge_pos, v_edge_pos);
1681  ptr = s->sc.edge_emu_buffer;
1682  emu = 1;
1683  }
1684  sx = (sx << 2) >> lowres;
1685  sy = (sy << 2) >> lowres;
1686  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1687 
1688  ptr = ref_picture[2] + offset;
1689  if (emu) {
1690  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1691  s->uvlinesize, s->uvlinesize,
1692  9, 9,
1693  src_x, src_y, h_edge_pos, v_edge_pos);
1694  ptr = s->sc.edge_emu_buffer;
1695  }
1696  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1697 }
1698 
1699 /**
1700  * motion compensation of a single macroblock
1701  * @param s context
1702  * @param dest_y luma destination pointer
1703  * @param dest_cb chroma cb/u destination pointer
1704  * @param dest_cr chroma cr/v destination pointer
1705  * @param dir direction (0->forward, 1->backward)
1706  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1707  * @param pix_op halfpel motion compensation function (average or put normally)
1708  * the motion vectors are taken from s->mv and the MV type from s->mv_type
1709  */
1710 static inline void MPV_motion_lowres(MpegEncContext *s,
1711  uint8_t *dest_y, uint8_t *dest_cb,
1712  uint8_t *dest_cr,
1713  int dir, uint8_t **ref_picture,
1714  h264_chroma_mc_func *pix_op)
1715 {
1716  int mx, my;
1717  int mb_x, mb_y, i;
1718  const int lowres = s->avctx->lowres;
1719  const int block_s = 8 >>lowres;
1720 
1721  mb_x = s->mb_x;
1722  mb_y = s->mb_y;
1723 
1724  switch (s->mv_type) {
1725  case MV_TYPE_16X16:
1726  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1727  0, 0, 0,
1728  ref_picture, pix_op,
1729  s->mv[dir][0][0], s->mv[dir][0][1],
1730  2 * block_s, mb_y);
1731  break;
1732  case MV_TYPE_8X8:
1733  mx = 0;
1734  my = 0;
1735  for (i = 0; i < 4; i++) {
1736  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
1737  s->linesize) * block_s,
1738  ref_picture[0], 0, 0,
1739  (2 * mb_x + (i & 1)) * block_s,
1740  (2 * mb_y + (i >> 1)) * block_s,
1741  s->width, s->height, s->linesize,
1742  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1743  block_s, block_s, pix_op,
1744  s->mv[dir][i][0], s->mv[dir][i][1]);
1745 
1746  mx += s->mv[dir][i][0];
1747  my += s->mv[dir][i][1];
1748  }
1749 
1750  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
1751  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
1752  pix_op, mx, my);
1753  break;
1754  case MV_TYPE_FIELD:
1755  if (s->picture_structure == PICT_FRAME) {
1756  /* top field */
1757  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1758  1, 0, s->field_select[dir][0],
1759  ref_picture, pix_op,
1760  s->mv[dir][0][0], s->mv[dir][0][1],
1761  block_s, mb_y);
1762  /* bottom field */
1763  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1764  1, 1, s->field_select[dir][1],
1765  ref_picture, pix_op,
1766  s->mv[dir][1][0], s->mv[dir][1][1],
1767  block_s, mb_y);
1768  } else {
1769  if (s->picture_structure != s->field_select[dir][0] + 1 &&
1770  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
1771  ref_picture = s->current_picture_ptr->f->data;
1772 
1773  }
1774  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1775  0, 0, s->field_select[dir][0],
1776  ref_picture, pix_op,
1777  s->mv[dir][0][0],
1778  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
1779  }
1780  break;
1781  case MV_TYPE_16X8:
1782  for (i = 0; i < 2; i++) {
1783  uint8_t **ref2picture;
1784 
1785  if (s->picture_structure == s->field_select[dir][i] + 1 ||
1786  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
1787  ref2picture = ref_picture;
1788  } else {
1789  ref2picture = s->current_picture_ptr->f->data;
1790  }
1791 
1792  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1793  0, 0, s->field_select[dir][i],
1794  ref2picture, pix_op,
1795  s->mv[dir][i][0], s->mv[dir][i][1] +
1796  2 * block_s * i, block_s, mb_y >> 1);
1797 
1798  dest_y += 2 * block_s * s->linesize;
1799  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1800  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1801  }
1802  break;
1803  case MV_TYPE_DMV:
1804  if (s->picture_structure == PICT_FRAME) {
1805  for (i = 0; i < 2; i++) {
1806  int j;
1807  for (j = 0; j < 2; j++) {
1808  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1809  1, j, j ^ i,
1810  ref_picture, pix_op,
1811  s->mv[dir][2 * i + j][0],
1812  s->mv[dir][2 * i + j][1],
1813  block_s, mb_y);
1814  }
1815  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1816  }
1817  } else {
1818  for (i = 0; i < 2; i++) {
1819  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1820  0, 0, s->picture_structure != i + 1,
1821  ref_picture, pix_op,
1822  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
1823  2 * block_s, mb_y >> 1);
1824 
1825  // after put we make avg of the same block
1826  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1827 
1828  // opposite parity is always in the same
1829  // frame if this is second field
1830  if (!s->first_field) {
1831  ref_picture = s->current_picture_ptr->f->data;
1832  }
1833  }
1834  }
1835  break;
1836  default:
1837  av_assert2(0);
1838  }
1839 }
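
In the MV_TYPE_DMV branch above, the dual-prime prediction is the average of two opposite-parity field predictions: the first motion compensation uses the put table, then pix_op is swapped to the avg table so the second prediction is averaged into the same destination. A minimal self-contained sketch of that put-then-avg idiom (hypothetical helper names, not FFmpeg API):

#include <stdint.h>

typedef void (*mc_fn)(uint8_t *dst, const uint8_t *src, int n);

static void put_pixels(uint8_t *dst, const uint8_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = src[i];                      /* dst = prediction      */
}

static void avg_pixels(uint8_t *dst, const uint8_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = (dst[i] + src[i] + 1) >> 1;  /* dst = rounded average */
}

/* put the first field prediction, then average the second one in */
static void predict_dual_prime(uint8_t *dst, const uint8_t *pred_a,
                               const uint8_t *pred_b, int n)
{
    mc_fn op = put_pixels;
    op(dst, pred_a, n);
    op = avg_pixels;   /* same switch as pix_op above */
    op(dst, pred_b, n);
}
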
1840 
1841 /**
1842  * Find the lowest MB row referenced in the MVs.
1843  */
1844 static int lowest_referenced_row(MpegEncContext *s, int dir)
1845 {
1846  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1847  int my, off, i, mvs;
1848 
1849  if (s->picture_structure != PICT_FRAME || s->mcsel)
1850  goto unhandled;
1851 
1852  switch (s->mv_type) {
1853  case MV_TYPE_16X16:
1854  mvs = 1;
1855  break;
1856  case MV_TYPE_16X8:
1857  mvs = 2;
1858  break;
1859  case MV_TYPE_8X8:
1860  mvs = 4;
1861  break;
1862  default:
1863  goto unhandled;
1864  }
1865 
1866  for (i = 0; i < mvs; i++) {
1867  my = s->mv[dir][i][1];
1868  my_max = FFMAX(my_max, my);
1869  my_min = FFMIN(my_min, my);
1870  }
1871 
1872  off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1873 
1874  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
1875 unhandled:
1876  return s->mb_height-1;
1877 }
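
The offset computation above reads as: convert the largest vertical displacement to quarter-pel units (half-pel vectors get qpel_shift == 1), then round up to whole macroblock rows, 64 quarter-pel units being 16 luma lines. A worked instance with assumed values:

/* same rounding as above: ceil(quarter-pel displacement / 64) */
static int mb_rows_reached(int biggest_my, int qpel_shift)
{
    return ((biggest_my << qpel_shift) + 63) >> 6;
}
/* e.g. mb_rows_reached(37, 1) == 2: a half-pel MV of 37 (18.5 luma lines)
 * reaches two macroblock rows below the current one */
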
1878 
1879 /* put block[] to dest[] */
1880 static inline void put_dct(MpegEncContext *s,
1881  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1882 {
1883  s->dct_unquantize_intra(s, block, i, qscale);
1884  s->idsp.idct_put(dest, line_size, block);
1885 }
1886 
1887 /* add block[] to dest[] */
1888 static inline void add_dct(MpegEncContext *s,
1889  int16_t *block, int i, uint8_t *dest, int line_size)
1890 {
1891  if (s->block_last_index[i] >= 0) {
1892  s->idsp.idct_add(dest, line_size, block);
1893  }
1894 }
1895 
1896 static inline void add_dequant_dct(MpegEncContext *s,
1897  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1898 {
1899  if (s->block_last_index[i] >= 0) {
1900  s->dct_unquantize_inter(s, block, i, qscale);
1901 
1902  s->idsp.idct_add(dest, line_size, block);
1903  }
1904 }
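
Taken together, the three helpers above implement the intra/inter split of the reconstruction path: intra blocks overwrite the destination, inter blocks add a residue on top of the motion-compensated prediction, with dequantization done here or earlier depending on the codec. Schematically:

/* put_dct:         dest  = idct(dequant_intra(block))   -- intra, overwrites
 * add_dct:         dest += idct(block)                  -- inter, already dequantized
 * add_dequant_dct: dest += idct(dequant_inter(block))   -- inter, dequantized here */
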
1905 
1906 /**
1907  * Clean dc, ac, coded_block for the current non-intra MB.
1908  */
1909 void ff_clean_intra_table_entries(MpegEncContext *s)
1910 {
1911  int wrap = s->b8_stride;
1912  int xy = s->block_index[0];
1913 
1914  s->dc_val[0][xy ] =
1915  s->dc_val[0][xy + 1 ] =
1916  s->dc_val[0][xy + wrap] =
1917  s->dc_val[0][xy + 1 + wrap] = 1024;
1918  /* ac pred */
1919  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1920  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1921  if (s->msmpeg4_version>=3) {
1922  s->coded_block[xy ] =
1923  s->coded_block[xy + 1 ] =
1924  s->coded_block[xy + wrap] =
1925  s->coded_block[xy + 1 + wrap] = 0;
1926  }
1927  /* chroma */
1928  wrap = s->mb_stride;
1929  xy = s->mb_x + s->mb_y * wrap;
1930  s->dc_val[1][xy] =
1931  s->dc_val[2][xy] = 1024;
1932  /* ac pred */
1933  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1934  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1935 
1936  s->mbintra_table[xy]= 0;
1937 }
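
The reset value 1024 is the DC coefficient of a flat mid-grey block: with 8-bit samples the 8x8 DCT's DC term is eight times the mean sample value, so 128 * 8 == 1024 makes prediction fall back to neutral grey. A one-line restatement (assuming 8-bit content):

enum { DC_RESET = 128 * 8 };   /* == 1024, the reset value used above */
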
1938 
1939 /* generic function called after a macroblock has been parsed by the
1940  decoder or encoded by the encoder.
1941 
1942  Important variables used:
1943  s->mb_intra : true if intra macroblock
1944  s->mv_dir : motion vector direction
1945  s->mv_type : motion vector type
1946  s->mv : motion vector
1947  s->interlaced_dct : true if interlaced dct is used (MPEG-2)
1948  */
1949 static av_always_inline
1950 void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
1951  int lowres_flag, int is_mpeg12)
1952 {
1953  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1954 
1955  if (CONFIG_XVMC &&
1956  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
1957  s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
1958  return;
1959  }
1960 
1961  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1962  /* print DCT coefficients */
1963  int i,j;
1964  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1965  for(i=0; i<6; i++){
1966  for(j=0; j<64; j++){
1967  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1968  block[i][s->idsp.idct_permutation[j]]);
1969  }
1970  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1971  }
1972  }
1973 
1974  s->current_picture.qscale_table[mb_xy] = s->qscale;
1975 
1976  /* update DC predictors for P macroblocks */
1977  if (!s->mb_intra) {
1978  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1979  if(s->mbintra_table[mb_xy])
1980  ff_clean_intra_table_entries(s);
1981  } else {
1982  s->last_dc[0] =
1983  s->last_dc[1] =
1984  s->last_dc[2] = 128 << s->intra_dc_precision;
1985  }
1986  }
1987  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1988  s->mbintra_table[mb_xy]=1;
1989 
1990  if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
1991  !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1992  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1993  uint8_t *dest_y, *dest_cb, *dest_cr;
1994  int dct_linesize, dct_offset;
1995  op_pixels_func (*op_pix)[4];
1996  qpel_mc_func (*op_qpix)[16];
1997  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1998  const int uvlinesize = s->current_picture.f->linesize[1];
1999  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2000  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2001 
2002  /* avoid copy if macroblock skipped in last frame too */
2003  /* skip only during decoding, as we might trash the buffers a bit during encoding */
2004  if(!s->encoding){
2005  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2006 
2007  if (s->mb_skipped) {
2008  s->mb_skipped= 0;
2009  av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2010  *mbskip_ptr = 1;
2011  } else if(!s->current_picture.reference) {
2012  *mbskip_ptr = 1;
2013  } else{
2014  *mbskip_ptr = 0; /* not skipped */
2015  }
2016  }
2017 
2018  dct_linesize = linesize << s->interlaced_dct;
2019  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2020 
2021  if(readable){
2022  dest_y= s->dest[0];
2023  dest_cb= s->dest[1];
2024  dest_cr= s->dest[2];
2025  }else{
2026  dest_y = s->sc.b_scratchpad;
2027  dest_cb= s->sc.b_scratchpad+16*linesize;
2028  dest_cr= s->sc.b_scratchpad+32*linesize;
2029  }
2030 
2031  if (!s->mb_intra) {
2032  /* motion handling */
2033  /* decoding or more than one mb_type (MC was already done otherwise) */
2034  if(!s->encoding){
2035 
2036  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2037  if (s->mv_dir & MV_DIR_FORWARD) {
2038  ff_thread_await_progress(&s->last_picture_ptr->tf,
2039  lowest_referenced_row(s, 0),
2040  0);
2041  }
2042  if (s->mv_dir & MV_DIR_BACKWARD) {
2043  ff_thread_await_progress(&s->next_picture_ptr->tf,
2044  lowest_referenced_row(s, 1),
2045  0);
2046  }
2047  }
2048 
2049  if(lowres_flag){
2050  h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2051 
2052  if (s->mv_dir & MV_DIR_FORWARD) {
2053  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2054  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2055  }
2056  if (s->mv_dir & MV_DIR_BACKWARD) {
2057  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
2058  }
2059  }else{
2060  op_qpix = s->me.qpel_put;
2061  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2062  op_pix = s->hdsp.put_pixels_tab;
2063  }else{
2064  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2065  }
2066  if (s->mv_dir & MV_DIR_FORWARD) {
2067  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2068  op_pix = s->hdsp.avg_pixels_tab;
2069  op_qpix= s->me.qpel_avg;
2070  }
2071  if (s->mv_dir & MV_DIR_BACKWARD) {
2072  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2073  }
2074  }
2075  }
2076 
2077  /* skip dequant / idct if we are really late ;) */
2078  if(s->avctx->skip_idct){
2079  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2080  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2081  || s->avctx->skip_idct >= AVDISCARD_ALL)
2082  goto skip_idct;
2083  }
2084 
2085  /* add dct residue */
2086  if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2087  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2088  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2089  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2090  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2091  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2092 
2093  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2094  if (s->chroma_y_shift){
2095  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2096  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2097  }else{
2098  dct_linesize >>= 1;
2099  dct_offset >>=1;
2100  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2101  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2102  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2103  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2104  }
2105  }
2106  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2107  add_dct(s, block[0], 0, dest_y , dct_linesize);
2108  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2109  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2110  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2111 
2112  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2113  if(s->chroma_y_shift){//Chroma420
2114  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2115  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2116  }else{
2117  //chroma422
2118  dct_linesize = uvlinesize << s->interlaced_dct;
2119  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2120 
2121  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2122  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2123  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2124  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2125  if(!s->chroma_x_shift){//Chroma444
2126  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2127  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2128  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2129  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2130  }
2131  }
2132  }//fi gray
2133  }
2134  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2135  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2136  }
2137  } else {
2138  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
2139  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
2140  if (s->avctx->bits_per_raw_sample > 8){
2141  const int act_block_size = block_size * 2;
2142 
2143  if(s->dpcm_direction == 0) {
2144  s->idsp.idct_put(dest_y, dct_linesize, (int16_t*)(*s->block32)[0]);
2145  s->idsp.idct_put(dest_y + act_block_size, dct_linesize, (int16_t*)(*s->block32)[1]);
2146  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, (int16_t*)(*s->block32)[2]);
2147  s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);
2148 
2149  dct_linesize = uvlinesize << s->interlaced_dct;
2150  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2151 
2152  s->idsp.idct_put(dest_cb, dct_linesize, (int16_t*)(*s->block32)[4]);
2153  s->idsp.idct_put(dest_cr, dct_linesize, (int16_t*)(*s->block32)[5]);
2154  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
2155  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
2156  if(!s->chroma_x_shift){//Chroma444
2157  s->idsp.idct_put(dest_cb + act_block_size, dct_linesize, (int16_t*)(*s->block32)[8]);
2158  s->idsp.idct_put(dest_cr + act_block_size, dct_linesize, (int16_t*)(*s->block32)[9]);
2159  s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
2160  s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
2161  }
2162  } else if(s->dpcm_direction == 1) {
2163  int i, w, h;
2164  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2165  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2166  for(i = 0; i < 3; i++) {
2167  int idx = 0;
2168  int vsub = i ? s->chroma_y_shift : 0;
2169  int hsub = i ? s->chroma_x_shift : 0;
2170  for(h = 0; h < (16 >> vsub); h++){
2171  for(w = 0; w < (16 >> hsub); w++)
2172  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2173  dest_pcm[i] += linesize[i] / 2;
2174  }
2175  }
2176  } else if(s->dpcm_direction == -1) {
2177  int i, w, h;
2178  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2179  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2180  for(i = 0; i < 3; i++) {
2181  int idx = 0;
2182  int vsub = i ? s->chroma_y_shift : 0;
2183  int hsub = i ? s->chroma_x_shift : 0;
2184  dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
2185  for(h = (16 >> vsub)-1; h >= 1; h--){
2186  for(w = (16 >> hsub)-1; w >= 1; w--)
2187  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2188  dest_pcm[i] -= linesize[i] / 2;
2189  }
2190  }
2191  }
2192  }
2193  /* dct only in intra block */
2194  else if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2195  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2196  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2197  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2198  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2199 
2200  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2201  if(s->chroma_y_shift){
2202  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2203  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2204  }else{
2205  dct_offset >>=1;
2206  dct_linesize >>=1;
2207  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2208  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2209  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2210  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2211  }
2212  }
2213  }else{
2214  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2215  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2216  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2217  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2218 
2219  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2220  if(s->chroma_y_shift){
2221  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2222  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2223  }else{
2224 
2225  dct_linesize = uvlinesize << s->interlaced_dct;
2226  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2227 
2228  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2229  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2230  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2231  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2232  if(!s->chroma_x_shift){//Chroma444
2233  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2234  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2235  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2236  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2237  }
2238  }
2239  }//gray
2240  }
2241  }
2242 skip_idct:
2243  if(!readable){
2244  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2245  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2246  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2247  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2248  }
2249  }
2250  }
2251 }
2252 
2253 void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
2254 {
2255 #if !CONFIG_SMALL
2256  if(s->out_format == FMT_MPEG1) {
2257  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
2258  else mpv_reconstruct_mb_internal(s, block, 0, 1);
2259  } else
2260 #endif
2261  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
2262  else mpv_reconstruct_mb_internal(s, block, 0, 0);
2263 }
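
mpv_reconstruct_mb_internal is av_always_inline and both lowres_flag and is_mpeg12 are literal constants at every call site above, so the compiler emits up to four specialized bodies with the untaken branches folded away; the extra MPEG-1/2 specialization is simply skipped under CONFIG_SMALL to keep code size down. The same pattern in miniature (GCC-style always_inline attribute, hypothetical names):

/* force-inlined worker: 'fast' is a compile-time constant per call site */
static inline __attribute__((always_inline)) int scale(int x, int fast)
{
    return fast ? x >> 1   /* cheap specialized path */
                : x / 3;   /* generic fallback path  */
}

int scale_fast(int x) { return scale(x, 1); }  /* folds to: return x >> 1; */
int scale_slow(int x) { return scale(x, 0); }  /* folds to: return x / 3;  */
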
2264 
2265 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2266 {
2267  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
2268  s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
2269  s->first_field, s->low_delay);
2270 }
2271 
2272 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2273  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2274  const int uvlinesize = s->current_picture.f->linesize[1];
2275  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
2276  const int height_of_mb = 4 - s->avctx->lowres;
2277 
2278  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2279  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2280  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2281  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2282  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2283  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2284  //block_index is not used by mpeg2, so it is not affected by chroma_format
2285 
2286  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
2287  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2288  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2289 
2290  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2291  {
2292  if(s->picture_structure==PICT_FRAME){
2293  s->dest[0] += s->mb_y * linesize << height_of_mb;
2294  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2295  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2296  }else{
2297  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
2298  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2299  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2300  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2301  }
2302  }
2303 }
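
width_of_mb and height_of_mb above are log2 strides per macroblock: 4 for a 16-sample, 8-bit macroblock, 5 when bits_per_raw_sample > 8 (each sample then occupies two bytes), minus the lowres shift. Worked through with assumed values:

/* hypothetical case: 8-bit content, lowres == 1 (half resolution) */
int width_of_mb  = (4 + 0) - 1;   /* == 3: dest[0] steps 1 << 3 = 8 bytes per MB  */
int height_of_mb = 4 - 1;         /* == 3: dest[0] steps mb_y * linesize * 8 down */
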
2304 
2305 void ff_mpeg_flush(AVCodecContext *avctx){
2306  int i;
2307  MpegEncContext *s = avctx->priv_data;
2308 
2309  if (!s || !s->picture)
2310  return;
2311 
2312  for (i = 0; i < MAX_PICTURE_COUNT; i++)
2313  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2314  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2315 
2316  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
2317  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
2318  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
2319 
2320  s->mb_x= s->mb_y= 0;
2321  s->closed_gop= 0;
2322 
2323  s->parse_context.state= -1;
2324  s->parse_context.frame_start_found= 0;
2325  s->parse_context.overread= 0;
2326  s->parse_context.overread_index= 0;
2327  s->parse_context.index= 0;
2328  s->parse_context.last_index= 0;
2329  s->bitstream_buffer_size=0;
2330  s->pp_time=0;
2331 }
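
ff_mpeg_flush is reached through the codec's flush callback; on the public API side this corresponds to a caller discarding decoder state, typically around a seek. A minimal usage sketch (assuming an already-opened AVCodecContext *avctx):

/* after repositioning the demuxer, drop buffered reference pictures
 * and parser state so nothing stale leaks into the new position */
avcodec_flush_buffers(avctx);
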
2332 
2333 /**
2334  * set qscale and update qscale dependent variables.
2335  */
2336 void ff_set_qscale(MpegEncContext * s, int qscale)
2337 {
2338  if (qscale < 1)
2339  qscale = 1;
2340  else if (qscale > 31)
2341  qscale = 31;
2342 
2343  s->qscale = qscale;
2344  s->chroma_qscale= s->chroma_qscale_table[qscale];
2345 
2346  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2347  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2348 }
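
The quantizer is clamped to the 1..31 range of the qscale code shared by MPEG-1/2/4 and H.263, and the chroma quantizer and DC scales are refreshed in the same call so they never go stale relative to qscale. For example (hypothetical values):

ff_set_qscale(s, 0);    /* clamped: s->qscale == 1  */
ff_set_qscale(s, 100);  /* clamped: s->qscale == 31 */
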
2349 
2350 void ff_mpv_report_decode_progress(MpegEncContext *s)
2351 {
2352  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2353  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2354 }