FFmpeg  1.2.12
vc1dec.c
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "mpegvideo.h"
32 #include "h263.h"
33 #include "h264chroma.h"
34 #include "vc1.h"
35 #include "vc1data.h"
36 #include "vc1acdata.h"
37 #include "msmpeg4data.h"
38 #include "unary.h"
39 #include "mathops.h"
40 #include "vdpau_internal.h"
41 #include "libavutil/avassert.h"
42 
43 #undef NDEBUG
44 #include <assert.h>
45 
46 #define MB_INTRA_VLC_BITS 9
47 #define DC_VLC_BITS 9
48 
49 
50 // offset tables for interlaced picture MVDATA decoding
51 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
52 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
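
/* Standalone illustration (not part of vc1dec.c): how an index into one of the
 * offset tables above plus the raw extra bits become a signed MV differential,
 * mirroring the decode in get_mvdata_interlaced() further down. The sample
 * values are arbitrary. */
#include <stdio.h>

static int mv_diff_from_index(const int *offs_tab, int index1, int val)
{
    int sign = 0 - (val & 1);            /* low bit of val carries the sign */
    return (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
}

int main(void)
{
    static const int offs[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 }; /* = offset_table1 */
    /* index1 = 3, extra bits 0b101: magnitude (0b101 >> 1) + 4 = 6, sign bit set */
    printf("%d\n", mv_diff_from_index(offs, 3, 5)); /* prints -6 */
    printf("%d\n", mv_diff_from_index(offs, 3, 4)); /* prints  6 */
    return 0;
}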
53 
54 /***********************************************************************/
65 enum Imode {
66  IMODE_RAW,
67  IMODE_NORM2,
68  IMODE_DIFF2,
69  IMODE_NORM6,
70  IMODE_DIFF6,
71  IMODE_ROWSKIP,
72  IMODE_COLSKIP
73 }; //imode defines
75 
76 static void init_block_index(VC1Context *v)
77 {
78  MpegEncContext *s = &v->s;
79  ff_init_block_index(s);
80  if (v->field_mode && v->second_field) {
81  s->dest[0] += s->current_picture_ptr->f.linesize[0];
82  s->dest[1] += s->current_picture_ptr->f.linesize[1];
83  s->dest[2] += s->current_picture_ptr->f.linesize[2];
84  }
85 }
86 
87  //Bitplane group
89 
90 static void vc1_put_signed_blocks_clamped(VC1Context *v)
91 {
92  MpegEncContext *s = &v->s;
93  int topleft_mb_pos, top_mb_pos;
94  int stride_y, fieldtx = 0;
95  int v_dist;
96 
97  /* The put pixels loop is always one MB row behind the decoding loop,
98  * because we can only put pixels when overlap filtering is done, and
99  * for filtering of the bottom edge of a MB, we need the next MB row
100  * present as well.
101  * Within the row, the put pixels loop is also one MB col behind the
102  * decoding loop. The reason for this is again, because for filtering
103  * of the right MB edge, we need the next MB present. */
104  if (!s->first_slice_line) {
105  if (s->mb_x) {
106  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
107  if (v->fcm == ILACE_FRAME)
108  fieldtx = v->fieldtx_plane[topleft_mb_pos];
109  stride_y = s->linesize << fieldtx;
110  v_dist = (16 - fieldtx) >> (fieldtx == 0);
111  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
112  s->dest[0] - 16 * s->linesize - 16,
113  stride_y);
114  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
115  s->dest[0] - 16 * s->linesize - 8,
116  stride_y);
117  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
118  s->dest[0] - v_dist * s->linesize - 16,
119  stride_y);
120  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
121  s->dest[0] - v_dist * s->linesize - 8,
122  stride_y);
123  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
124  s->dest[1] - 8 * s->uvlinesize - 8,
125  s->uvlinesize);
126  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
127  s->dest[2] - 8 * s->uvlinesize - 8,
128  s->uvlinesize);
129  }
130  if (s->mb_x == s->mb_width - 1) {
131  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
132  if (v->fcm == ILACE_FRAME)
133  fieldtx = v->fieldtx_plane[top_mb_pos];
134  stride_y = s->linesize << fieldtx;
135  v_dist = fieldtx ? 15 : 8;
136  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
137  s->dest[0] - 16 * s->linesize,
138  stride_y);
139  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
140  s->dest[0] - 16 * s->linesize + 8,
141  stride_y);
142  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
143  s->dest[0] - v_dist * s->linesize,
144  stride_y);
145  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
146  s->dest[0] - v_dist * s->linesize + 8,
147  stride_y);
148  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
149  s->dest[1] - 8 * s->uvlinesize,
150  s->uvlinesize);
151  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
152  s->dest[2] - 8 * s->uvlinesize,
153  s->uvlinesize);
154  }
155  }
156 
157 #define inc_blk_idx(idx) do { \
158  idx++; \
159  if (idx >= v->n_allocated_blks) \
160  idx = 0; \
161  } while (0)
162 
163  inc_blk_idx(v->topleft_blk_idx);
164  inc_blk_idx(v->top_blk_idx);
165  inc_blk_idx(v->left_blk_idx);
166  inc_blk_idx(v->cur_blk_idx);
167 }
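
/* Standalone illustration (not part of vc1dec.c): the cur/left/top/topleft block
 * indices rotated by inc_blk_idx() above walk around a small ring of coefficient
 * buffers, so the overlap filter and the delayed put_pixels loop can still see
 * the neighbouring macroblocks' blocks. The ring size used here is only an
 * example value; the decoder sizes the ring from the picture width. */
#include <stdio.h>

#define N_ALLOCATED_BLKS 4   /* example only */
#define INC_BLK_IDX(idx) do { if (++(idx) >= N_ALLOCATED_BLKS) (idx) = 0; } while (0)

int main(void)
{
    int cur = 3, left = 2, top = 1, topleft = 0;
    int mb;
    for (mb = 0; mb < 3; mb++) {
        printf("mb %d: topleft=%d top=%d left=%d cur=%d\n", mb, topleft, top, left, cur);
        INC_BLK_IDX(topleft);
        INC_BLK_IDX(top);
        INC_BLK_IDX(left);
        INC_BLK_IDX(cur);
    }
    return 0;
}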
168 
169 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
170 {
171  MpegEncContext *s = &v->s;
172  int j;
173  if (!s->first_slice_line) {
174  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
175  if (s->mb_x)
176  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
177  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
178  for (j = 0; j < 2; j++) {
179  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
180  if (s->mb_x)
181  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
182  }
183  }
184  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
185 
186  if (s->mb_y == s->end_mb_y - 1) {
187  if (s->mb_x) {
188  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
189  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
190  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
191  }
192  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
193  }
194 }
195 
196 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
197 {
198  MpegEncContext *s = &v->s;
199  int j;
200 
201  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
202  * means it runs two rows/cols behind the decoding loop. */
203  if (!s->first_slice_line) {
204  if (s->mb_x) {
205  if (s->mb_y >= s->start_mb_y + 2) {
206  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
207 
208  if (s->mb_x >= 2)
209  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
210  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
211  for (j = 0; j < 2; j++) {
212  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
213  if (s->mb_x >= 2) {
214  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
215  }
216  }
217  }
218  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
219  }
220 
221  if (s->mb_x == s->mb_width - 1) {
222  if (s->mb_y >= s->start_mb_y + 2) {
223  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
224 
225  if (s->mb_x)
226  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
228  for (j = 0; j < 2; j++) {
229  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
230  if (s->mb_x >= 2) {
231  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
232  }
233  }
234  }
235  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
236  }
237 
238  if (s->mb_y == s->end_mb_y) {
239  if (s->mb_x) {
240  if (s->mb_x >= 2)
241  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
242  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
243  if (s->mb_x >= 2) {
244  for (j = 0; j < 2; j++) {
245  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
246  }
247  }
248  }
249 
250  if (s->mb_x == s->mb_width - 1) {
251  if (s->mb_x)
252  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
253  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
254  if (s->mb_x) {
255  for (j = 0; j < 2; j++) {
256  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
257  }
258  }
259  }
260  }
261  }
262 }
263 
264 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
265 {
266  MpegEncContext *s = &v->s;
267  int mb_pos;
268 
269  if (v->condover == CONDOVER_NONE)
270  return;
271 
272  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
273 
274  /* Within a MB, the horizontal overlap always runs before the vertical.
275  * To accomplish that, we run the H on left and internal borders of the
276  * currently decoded MB. Then, we wait for the next overlap iteration
277  * to do H overlap on the right edge of this MB, before moving over and
278  * running the V overlap. Therefore, the V overlap makes us trail by one
279  * MB col and the H overlap filter makes us trail by one MB row. This
280  * is reflected in the time at which we run the put_pixels loop. */
281  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
282  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
283  v->over_flags_plane[mb_pos - 1])) {
284  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
285  v->block[v->cur_blk_idx][0]);
286  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
287  v->block[v->cur_blk_idx][2]);
288  if (!(s->flags & CODEC_FLAG_GRAY)) {
289  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
290  v->block[v->cur_blk_idx][4]);
291  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
292  v->block[v->cur_blk_idx][5]);
293  }
294  }
295  v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
296  v->block[v->cur_blk_idx][1]);
297  v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
298  v->block[v->cur_blk_idx][3]);
299 
300  if (s->mb_x == s->mb_width - 1) {
301  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
302  v->over_flags_plane[mb_pos - s->mb_stride])) {
303  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
304  v->block[v->cur_blk_idx][0]);
305  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
306  v->block[v->cur_blk_idx][1]);
307  if (!(s->flags & CODEC_FLAG_GRAY)) {
308  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
309  v->block[v->cur_blk_idx][4]);
310  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
311  v->block[v->cur_blk_idx][5]);
312  }
313  }
314  v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
315  v->block[v->cur_blk_idx][2]);
316  v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
317  v->block[v->cur_blk_idx][3]);
318  }
319  }
320  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
321  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
322  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
323  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
324  v->block[v->left_blk_idx][0]);
325  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
326  v->block[v->left_blk_idx][1]);
327  if (!(s->flags & CODEC_FLAG_GRAY)) {
328  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
329  v->block[v->left_blk_idx][4]);
330  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
331  v->block[v->left_blk_idx][5]);
332  }
333  }
334  v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
335  v->block[v->left_blk_idx][2]);
336  v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
337  v->block[v->left_blk_idx][3]);
338  }
339 }
340 
344 static void vc1_mc_1mv(VC1Context *v, int dir)
345 {
346  MpegEncContext *s = &v->s;
347  DSPContext *dsp = &v->s.dsp;
348  H264ChromaContext *h264chroma = &v->h264chroma;
349  uint8_t *srcY, *srcU, *srcV;
350  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
351  int off, off_uv;
352  int v_edge_pos = s->v_edge_pos >> v->field_mode;
353 
354  if ((!v->field_mode ||
355  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
356  !v->s.last_picture.f.data[0])
357  return;
358 
359  mx = s->mv[dir][0][0];
360  my = s->mv[dir][0][1];
361 
362  // store motion vectors for further use in B frames
363  if (s->pict_type == AV_PICTURE_TYPE_P) {
364  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = mx;
365  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = my;
366  }
367 
368  uvmx = (mx + ((mx & 3) == 3)) >> 1;
369  uvmy = (my + ((my & 3) == 3)) >> 1;
370  v->luma_mv[s->mb_x][0] = uvmx;
371  v->luma_mv[s->mb_x][1] = uvmy;
372 
373  if (v->field_mode &&
374  v->cur_field_type != v->ref_field_type[dir]) {
375  my = my - 2 + 4 * v->cur_field_type;
376  uvmy = uvmy - 2 + 4 * v->cur_field_type;
377  }
378 
379  // fastuvmc shall be ignored for interlaced frame picture
380  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
381  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
382  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
383  }
384  if (v->field_mode) { // interlaced field picture
385  if (!dir) {
386  if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
387  srcY = s->current_picture.f.data[0];
388  srcU = s->current_picture.f.data[1];
389  srcV = s->current_picture.f.data[2];
390  } else {
391  srcY = s->last_picture.f.data[0];
392  srcU = s->last_picture.f.data[1];
393  srcV = s->last_picture.f.data[2];
394  }
395  } else {
396  srcY = s->next_picture.f.data[0];
397  srcU = s->next_picture.f.data[1];
398  srcV = s->next_picture.f.data[2];
399  }
400  } else {
401  if (!dir) {
402  srcY = s->last_picture.f.data[0];
403  srcU = s->last_picture.f.data[1];
404  srcV = s->last_picture.f.data[2];
405  } else {
406  srcY = s->next_picture.f.data[0];
407  srcU = s->next_picture.f.data[1];
408  srcV = s->next_picture.f.data[2];
409  }
410  }
411 
412  if(!srcY)
413  return;
414 
415  src_x = s->mb_x * 16 + (mx >> 2);
416  src_y = s->mb_y * 16 + (my >> 2);
417  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
418  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
419 
420  if (v->profile != PROFILE_ADVANCED) {
421  src_x = av_clip( src_x, -16, s->mb_width * 16);
422  src_y = av_clip( src_y, -16, s->mb_height * 16);
423  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
424  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
425  } else {
426  src_x = av_clip( src_x, -17, s->avctx->coded_width);
427  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
428  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
429  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
430  }
431 
432  srcY += src_y * s->linesize + src_x;
433  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
434  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
435 
436  if (v->field_mode && v->ref_field_type[dir]) {
437  srcY += s->current_picture_ptr->f.linesize[0];
438  srcU += s->current_picture_ptr->f.linesize[1];
439  srcV += s->current_picture_ptr->f.linesize[2];
440  }
441 
442  /* for grayscale we should not try to read from unknown area */
443  if (s->flags & CODEC_FLAG_GRAY) {
444  srcU = s->edge_emu_buffer + 18 * s->linesize;
445  srcV = s->edge_emu_buffer + 18 * s->linesize;
446  }
447 
447 
448  if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
449  || s->h_edge_pos < 22 || v_edge_pos < 22
450  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
451  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
452  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
453 
454  srcY -= s->mspel * (1 + s->linesize);
455  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
456  17 + s->mspel * 2, 17 + s->mspel * 2,
457  src_x - s->mspel, src_y - s->mspel,
458  s->h_edge_pos, v_edge_pos);
459  srcY = s->edge_emu_buffer;
460  s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
461  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
462  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
463  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
464  srcU = uvbuf;
465  srcV = uvbuf + 16;
466  /* if we deal with range reduction we need to scale source blocks */
467  if (v->rangeredfrm) {
468  int i, j;
469  uint8_t *src, *src2;
470 
471  src = srcY;
472  for (j = 0; j < 17 + s->mspel * 2; j++) {
473  for (i = 0; i < 17 + s->mspel * 2; i++)
474  src[i] = ((src[i] - 128) >> 1) + 128;
475  src += s->linesize;
476  }
477  src = srcU;
478  src2 = srcV;
479  for (j = 0; j < 9; j++) {
480  for (i = 0; i < 9; i++) {
481  src[i] = ((src[i] - 128) >> 1) + 128;
482  src2[i] = ((src2[i] - 128) >> 1) + 128;
483  }
484  src += s->uvlinesize;
485  src2 += s->uvlinesize;
486  }
487  }
488  /* if we deal with intensity compensation we need to scale source blocks */
489  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
490  int i, j;
491  uint8_t *src, *src2;
492 
493  src = srcY;
494  for (j = 0; j < 17 + s->mspel * 2; j++) {
495  for (i = 0; i < 17 + s->mspel * 2; i++)
496  src[i] = v->luty[src[i]];
497  src += s->linesize;
498  }
499  src = srcU;
500  src2 = srcV;
501  for (j = 0; j < 9; j++) {
502  for (i = 0; i < 9; i++) {
503  src[i] = v->lutuv[src[i]];
504  src2[i] = v->lutuv[src2[i]];
505  }
506  src += s->uvlinesize;
507  src2 += s->uvlinesize;
508  }
509  }
510  srcY += s->mspel * (1 + s->linesize);
511  }
512 
513  off = 0;
514  off_uv = 0;
515  if (s->mspel) {
516  dxy = ((my & 3) << 2) | (mx & 3);
517  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
518  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
519  srcY += s->linesize * 8;
520  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
521  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
522  } else { // hpel mc - always used for luma
523  dxy = (my & 2) | ((mx & 2) >> 1);
524  if (!v->rnd)
525  dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
526  else
527  dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
528  }
529 
530  if (s->flags & CODEC_FLAG_GRAY) return;
531  /* Chroma MC always uses qpel bilinear */
532  uvmx = (uvmx & 3) << 1;
533  uvmy = (uvmy & 3) << 1;
534  if (!v->rnd) {
535  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
536  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
537  } else {
538  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
539  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
540  }
541 }
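
/* Standalone illustration (not part of vc1dec.c): the luma-to-chroma MV
 * derivation and FASTUVMC rounding used in vc1_mc_1mv() above. The quarter-pel
 * luma component is halved (biasing the 3/4-pel phase upwards) and, with
 * FASTUVMC, odd results are pulled toward zero so chroma lands on a half-pel
 * position. Sample inputs are arbitrary. */
#include <stdio.h>

static int chroma_mv_component(int m, int fastuvmc)
{
    int uvm = (m + ((m & 3) == 3)) >> 1;
    if (fastuvmc)
        uvm = uvm + ((uvm < 0) ? (uvm & 1) : -(uvm & 1));
    return uvm;
}

int main(void)
{
    static const int mv[4] = { 7, -7, 6, 3 };
    int i;
    for (i = 0; i < 4; i++)
        printf("mx=%3d -> uvmx=%3d (with fastuvmc: %3d)\n",
               mv[i], chroma_mv_component(mv[i], 0), chroma_mv_component(mv[i], 1));
    return 0;
}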
542 
543 static inline int median4(int a, int b, int c, int d)
544 {
545  if (a < b) {
546  if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
547  else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
548  } else {
549  if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
550  else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
551  }
552 }
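
/* Standalone illustration (not part of vc1dec.c): median4() above returns the
 * average of the two middle values of its four arguments, e.g. for
 * {1, 9, 4, 100} the middle values are 4 and 9, giving 6. */
#include <stdio.h>

#define FFMIN(a, b) ((a) > (b) ? (b) : (a))
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

static int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}

int main(void)
{
    printf("%d\n", median4(1, 9, 4, 100)); /* prints  6 */
    printf("%d\n", median4(-8, 2, 0, -2)); /* prints -1 */
    return 0;
}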
553 
556 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
557 {
558  MpegEncContext *s = &v->s;
559  DSPContext *dsp = &v->s.dsp;
560  uint8_t *srcY;
561  int dxy, mx, my, src_x, src_y;
562  int off;
563  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
564  int v_edge_pos = s->v_edge_pos >> v->field_mode;
565 
566  if ((!v->field_mode ||
567  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
568  !v->s.last_picture.f.data[0])
569  return;
570 
571  mx = s->mv[dir][n][0];
572  my = s->mv[dir][n][1];
573 
574  if (!dir) {
575  if (v->field_mode) {
576  if ((v->cur_field_type != v->ref_field_type[dir]) && v->second_field)
577  srcY = s->current_picture.f.data[0];
578  else
579  srcY = s->last_picture.f.data[0];
580  } else
581  srcY = s->last_picture.f.data[0];
582  } else
583  srcY = s->next_picture.f.data[0];
584 
585  if(!srcY)
586  return;
587 
588  if (v->field_mode) {
589  if (v->cur_field_type != v->ref_field_type[dir])
590  my = my - 2 + 4 * v->cur_field_type;
591  }
592 
593  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
594  int same_count = 0, opp_count = 0, k;
595  int chosen_mv[2][4][2], f;
596  int tx, ty;
597  for (k = 0; k < 4; k++) {
598  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
599  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
600  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
601  opp_count += f;
602  same_count += 1 - f;
603  }
604  f = opp_count > same_count;
605  switch (f ? opp_count : same_count) {
606  case 4:
607  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
608  chosen_mv[f][2][0], chosen_mv[f][3][0]);
609  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
610  chosen_mv[f][2][1], chosen_mv[f][3][1]);
611  break;
612  case 3:
613  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
614  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
615  break;
616  case 2:
617  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
618  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
619  break;
620  default:
621  av_assert2(0);
622  }
623  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
624  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
625  for (k = 0; k < 4; k++)
626  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
627  }
628 
629  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
630  int qx, qy;
631  int width = s->avctx->coded_width;
632  int height = s->avctx->coded_height >> 1;
633  qx = (s->mb_x * 16) + (mx >> 2);
634  qy = (s->mb_y * 8) + (my >> 3);
635 
636  if (qx < -17)
637  mx -= 4 * (qx + 17);
638  else if (qx > width)
639  mx -= 4 * (qx - width);
640  if (qy < -18)
641  my -= 8 * (qy + 18);
642  else if (qy > height + 1)
643  my -= 8 * (qy - height - 1);
644  }
645 
646  if ((v->fcm == ILACE_FRAME) && fieldmv)
647  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
648  else
649  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
650 
651  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
652  if (!fieldmv)
653  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
654  else
655  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
656 
657  if (v->profile != PROFILE_ADVANCED) {
658  src_x = av_clip(src_x, -16, s->mb_width * 16);
659  src_y = av_clip(src_y, -16, s->mb_height * 16);
660  } else {
661  src_x = av_clip(src_x, -17, s->avctx->coded_width);
662  if (v->fcm == ILACE_FRAME) {
663  if (src_y & 1)
664  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
665  else
666  src_y = av_clip(src_y, -18, s->avctx->coded_height);
667  } else {
668  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
669  }
670  }
671 
672  srcY += src_y * s->linesize + src_x;
673  if (v->field_mode && v->ref_field_type[dir])
674  srcY += s->current_picture_ptr->f.linesize[0];
675 
676  if (fieldmv && !(src_y & 1))
677  v_edge_pos--;
678  if (fieldmv && (src_y & 1) && src_y < 4)
679  src_y--;
680  if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
681  || s->h_edge_pos < 13 || v_edge_pos < 23
682  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
683  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
684  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
685  /* check emulate edge stride and offset */
686  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
687  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
688  src_x - s->mspel, src_y - (s->mspel << fieldmv),
689  s->h_edge_pos, v_edge_pos);
690  srcY = s->edge_emu_buffer;
691  /* if we deal with range reduction we need to scale source blocks */
692  if (v->rangeredfrm) {
693  int i, j;
694  uint8_t *src;
695 
696  src = srcY;
697  for (j = 0; j < 9 + s->mspel * 2; j++) {
698  for (i = 0; i < 9 + s->mspel * 2; i++)
699  src[i] = ((src[i] - 128) >> 1) + 128;
700  src += s->linesize << fieldmv;
701  }
702  }
703  /* if we deal with intensity compensation we need to scale source blocks */
704  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
705  int i, j;
706  uint8_t *src;
707 
708  src = srcY;
709  for (j = 0; j < 9 + s->mspel * 2; j++) {
710  for (i = 0; i < 9 + s->mspel * 2; i++)
711  src[i] = v->luty[src[i]];
712  src += s->linesize << fieldmv;
713  }
714  }
715  srcY += s->mspel * (1 + (s->linesize << fieldmv));
716  }
717 
718  if (s->mspel) {
719  dxy = ((my & 3) << 2) | (mx & 3);
720  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
721  } else { // hpel mc - always used for luma
722  dxy = (my & 2) | ((mx & 2) >> 1);
723  if (!v->rnd)
724  dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
725  else
726  dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
727  }
728 }
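
/* Standalone illustration (not part of vc1dec.c): the horizontal half of the MV
 * pull-back applied in the ILACE_FRAME branch of vc1_mc_4mv_luma() above. The
 * block position is in full pels, the MV in quarter pels; if the reference
 * block would start left of -17 or right of the coded width, the MV is
 * shortened accordingly. Width and sample values are arbitrary. */
#include <stdio.h>

static int pull_back_mx(int mb_x, int mx, int width)
{
    int qx = mb_x * 16 + (mx >> 2);   /* full-pel start of the reference block */
    if (qx < -17)
        mx -= 4 * (qx + 17);          /* shift right, back to the -17 limit */
    else if (qx > width)
        mx -= 4 * (qx - width);       /* shift left, back to the width limit */
    return mx;
}

int main(void)
{
    printf("%d\n", pull_back_mx(0, -100, 320)); /* qx = -25 < -17 -> mx becomes -68 */
    printf("%d\n", pull_back_mx(19,  80, 320)); /* qx = 324 > 320 -> mx becomes  64 */
    return 0;
}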
729 
730 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
731 {
732  int idx, i;
733  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
734 
735  idx = ((a[3] != flag) << 3)
736  | ((a[2] != flag) << 2)
737  | ((a[1] != flag) << 1)
738  | (a[0] != flag);
739  if (!idx) {
740  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
741  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
742  return 4;
743  } else if (count[idx] == 1) {
744  switch (idx) {
745  case 0x1:
746  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
747  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
748  return 3;
749  case 0x2:
750  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
751  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
752  return 3;
753  case 0x4:
754  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
755  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
756  return 3;
757  case 0x8:
758  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
759  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
760  return 3;
761  }
762  } else if (count[idx] == 2) {
763  int t1 = 0, t2 = 0;
764  for (i = 0; i < 3; i++)
765  if (!a[i]) {
766  t1 = i;
767  break;
768  }
769  for (i = t1 + 1; i < 4; i++)
770  if (!a[i]) {
771  t2 = i;
772  break;
773  }
774  *tx = (mvx[t1] + mvx[t2]) / 2;
775  *ty = (mvy[t1] + mvy[t2]) / 2;
776  return 2;
777  } else {
778  return 0;
779  }
780  return -1;
781 }
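
/* Standalone illustration (not part of vc1dec.c): how get_chroma_mv() above
 * turns the four per-block flags into a candidate count. A set bit in idx marks
 * a block whose flag does not match (e.g. an intra block); the popcount table
 * then selects median-of-4, median-of-3, average-of-2 or no prediction.
 * The sample flags are arbitrary. */
#include <stdio.h>

int main(void)
{
    static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 };
    int a[4] = { 0, 1, 0, 0 };   /* block 1 is intra, the others are not */
    int flag = 0;
    int idx  = ((a[3] != flag) << 3) | ((a[2] != flag) << 2) |
               ((a[1] != flag) << 1) |  (a[0] != flag);

    if (!idx)
        printf("all four usable: median of 4\n");
    else if (count[idx] == 1)
        printf("one excluded (idx=%d): median of the remaining 3\n", idx);
    else if (count[idx] == 2)
        printf("two excluded: average of the remaining 2\n");
    else
        printf("fewer than two usable: no chroma MC\n");
    return 0;
}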
782 
785 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
786 {
787  MpegEncContext *s = &v->s;
788  H264ChromaContext *h264chroma = &v->h264chroma;
789  uint8_t *srcU, *srcV;
790  int uvmx, uvmy, uvsrc_x, uvsrc_y;
791  int k, tx = 0, ty = 0;
792  int mvx[4], mvy[4], intra[4], mv_f[4];
793  int valid_count;
794  int chroma_ref_type = v->cur_field_type, off = 0;
795  int v_edge_pos = s->v_edge_pos >> v->field_mode;
796 
797  if (!v->field_mode && !v->s.last_picture.f.data[0])
798  return;
799  if (s->flags & CODEC_FLAG_GRAY)
800  return;
801 
802  for (k = 0; k < 4; k++) {
803  mvx[k] = s->mv[dir][k][0];
804  mvy[k] = s->mv[dir][k][1];
805  intra[k] = v->mb_type[0][s->block_index[k]];
806  if (v->field_mode)
807  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
808  }
809 
810  /* calculate chroma MV vector from four luma MVs */
811  if (!v->field_mode || (v->field_mode && !v->numref)) {
812  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
813  chroma_ref_type = v->reffield;
814  if (!valid_count) {
815  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
816  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
817  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
818  return; //no need to do MC for intra blocks
819  }
820  } else {
821  int dominant = 0;
822  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
823  dominant = 1;
824  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
825  if (dominant)
826  chroma_ref_type = !v->cur_field_type;
827  }
828  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
829  return;
830  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
831  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
832  uvmx = (tx + ((tx & 3) == 3)) >> 1;
833  uvmy = (ty + ((ty & 3) == 3)) >> 1;
834 
835  v->luma_mv[s->mb_x][0] = uvmx;
836  v->luma_mv[s->mb_x][1] = uvmy;
837 
838  if (v->fastuvmc) {
839  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
840  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
841  }
842  // Field conversion bias
843  if (v->cur_field_type != chroma_ref_type)
844  uvmy += 2 - 4 * chroma_ref_type;
845 
846  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
847  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
848 
849  if (v->profile != PROFILE_ADVANCED) {
850  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
851  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
852  } else {
853  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
854  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
855  }
856 
857  if (!dir) {
858  if (v->field_mode) {
859  if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
860  srcU = s->current_picture.f.data[1];
861  srcV = s->current_picture.f.data[2];
862  } else {
863  srcU = s->last_picture.f.data[1];
864  srcV = s->last_picture.f.data[2];
865  }
866  } else {
867  srcU = s->last_picture.f.data[1];
868  srcV = s->last_picture.f.data[2];
869  }
870  } else {
871  srcU = s->next_picture.f.data[1];
872  srcV = s->next_picture.f.data[2];
873  }
874 
875  if(!srcU)
876  return;
877 
878  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
879  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
880 
881  if (v->field_mode) {
882  if (chroma_ref_type) {
883  srcU += s->current_picture_ptr->f.linesize[1];
884  srcV += s->current_picture_ptr->f.linesize[2];
885  }
886  off = 0;
887  }
888 
888 
889  if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
890  || s->h_edge_pos < 18 || v_edge_pos < 18
891  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
892  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
893  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
894  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
895  s->h_edge_pos >> 1, v_edge_pos >> 1);
896  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
897  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
898  s->h_edge_pos >> 1, v_edge_pos >> 1);
899  srcU = s->edge_emu_buffer;
900  srcV = s->edge_emu_buffer + 16;
901 
902  /* if we deal with range reduction we need to scale source blocks */
903  if (v->rangeredfrm) {
904  int i, j;
905  uint8_t *src, *src2;
906 
907  src = srcU;
908  src2 = srcV;
909  for (j = 0; j < 9; j++) {
910  for (i = 0; i < 9; i++) {
911  src[i] = ((src[i] - 128) >> 1) + 128;
912  src2[i] = ((src2[i] - 128) >> 1) + 128;
913  }
914  src += s->uvlinesize;
915  src2 += s->uvlinesize;
916  }
917  }
918  /* if we deal with intensity compensation we need to scale source blocks */
919  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
920  int i, j;
921  uint8_t *src, *src2;
922 
923  src = srcU;
924  src2 = srcV;
925  for (j = 0; j < 9; j++) {
926  for (i = 0; i < 9; i++) {
927  src[i] = v->lutuv[src[i]];
928  src2[i] = v->lutuv[src2[i]];
929  }
930  src += s->uvlinesize;
931  src2 += s->uvlinesize;
932  }
933  }
934  }
935 
936  /* Chroma MC always uses qpel bilinear */
937  uvmx = (uvmx & 3) << 1;
938  uvmy = (uvmy & 3) << 1;
939  if (!v->rnd) {
940  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
941  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
942  } else {
943  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off, srcU, s->uvlinesize, 8, uvmx, uvmy);
944  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off, srcV, s->uvlinesize, 8, uvmx, uvmy);
945  }
946 }
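
/* Standalone illustration (not part of vc1dec.c): the per-sample transforms
 * applied to emulated source blocks in the MC functions above. Range reduction
 * halves the signal excursion around 128; intensity compensation is a plain
 * table lookup (v->luty / v->lutuv in the decoder - the identity table below is
 * only a placeholder). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t lut[256];                              /* stands in for v->luty / v->lutuv */
    static const uint8_t samples[4] = { 0, 100, 128, 255 };
    int i;

    for (i = 0; i < 256; i++)
        lut[i] = i;                                /* identity: placeholder only */

    for (i = 0; i < 4; i++) {
        int rangered = ((samples[i] - 128) >> 1) + 128;  /* range reduction */
        int intcomp  = lut[samples[i]];                  /* intensity compensation */
        printf("%3d -> range-reduced %3d, intensity-compensated %3d\n",
               samples[i], rangered, intcomp);
    }
    return 0;
}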
947 
950 static void vc1_mc_4mv_chroma4(VC1Context *v)
951 {
952  MpegEncContext *s = &v->s;
953  H264ChromaContext *h264chroma = &v->h264chroma;
954  uint8_t *srcU, *srcV;
955  int uvsrc_x, uvsrc_y;
956  int uvmx_field[4], uvmy_field[4];
957  int i, off, tx, ty;
958  int fieldmv = v->blk_mv_type[s->block_index[0]];
959  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
960  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
961  int v_edge_pos = s->v_edge_pos >> 1;
962 
963  if (!v->s.last_picture.f.data[0])
964  return;
965  if (s->flags & CODEC_FLAG_GRAY)
966  return;
967 
968  for (i = 0; i < 4; i++) {
969  tx = s->mv[0][i][0];
970  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
971  ty = s->mv[0][i][1];
972  if (fieldmv)
973  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
974  else
975  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
976  }
977 
978  for (i = 0; i < 4; i++) {
979  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
980  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
981  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
982  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
983  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
984  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
985  srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
986  srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
987  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
988  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
989 
990  if (fieldmv && !(uvsrc_y & 1))
991  v_edge_pos = (s->v_edge_pos >> 1) - 1;
992 
993  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
994  uvsrc_y--;
995  if ((v->mv_mode == MV_PMODE_INTENSITY_COMP)
996  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
997  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
998  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
999  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU, s->uvlinesize,
1000  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1001  s->h_edge_pos >> 1, v_edge_pos);
1002  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV, s->uvlinesize,
1003  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1004  s->h_edge_pos >> 1, v_edge_pos);
1005  srcU = s->edge_emu_buffer;
1006  srcV = s->edge_emu_buffer + 16;
1007 
1008  /* if we deal with intensity compensation we need to scale source blocks */
1009  if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
1010  int i, j;
1011  uint8_t *src, *src2;
1012 
1013  src = srcU;
1014  src2 = srcV;
1015  for (j = 0; j < 5; j++) {
1016  for (i = 0; i < 5; i++) {
1017  src[i] = v->lutuv[src[i]];
1018  src2[i] = v->lutuv[src2[i]];
1019  }
1020  src += s->uvlinesize << 1;
1021  src2 += s->uvlinesize << 1;
1022  }
1023  }
1024  }
1025  if (!v->rnd) {
1026  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1027  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1028  } else {
1029  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1030  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1031  }
1032  }
1033 }
1034 
1035 /***********************************************************************/
1046 #define GET_MQUANT() \
1047  if (v->dquantfrm) { \
1048  int edges = 0; \
1049  if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1050  if (v->dqbilevel) { \
1051  mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1052  } else { \
1053  mqdiff = get_bits(gb, 3); \
1054  if (mqdiff != 7) \
1055  mquant = v->pq + mqdiff; \
1056  else \
1057  mquant = get_bits(gb, 5); \
1058  } \
1059  } \
1060  if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1061  edges = 1 << v->dqsbedge; \
1062  else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1063  edges = (3 << v->dqsbedge) % 15; \
1064  else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1065  edges = 15; \
1066  if ((edges&1) && !s->mb_x) \
1067  mquant = v->altpq; \
1068  if ((edges&2) && s->first_slice_line) \
1069  mquant = v->altpq; \
1070  if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1071  mquant = v->altpq; \
1072  if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1073  mquant = v->altpq; \
1074  if (!mquant || mquant > 31) { \
1075  av_log(v->s.avctx, AV_LOG_ERROR, \
1076  "Overriding invalid mquant %d\n", mquant); \
1077  mquant = 1; \
1078  } \
1079  }
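
/* Standalone illustration (not part of vc1dec.c): how the `edges` bitmask built
 * by GET_MQUANT() above switches boundary macroblocks to ALTPQUANT for the
 * edge-based dquant profiles (the per-MB DQPROFILE_ALL_MBS path is separate).
 * Bit 0 = left column, bit 1 = top row, bit 2 = right column, bit 3 = bottom
 * row. The macro actually tests s->first_slice_line for the top edge; !mb_y
 * stands in for it here. Picture size and quantizer values are arbitrary. */
#include <stdio.h>

static int mquant_for_mb(int edges, int mb_x, int mb_y,
                         int mb_width, int mb_height, int pq, int altpq)
{
    int mquant = pq;
    if ((edges & 1) && !mb_x)                 mquant = altpq;
    if ((edges & 2) && !mb_y)                 mquant = altpq;
    if ((edges & 4) && mb_x == mb_width  - 1) mquant = altpq;
    if ((edges & 8) && mb_y == mb_height - 1) mquant = altpq;
    return mquant;
}

int main(void)
{
    int edges = 3;  /* e.g. DQPROFILE_DOUBLE_EDGES with dqsbedge = 0: left + top */
    printf("%d\n", mquant_for_mb(edges, 0, 5, 40, 30, 4, 12)); /* left column -> 12 */
    printf("%d\n", mquant_for_mb(edges, 7, 0, 40, 30, 4, 12)); /* top row     -> 12 */
    printf("%d\n", mquant_for_mb(edges, 7, 5, 40, 30, 4, 12)); /* interior    ->  4 */
    return 0;
}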
1080 
1088 #define GET_MVDATA(_dmv_x, _dmv_y) \
1089  index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1090  VC1_MV_DIFF_VLC_BITS, 2); \
1091  if (index > 36) { \
1092  mb_has_coeffs = 1; \
1093  index -= 37; \
1094  } else \
1095  mb_has_coeffs = 0; \
1096  s->mb_intra = 0; \
1097  if (!index) { \
1098  _dmv_x = _dmv_y = 0; \
1099  } else if (index == 35) { \
1100  _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1101  _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1102  } else if (index == 36) { \
1103  _dmv_x = 0; \
1104  _dmv_y = 0; \
1105  s->mb_intra = 1; \
1106  } else { \
1107  index1 = index % 6; \
1108  if (!s->quarter_sample && index1 == 5) val = 1; \
1109  else val = 0; \
1110  if (size_table[index1] - val > 0) \
1111  val = get_bits(gb, size_table[index1] - val); \
1112  else val = 0; \
1113  sign = 0 - (val&1); \
1114  _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1115  \
1116  index1 = index / 6; \
1117  if (!s->quarter_sample && index1 == 5) val = 1; \
1118  else val = 0; \
1119  if (size_table[index1] - val > 0) \
1120  val = get_bits(gb, size_table[index1] - val); \
1121  else val = 0; \
1122  sign = 0 - (val & 1); \
1123  _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1124  }
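
/* Standalone illustration (not part of vc1dec.c): GET_MVDATA() above decodes a
 * single joint VLC index per MV differential and splits it into per-component
 * table indices, index % 6 for the horizontal and index / 6 for the vertical
 * part; each component then reads size_table[index1] extra bits on top of
 * offset_table[index1]. The tables below are illustrative stand-ins for the
 * ones the calling P-frame decoder points size_table/offset_table at. */
#include <stdio.h>

int main(void)
{
    static const int size_table[6]   = { 0, 2, 3, 4, 5, 8 };
    static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
    int index = 14;             /* example joint index (escapes 35/36 are handled separately) */
    int index1_x = index % 6;   /* table index for dmv_x */
    int index1_y = index / 6;   /* table index for dmv_y */

    printf("index %d -> dmv_x: %d extra bits above offset %d\n",
           index, size_table[index1_x], offset_table[index1_x]);
    printf("index %d -> dmv_y: %d extra bits above offset %d\n",
           index, size_table[index1_y], offset_table[index1_y]);
    return 0;
}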
1125 
1126 static void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1127  int *dmv_y, int *pred_flag)
1128 {
1129  int index, index1;
1130  int extend_x = 0, extend_y = 0;
1131  GetBitContext *gb = &v->s.gb;
1132  int bits, esc;
1133  int val, sign;
1134  const int* offs_tab;
1135 
1136  if (v->numref) {
1137  bits = VC1_2REF_MVDATA_VLC_BITS;
1138  esc = 125;
1139  } else {
1140  bits = VC1_1REF_MVDATA_VLC_BITS;
1141  esc = 71;
1142  }
1143  switch (v->dmvrange) {
1144  case 1:
1145  extend_x = 1;
1146  break;
1147  case 2:
1148  extend_y = 1;
1149  break;
1150  case 3:
1151  extend_x = extend_y = 1;
1152  break;
1153  }
1154  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1155  if (index == esc) {
1156  *dmv_x = get_bits(gb, v->k_x);
1157  *dmv_y = get_bits(gb, v->k_y);
1158  if (v->numref) {
1159  if (pred_flag) {
1160  *pred_flag = *dmv_y & 1;
1161  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1162  } else {
1163  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1164  }
1165  }
1166  }
1167  else {
1168  av_assert0(index < esc);
1169  if (extend_x)
1170  offs_tab = offset_table2;
1171  else
1172  offs_tab = offset_table1;
1173  index1 = (index + 1) % 9;
1174  if (index1 != 0) {
1175  val = get_bits(gb, index1 + extend_x);
1176  sign = 0 -(val & 1);
1177  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1178  } else
1179  *dmv_x = 0;
1180  if (extend_y)
1181  offs_tab = offset_table2;
1182  else
1183  offs_tab = offset_table1;
1184  index1 = (index + 1) / 9;
1185  if (index1 > v->numref) {
1186  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1187  sign = 0 - (val & 1);
1188  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1189  } else
1190  *dmv_y = 0;
1191  if (v->numref && pred_flag)
1192  *pred_flag = index1 & 1;
1193  }
1194 }
1195 
1196 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1197 {
1198  int scaledvalue, refdist;
1199  int scalesame1, scalesame2;
1200  int scalezone1_x, zone1offset_x;
1201  int table_index = dir ^ v->second_field;
1202 
1203  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1204  refdist = v->refdist;
1205  else
1206  refdist = dir ? v->brfd : v->frfd;
1207  if (refdist > 3)
1208  refdist = 3;
1209  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1210  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1211  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1212  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1213 
1214  if (FFABS(n) > 255)
1215  scaledvalue = n;
1216  else {
1217  if (FFABS(n) < scalezone1_x)
1218  scaledvalue = (n * scalesame1) >> 8;
1219  else {
1220  if (n < 0)
1221  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1222  else
1223  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1224  }
1225  }
1226  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1227 }
1228 
1229 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1230 {
1231  int scaledvalue, refdist;
1232  int scalesame1, scalesame2;
1233  int scalezone1_y, zone1offset_y;
1234  int table_index = dir ^ v->second_field;
1235 
1236  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1237  refdist = v->refdist;
1238  else
1239  refdist = dir ? v->brfd : v->frfd;
1240  if (refdist > 3)
1241  refdist = 3;
1242  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1243  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1244  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1245  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1246 
1247  if (FFABS(n) > 63)
1248  scaledvalue = n;
1249  else {
1250  if (FFABS(n) < scalezone1_y)
1251  scaledvalue = (n * scalesame1) >> 8;
1252  else {
1253  if (n < 0)
1254  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1255  else
1256  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1257  }
1258  }
1259 
1260  if (v->cur_field_type && !v->ref_field_type[dir])
1261  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1262  else
1263  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1264 }
1265 
1266 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1267 {
1268  int scalezone1_x, zone1offset_x;
1269  int scaleopp1, scaleopp2, brfd;
1270  int scaledvalue;
1271 
1272  brfd = FFMIN(v->brfd, 3);
1273  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1274  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1275  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1276  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1277 
1278  if (FFABS(n) > 255)
1279  scaledvalue = n;
1280  else {
1281  if (FFABS(n) < scalezone1_x)
1282  scaledvalue = (n * scaleopp1) >> 8;
1283  else {
1284  if (n < 0)
1285  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1286  else
1287  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1288  }
1289  }
1290  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1291 }
1292 
1293 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1294 {
1295  int scalezone1_y, zone1offset_y;
1296  int scaleopp1, scaleopp2, brfd;
1297  int scaledvalue;
1298 
1299  brfd = FFMIN(v->brfd, 3);
1300  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1301  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1302  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1303  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1304 
1305  if (FFABS(n) > 63)
1306  scaledvalue = n;
1307  else {
1308  if (FFABS(n) < scalezone1_y)
1309  scaledvalue = (n * scaleopp1) >> 8;
1310  else {
1311  if (n < 0)
1312  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1313  else
1314  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1315  }
1316  }
1317  if (v->cur_field_type && !v->ref_field_type[dir]) {
1318  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1319  } else {
1320  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1321  }
1322 }
1323 
1324 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1325  int dim, int dir)
1326 {
1327  int brfd, scalesame;
1328  int hpel = 1 - v->s.quarter_sample;
1329 
1330  n >>= hpel;
1331  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1332  if (dim)
1333  n = scaleforsame_y(v, i, n, dir) << hpel;
1334  else
1335  n = scaleforsame_x(v, n, dir) << hpel;
1336  return n;
1337  }
1338  brfd = FFMIN(v->brfd, 3);
1339  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1340 
1341  n = (n * scalesame >> 8) << hpel;
1342  return n;
1343 }
1344 
1345 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1346  int dim, int dir)
1347 {
1348  int refdist, scaleopp;
1349  int hpel = 1 - v->s.quarter_sample;
1350 
1351  n >>= hpel;
1352  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1353  if (dim)
1354  n = scaleforopp_y(v, n, dir) << hpel;
1355  else
1356  n = scaleforopp_x(v, n) << hpel;
1357  return n;
1358  }
1359  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1360  refdist = FFMIN(v->refdist, 3);
1361  else
1362  refdist = dir ? v->brfd : v->frfd;
1363  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1364 
1365  n = (n * scaleopp >> 8) << hpel;
1366  return n;
1367 }
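
/* Standalone illustration (not part of vc1dec.c): the unit handling shared by
 * scaleforsame()/scaleforopp() above. The predictor is reduced to half-pel
 * units when the picture is quarter-pel, scaled with an 8-bit fixed-point
 * factor, and shifted back. The scale factor here is arbitrary; the real ones
 * come from the ff_vc1_field_mvpred_scales tables. */
#include <stdio.h>

static int scale_mv_component(int n, int scale, int quarter_sample)
{
    int hpel = 1 - quarter_sample;   /* 0 for quarter-pel pictures, 1 for half-pel */
    n >>= hpel;
    n = (n * scale >> 8) << hpel;
    return n;
}

int main(void)
{
    printf("%d\n", scale_mv_component(20, 192, 1)); /* qpel picture: 20*192/256 = 15 */
    printf("%d\n", scale_mv_component(20, 192, 0)); /* hpel picture: (10*192>>8)<<1 = 14 */
    return 0;
}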
1368 
1371 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1372  int mv1, int r_x, int r_y, uint8_t* is_intra,
1373  int pred_flag, int dir)
1374 {
1375  MpegEncContext *s = &v->s;
1376  int xy, wrap, off = 0;
1377  int16_t *A, *B, *C;
1378  int px, py;
1379  int sum;
1380  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1381  int opposite, a_f, b_f, c_f;
1382  int16_t field_predA[2];
1383  int16_t field_predB[2];
1384  int16_t field_predC[2];
1385  int a_valid, b_valid, c_valid;
1386  int hybridmv_thresh, y_bias = 0;
1387 
1388  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1389  ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1390  mixedmv_pic = 1;
1391  else
1392  mixedmv_pic = 0;
1393  /* scale MV difference to be quad-pel */
1394  dmv_x <<= 1 - s->quarter_sample;
1395  dmv_y <<= 1 - s->quarter_sample;
1396 
1397  wrap = s->b8_stride;
1398  xy = s->block_index[n];
1399 
1400  if (s->mb_intra) {
1401  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = 0;
1402  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = 0;
1403  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = 0;
1404  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
1405  if (mv1) { /* duplicate motion data for 1-MV block */
1406  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1407  s->current_picture.f.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1408  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1409  s->current_picture.f.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1410  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1411  s->current_picture.f.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1412  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1413  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1414  s->current_picture.f.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1415  s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1416  s->current_picture.f.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1417  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1418  s->current_picture.f.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1419  }
1420  return;
1421  }
1422 
1423  C = s->current_picture.f.motion_val[dir][xy - 1 + v->blocks_off];
1424  A = s->current_picture.f.motion_val[dir][xy - wrap + v->blocks_off];
1425  if (mv1) {
1426  if (v->field_mode && mixedmv_pic)
1427  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1428  else
1429  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1430  } else {
1431  //in 4-MV mode different blocks have different B predictor position
1432  switch (n) {
1433  case 0:
1434  off = (s->mb_x > 0) ? -1 : 1;
1435  break;
1436  case 1:
1437  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1438  break;
1439  case 2:
1440  off = 1;
1441  break;
1442  case 3:
1443  off = -1;
1444  }
1445  }
1446  B = s->current_picture.f.motion_val[dir][xy - wrap + off + v->blocks_off];
1447 
1448  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1449  b_valid = a_valid && (s->mb_width > 1);
1450  c_valid = s->mb_x || (n == 1 || n == 3);
1451  if (v->field_mode) {
1452  a_valid = a_valid && !is_intra[xy - wrap];
1453  b_valid = b_valid && !is_intra[xy - wrap + off];
1454  c_valid = c_valid && !is_intra[xy - 1];
1455  }
1456 
1457  if (a_valid) {
1458  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1459  num_oppfield += a_f;
1460  num_samefield += 1 - a_f;
1461  field_predA[0] = A[0];
1462  field_predA[1] = A[1];
1463  } else {
1464  field_predA[0] = field_predA[1] = 0;
1465  a_f = 0;
1466  }
1467  if (b_valid) {
1468  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1469  num_oppfield += b_f;
1470  num_samefield += 1 - b_f;
1471  field_predB[0] = B[0];
1472  field_predB[1] = B[1];
1473  } else {
1474  field_predB[0] = field_predB[1] = 0;
1475  b_f = 0;
1476  }
1477  if (c_valid) {
1478  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1479  num_oppfield += c_f;
1480  num_samefield += 1 - c_f;
1481  field_predC[0] = C[0];
1482  field_predC[1] = C[1];
1483  } else {
1484  field_predC[0] = field_predC[1] = 0;
1485  c_f = 0;
1486  }
1487 
1488  if (v->field_mode) {
1489  if (!v->numref)
1490  // REFFIELD determines if the last field or the second-last field is
1491  // to be used as reference
1492  opposite = 1 - v->reffield;
1493  else {
1494  if (num_samefield <= num_oppfield)
1495  opposite = 1 - pred_flag;
1496  else
1497  opposite = pred_flag;
1498  }
1499  } else
1500  opposite = 0;
1501  if (opposite) {
1502  if (a_valid && !a_f) {
1503  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1504  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1505  }
1506  if (b_valid && !b_f) {
1507  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1508  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1509  }
1510  if (c_valid && !c_f) {
1511  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1512  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1513  }
1514  v->mv_f[dir][xy + v->blocks_off] = 1;
1515  v->ref_field_type[dir] = !v->cur_field_type;
1516  } else {
1517  if (a_valid && a_f) {
1518  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1519  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1520  }
1521  if (b_valid && b_f) {
1522  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1523  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1524  }
1525  if (c_valid && c_f) {
1526  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1527  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1528  }
1529  v->mv_f[dir][xy + v->blocks_off] = 0;
1530  v->ref_field_type[dir] = v->cur_field_type;
1531  }
1532 
1533  if (a_valid) {
1534  px = field_predA[0];
1535  py = field_predA[1];
1536  } else if (c_valid) {
1537  px = field_predC[0];
1538  py = field_predC[1];
1539  } else if (b_valid) {
1540  px = field_predB[0];
1541  py = field_predB[1];
1542  } else {
1543  px = 0;
1544  py = 0;
1545  }
1546 
1547  if (num_samefield + num_oppfield > 1) {
1548  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1549  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1550  }
1551 
1552  /* Pullback MV as specified in 8.3.5.3.4 */
1553  if (!v->field_mode) {
1554  int qx, qy, X, Y;
1555  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1556  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1557  X = (s->mb_width << 6) - 4;
1558  Y = (s->mb_height << 6) - 4;
1559  if (mv1) {
1560  if (qx + px < -60) px = -60 - qx;
1561  if (qy + py < -60) py = -60 - qy;
1562  } else {
1563  if (qx + px < -28) px = -28 - qx;
1564  if (qy + py < -28) py = -28 - qy;
1565  }
1566  if (qx + px > X) px = X - qx;
1567  if (qy + py > Y) py = Y - qy;
1568  }
1569 
1570  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1571  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1572  hybridmv_thresh = 32;
1573  if (a_valid && c_valid) {
1574  if (is_intra[xy - wrap])
1575  sum = FFABS(px) + FFABS(py);
1576  else
1577  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1578  if (sum > hybridmv_thresh) {
1579  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1580  px = field_predA[0];
1581  py = field_predA[1];
1582  } else {
1583  px = field_predC[0];
1584  py = field_predC[1];
1585  }
1586  } else {
1587  if (is_intra[xy - 1])
1588  sum = FFABS(px) + FFABS(py);
1589  else
1590  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1591  if (sum > hybridmv_thresh) {
1592  if (get_bits1(&s->gb)) {
1593  px = field_predA[0];
1594  py = field_predA[1];
1595  } else {
1596  px = field_predC[0];
1597  py = field_predC[1];
1598  }
1599  }
1600  }
1601  }
1602  }
1603 
1604  if (v->field_mode && v->numref)
1605  r_y >>= 1;
1606  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1607  y_bias = 1;
1608  /* store MV using signed modulus of MV range defined in 4.11 */
1609  s->mv[dir][n][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1610  s->mv[dir][n][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1611  if (mv1) { /* duplicate motion data for 1-MV block */
1612  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1613  s->current_picture.f.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1614  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1615  s->current_picture.f.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1616  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][0];
1617  s->current_picture.f.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.f.motion_val[dir][xy + v->blocks_off][1];
1618  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1619  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1620  }
1621 }
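
/* Standalone illustration (not part of vc1dec.c): the signed-modulus wrap used
 * when storing MVs above, ((val + r) & ((r << 1) - 1)) - r, which folds the sum
 * of predictor and differential into [-r, r-1]. This relies on r being a power
 * of two, as the decoder's range_x/range_y values are. */
#include <stdio.h>

static int wrap_mv(int val, int r)
{
    return ((val + r) & ((r << 1) - 1)) - r;
}

int main(void)
{
    int r = 64;                       /* example MV range */
    printf("%d\n", wrap_mv( 10, r));  /* prints  10 */
    printf("%d\n", wrap_mv( 70, r));  /* prints -58 */
    printf("%d\n", wrap_mv(-70, r));  /* prints  58 */
    return 0;
}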
1622 
1625 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1626  int mvn, int r_x, int r_y, uint8_t* is_intra)
1627 {
1628  MpegEncContext *s = &v->s;
1629  int xy, wrap, off = 0;
1630  int A[2], B[2], C[2];
1631  int px, py;
1632  int a_valid = 0, b_valid = 0, c_valid = 0;
1633  int field_a, field_b, field_c; // 0: same, 1: opposite
1634  int total_valid, num_samefield, num_oppfield;
1635  int pos_c, pos_b, n_adj;
1636 
1637  wrap = s->b8_stride;
1638  xy = s->block_index[n];
1639 
1640  if (s->mb_intra) {
1641  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = 0;
1642  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = 0;
1643  s->current_picture.f.motion_val[1][xy][0] = 0;
1644  s->current_picture.f.motion_val[1][xy][1] = 0;
1645  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1646  s->current_picture.f.motion_val[0][xy + 1][0] = 0;
1647  s->current_picture.f.motion_val[0][xy + 1][1] = 0;
1648  s->current_picture.f.motion_val[0][xy + wrap][0] = 0;
1649  s->current_picture.f.motion_val[0][xy + wrap][1] = 0;
1650  s->current_picture.f.motion_val[0][xy + wrap + 1][0] = 0;
1651  s->current_picture.f.motion_val[0][xy + wrap + 1][1] = 0;
1652  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1653  s->current_picture.f.motion_val[1][xy + 1][0] = 0;
1654  s->current_picture.f.motion_val[1][xy + 1][1] = 0;
1655  s->current_picture.f.motion_val[1][xy + wrap][0] = 0;
1656  s->current_picture.f.motion_val[1][xy + wrap][1] = 0;
1657  s->current_picture.f.motion_val[1][xy + wrap + 1][0] = 0;
1658  s->current_picture.f.motion_val[1][xy + wrap + 1][1] = 0;
1659  }
1660  return;
1661  }
1662 
1663  off = ((n == 0) || (n == 1)) ? 1 : -1;
1664  /* predict A */
1665  if (s->mb_x || (n == 1) || (n == 3)) {
1666  if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1667  || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1668  A[0] = s->current_picture.f.motion_val[0][xy - 1][0];
1669  A[1] = s->current_picture.f.motion_val[0][xy - 1][1];
1670  a_valid = 1;
1671  } else { // current block has frame mv and cand. has field MV (so average)
1672  A[0] = (s->current_picture.f.motion_val[0][xy - 1][0]
1673  + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][0] + 1) >> 1;
1674  A[1] = (s->current_picture.f.motion_val[0][xy - 1][1]
1675  + s->current_picture.f.motion_val[0][xy - 1 + off * wrap][1] + 1) >> 1;
1676  a_valid = 1;
1677  }
1678  if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1679  a_valid = 0;
1680  A[0] = A[1] = 0;
1681  }
1682  } else
1683  A[0] = A[1] = 0;
1684  /* Predict B and C */
1685  B[0] = B[1] = C[0] = C[1] = 0;
1686  if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1687  if (!s->first_slice_line) {
1688  if (!v->is_intra[s->mb_x - s->mb_stride]) {
1689  b_valid = 1;
1690  n_adj = n | 2;
1691  pos_b = s->block_index[n_adj] - 2 * wrap;
1692  if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1693  n_adj = (n & 2) | (n & 1);
1694  }
1695  B[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][0];
1696  B[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap][1];
1697  if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1698  B[0] = (B[0] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1699  B[1] = (B[1] + s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1700  }
1701  }
1702  if (s->mb_width > 1) {
1703  if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1704  c_valid = 1;
1705  n_adj = 2;
1706  pos_c = s->block_index[2] - 2 * wrap + 2;
1707  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1708  n_adj = n & 2;
1709  }
1710  C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][0];
1711  C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap + 2][1];
1712  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1713  C[0] = (1 + C[0] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1714  C[1] = (1 + C[1] + (s->current_picture.f.motion_val[0][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1715  }
1716  if (s->mb_x == s->mb_width - 1) {
1717  if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1718  c_valid = 1;
1719  n_adj = 3;
1720  pos_c = s->block_index[3] - 2 * wrap - 2;
1721  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1722  n_adj = n | 1;
1723  }
1724  C[0] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][0];
1725  C[1] = s->current_picture.f.motion_val[0][s->block_index[n_adj] - 2 * wrap - 2][1];
1726  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1727  C[0] = (1 + C[0] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1728  C[1] = (1 + C[1] + s->current_picture.f.motion_val[0][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1729  }
1730  } else
1731  c_valid = 0;
1732  }
1733  }
1734  }
1735  }
1736  } else {
1737  pos_b = s->block_index[1];
1738  b_valid = 1;
1739  B[0] = s->current_picture.f.motion_val[0][pos_b][0];
1740  B[1] = s->current_picture.f.motion_val[0][pos_b][1];
1741  pos_c = s->block_index[0];
1742  c_valid = 1;
1743  C[0] = s->current_picture.f.motion_val[0][pos_c][0];
1744  C[1] = s->current_picture.f.motion_val[0][pos_c][1];
1745  }
1746 
1747  total_valid = a_valid + b_valid + c_valid;
1748  // check if predictor A is out of bounds
1749  if (!s->mb_x && !(n == 1 || n == 3)) {
1750  A[0] = A[1] = 0;
1751  }
1752  // check if predictor B is out of bounds
1753  if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1754  B[0] = B[1] = C[0] = C[1] = 0;
1755  }
1756  if (!v->blk_mv_type[xy]) {
1757  if (s->mb_width == 1) {
1758  px = B[0];
1759  py = B[1];
1760  } else {
1761  if (total_valid >= 2) {
1762  px = mid_pred(A[0], B[0], C[0]);
1763  py = mid_pred(A[1], B[1], C[1]);
1764  } else if (total_valid) {
1765  if (a_valid) { px = A[0]; py = A[1]; }
1766  else if (b_valid) { px = B[0]; py = B[1]; }
1767  else if (c_valid) { px = C[0]; py = C[1]; }
1768  else av_assert2(0);
1769  } else
1770  px = py = 0;
1771  }
1772  } else {
1773  if (a_valid)
1774  field_a = (A[1] & 4) ? 1 : 0;
1775  else
1776  field_a = 0;
1777  if (b_valid)
1778  field_b = (B[1] & 4) ? 1 : 0;
1779  else
1780  field_b = 0;
1781  if (c_valid)
1782  field_c = (C[1] & 4) ? 1 : 0;
1783  else
1784  field_c = 0;
1785 
1786  num_oppfield = field_a + field_b + field_c;
1787  num_samefield = total_valid - num_oppfield;
1788  if (total_valid == 3) {
1789  if ((num_samefield == 3) || (num_oppfield == 3)) {
1790  px = mid_pred(A[0], B[0], C[0]);
1791  py = mid_pred(A[1], B[1], C[1]);
1792  } else if (num_samefield >= num_oppfield) {
1793  /* take one MV from the same-field set depending on priority;
1794  the check for B may not be necessary */
1795  px = !field_a ? A[0] : B[0];
1796  py = !field_a ? A[1] : B[1];
1797  } else {
1798  px = field_a ? A[0] : B[0];
1799  py = field_a ? A[1] : B[1];
1800  }
1801  } else if (total_valid == 2) {
1802  if (num_samefield >= num_oppfield) {
1803  if (!field_a && a_valid) {
1804  px = A[0];
1805  py = A[1];
1806  } else if (!field_b && b_valid) {
1807  px = B[0];
1808  py = B[1];
1809  } else if (c_valid) {
1810  px = C[0];
1811  py = C[1];
1812  } else px = py = 0;
1813  } else {
1814  if (field_a && a_valid) {
1815  px = A[0];
1816  py = A[1];
1817  } else if (field_b && b_valid) {
1818  px = B[0];
1819  py = B[1];
1820  } else if (c_valid) {
1821  px = C[0];
1822  py = C[1];
1823  } else px = py = 0;
1824  }
1825  } else if (total_valid == 1) {
1826  px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1827  py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1828  } else
1829  px = py = 0;
1830  }
1831 
1832  /* store MV using signed modulus of MV range defined in 4.11 */
1833  s->mv[0][n][0] = s->current_picture.f.motion_val[0][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1834  s->mv[0][n][1] = s->current_picture.f.motion_val[0][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1835  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1836  s->current_picture.f.motion_val[0][xy + 1 ][0] = s->current_picture.f.motion_val[0][xy][0];
1837  s->current_picture.f.motion_val[0][xy + 1 ][1] = s->current_picture.f.motion_val[0][xy][1];
1838  s->current_picture.f.motion_val[0][xy + wrap ][0] = s->current_picture.f.motion_val[0][xy][0];
1839  s->current_picture.f.motion_val[0][xy + wrap ][1] = s->current_picture.f.motion_val[0][xy][1];
1840  s->current_picture.f.motion_val[0][xy + wrap + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1841  s->current_picture.f.motion_val[0][xy + wrap + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1842  } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1843  s->current_picture.f.motion_val[0][xy + 1][0] = s->current_picture.f.motion_val[0][xy][0];
1844  s->current_picture.f.motion_val[0][xy + 1][1] = s->current_picture.f.motion_val[0][xy][1];
1845  s->mv[0][n + 1][0] = s->mv[0][n][0];
1846  s->mv[0][n + 1][1] = s->mv[0][n][1];
1847  }
1848 }
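The signed-modulus stores above fold the predictor-plus-differential sum back into the MV range with a mask and a subtraction, which only works because the range is a power of two. A minimal standalone sketch of that wrap (the helper name is illustrative, not part of the decoder):

#include <assert.h>

/* Hypothetical helper: wrap one MV component into [-range, range - 1],
 * exactly like the masked expression above; range must be a power of two. */
static inline int wrap_mv_component(int pred, int dmv, int range)
{
    assert((range & (range - 1)) == 0);
    return ((pred + dmv + range) & ((range << 1) - 1)) - range;
}

/* e.g. with range 64: wrap_mv_component(60, 10, 64) == -58,
 * because 60 + 10 = 70 wraps around the [-64, 63] interval. */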
1849 
1852 static void vc1_interp_mc(VC1Context *v)
1853 {
1854  MpegEncContext *s = &v->s;
1855  DSPContext *dsp = &v->s.dsp;
1856  H264ChromaContext *h264chroma = &v->h264chroma;
1857  uint8_t *srcY, *srcU, *srcV;
1858  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1859  int off, off_uv;
1860  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1861 
1862  if (!v->field_mode && !v->s.next_picture.f.data[0])
1863  return;
1864 
1865  mx = s->mv[1][0][0];
1866  my = s->mv[1][0][1];
1867  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1868  uvmy = (my + ((my & 3) == 3)) >> 1;
1869  if (v->field_mode) {
1870  if (v->cur_field_type != v->ref_field_type[1]) {
1871  my = my - 2 + 4 * v->cur_field_type;
1872  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1873  }
1874  }
1875  if (v->fastuvmc) {
1876  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1877  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1878  }
1879  srcY = s->next_picture.f.data[0];
1880  srcU = s->next_picture.f.data[1];
1881  srcV = s->next_picture.f.data[2];
1882 
1883  src_x = s->mb_x * 16 + (mx >> 2);
1884  src_y = s->mb_y * 16 + (my >> 2);
1885  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1886  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1887 
1888  if (v->profile != PROFILE_ADVANCED) {
1889  src_x = av_clip( src_x, -16, s->mb_width * 16);
1890  src_y = av_clip( src_y, -16, s->mb_height * 16);
1891  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1892  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1893  } else {
1894  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1895  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1896  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1897  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1898  }
1899 
1900  srcY += src_y * s->linesize + src_x;
1901  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1902  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1903 
1904  if (v->field_mode && v->ref_field_type[1]) {
1905  srcY += s->current_picture_ptr->f.linesize[0];
1906  srcU += s->current_picture_ptr->f.linesize[1];
1907  srcV += s->current_picture_ptr->f.linesize[2];
1908  }
1909 
1910  /* for grayscale we should not try to read from unknown area */
1911  if (s->flags & CODEC_FLAG_GRAY) {
1912  srcU = s->edge_emu_buffer + 18 * s->linesize;
1913  srcV = s->edge_emu_buffer + 18 * s->linesize;
1914  }
1915 
1916  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22
1917  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1918  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1919  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1920 
1921  srcY -= s->mspel * (1 + s->linesize);
1922  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize,
1923  17 + s->mspel * 2, 17 + s->mspel * 2,
1924  src_x - s->mspel, src_y - s->mspel,
1925  s->h_edge_pos, v_edge_pos);
1926  srcY = s->edge_emu_buffer;
1927  s->vdsp.emulated_edge_mc(uvbuf , srcU, s->uvlinesize, 8 + 1, 8 + 1,
1928  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1929  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV, s->uvlinesize, 8 + 1, 8 + 1,
1930  uvsrc_x, uvsrc_y, s->h_edge_pos >> 1, v_edge_pos >> 1);
1931  srcU = uvbuf;
1932  srcV = uvbuf + 16;
1933  /* if we deal with range reduction we need to scale source blocks */
1934  if (v->rangeredfrm) {
1935  int i, j;
1936  uint8_t *src, *src2;
1937 
1938  src = srcY;
1939  for (j = 0; j < 17 + s->mspel * 2; j++) {
1940  for (i = 0; i < 17 + s->mspel * 2; i++)
1941  src[i] = ((src[i] - 128) >> 1) + 128;
1942  src += s->linesize;
1943  }
1944  src = srcU;
1945  src2 = srcV;
1946  for (j = 0; j < 9; j++) {
1947  for (i = 0; i < 9; i++) {
1948  src[i] = ((src[i] - 128) >> 1) + 128;
1949  src2[i] = ((src2[i] - 128) >> 1) + 128;
1950  }
1951  src += s->uvlinesize;
1952  src2 += s->uvlinesize;
1953  }
1954  }
1955  srcY += s->mspel * (1 + s->linesize);
1956  }
1957 
1958  off = 0;
1959  off_uv = 0;
1960 
1961  if (s->mspel) {
1962  dxy = ((my & 3) << 2) | (mx & 3);
1963  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
1964  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
1965  srcY += s->linesize * 8;
1966  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
1967  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
1968  } else { // hpel mc
1969  dxy = (my & 2) | ((mx & 2) >> 1);
1970 
1971  if (!v->rnd)
1972  dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
1973  else
1974  dsp->avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
1975  }
1976 
1977  if (s->flags & CODEC_FLAG_GRAY) return;
1978  /* Chroma MC always uses qpel bilinear */
1979  uvmx = (uvmx & 3) << 1;
1980  uvmy = (uvmy & 3) << 1;
1981  if (!v->rnd) {
1982  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1983  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1984  } else {
1985  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
1986  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
1987  }
1988 }
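A brief aside on the chroma MV derivation used near the top of the function above: the quarter-pel luma components are halved with a bias when the fractional part is 3/4, and FASTUVMC then snaps the result to half-pel. A small sketch under those assumptions (the helper is illustrative only):

/* Illustrative only: derive a chroma MV component from a quarter-pel luma one. */
static inline int chroma_mv_component(int lmv, int fastuvmc)
{
    int cmv = (lmv + ((lmv & 3) == 3)) >> 1;   /* halve, biasing 3/4-pel upwards */
    if (fastuvmc)                              /* FASTUVMC: round odd values to half-pel */
        cmv = cmv + ((cmv < 0) ? -(cmv & 1) : (cmv & 1));
    return cmv;
}
/* e.g. lmv = 7 (1.75 luma pel) gives cmv = 4, i.e. 1.0 pel in chroma quarter-pel units. */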
1989 
1990 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
1991 {
1992  int n = bfrac;
1993 
1994 #if B_FRACTION_DEN==256
1995  if (inv)
1996  n -= 256;
1997  if (!qs)
1998  return 2 * ((value * n + 255) >> 9);
1999  return (value * n + 128) >> 8;
2000 #else
2001  if (inv)
2002  n -= B_FRACTION_DEN;
2003  if (!qs)
2004  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2005  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2006 #endif
2007 }
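In direct mode the co-located P-frame vector is split into a forward and a backward part according to the B frame's temporal position. A worked example under the compile-time assumption B_FRACTION_DEN == 256 and quarter-pel MVs (qs != 0):

/* Worked example only: value 40, bfraction 128 (B frame halfway between refs). */
static int scale_mv_example(void)
{
    int value = 40, bfrac = 128;
    int fwd = (value * bfrac + 128) >> 8;            /* scale_mv(value, bfrac, 0, 1) ==  20 */
    int bwd = (value * (bfrac - 256) + 128) >> 8;    /* scale_mv(value, bfrac, 1, 1) == -20 */
    return fwd - bwd;                                /* == value: the two parts recombine
                                                      * to the co-located vector */
}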
2008 
2011 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2012  int direct, int mode)
2013 {
2014  if (v->use_ic) {
2015  v->mv_mode2 = v->mv_mode;
2017  }
2018  if (direct) {
2019  vc1_mc_1mv(v, 0);
2020  vc1_interp_mc(v);
2021  if (v->use_ic)
2022  v->mv_mode = v->mv_mode2;
2023  return;
2024  }
2025  if (mode == BMV_TYPE_INTERPOLATED) {
2026  vc1_mc_1mv(v, 0);
2027  vc1_interp_mc(v);
2028  if (v->use_ic)
2029  v->mv_mode = v->mv_mode2;
2030  return;
2031  }
2032 
2033  if (v->use_ic && (mode == BMV_TYPE_BACKWARD))
2034  v->mv_mode = v->mv_mode2;
2035  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2036  if (v->use_ic)
2037  v->mv_mode = v->mv_mode2;
2038 }
2039 
2040 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2041  int direct, int mvtype)
2042 {
2043  MpegEncContext *s = &v->s;
2044  int xy, wrap, off = 0;
2045  int16_t *A, *B, *C;
2046  int px, py;
2047  int sum;
2048  int r_x, r_y;
2049  const uint8_t *is_intra = v->mb_type[0];
2050 
2051  r_x = v->range_x;
2052  r_y = v->range_y;
2053  /* scale MV difference to be quad-pel */
2054  dmv_x[0] <<= 1 - s->quarter_sample;
2055  dmv_y[0] <<= 1 - s->quarter_sample;
2056  dmv_x[1] <<= 1 - s->quarter_sample;
2057  dmv_y[1] <<= 1 - s->quarter_sample;
2058 
2059  wrap = s->b8_stride;
2060  xy = s->block_index[0];
2061 
2062  if (s->mb_intra) {
2063  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] =
2064  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] =
2065  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] =
2066  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = 0;
2067  return;
2068  }
2069  if (!v->field_mode) {
2070  s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2071  s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2072  s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2073  s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2074 
2075  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2076  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2077  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2078  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2079  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2080  }
2081  if (direct) {
2082  s->current_picture.f.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2083  s->current_picture.f.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2084  s->current_picture.f.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2085  s->current_picture.f.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2086  return;
2087  }
2088 
2089  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2090  C = s->current_picture.f.motion_val[0][xy - 2];
2091  A = s->current_picture.f.motion_val[0][xy - wrap * 2];
2092  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2093  B = s->current_picture.f.motion_val[0][xy - wrap * 2 + off];
2094 
2095  if (!s->mb_x) C[0] = C[1] = 0;
2096  if (!s->first_slice_line) { // predictor A is not out of bounds
2097  if (s->mb_width == 1) {
2098  px = A[0];
2099  py = A[1];
2100  } else {
2101  px = mid_pred(A[0], B[0], C[0]);
2102  py = mid_pred(A[1], B[1], C[1]);
2103  }
2104  } else if (s->mb_x) { // predictor C is not out of bounds
2105  px = C[0];
2106  py = C[1];
2107  } else {
2108  px = py = 0;
2109  }
2110  /* Pullback MV as specified in 8.3.5.3.4 */
2111  {
2112  int qx, qy, X, Y;
2113  if (v->profile < PROFILE_ADVANCED) {
2114  qx = (s->mb_x << 5);
2115  qy = (s->mb_y << 5);
2116  X = (s->mb_width << 5) - 4;
2117  Y = (s->mb_height << 5) - 4;
2118  if (qx + px < -28) px = -28 - qx;
2119  if (qy + py < -28) py = -28 - qy;
2120  if (qx + px > X) px = X - qx;
2121  if (qy + py > Y) py = Y - qy;
2122  } else {
2123  qx = (s->mb_x << 6);
2124  qy = (s->mb_y << 6);
2125  X = (s->mb_width << 6) - 4;
2126  Y = (s->mb_height << 6) - 4;
2127  if (qx + px < -60) px = -60 - qx;
2128  if (qy + py < -60) py = -60 - qy;
2129  if (qx + px > X) px = X - qx;
2130  if (qy + py > Y) py = Y - qy;
2131  }
2132  }
2133  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2134  if (0 && !s->first_slice_line && s->mb_x) {
2135  if (is_intra[xy - wrap])
2136  sum = FFABS(px) + FFABS(py);
2137  else
2138  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2139  if (sum > 32) {
2140  if (get_bits1(&s->gb)) {
2141  px = A[0];
2142  py = A[1];
2143  } else {
2144  px = C[0];
2145  py = C[1];
2146  }
2147  } else {
2148  if (is_intra[xy - 2])
2149  sum = FFABS(px) + FFABS(py);
2150  else
2151  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2152  if (sum > 32) {
2153  if (get_bits1(&s->gb)) {
2154  px = A[0];
2155  py = A[1];
2156  } else {
2157  px = C[0];
2158  py = C[1];
2159  }
2160  }
2161  }
2162  }
2163  /* store MV using signed modulus of MV range defined in 4.11 */
2164  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2165  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2166  }
2167  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2168  C = s->current_picture.f.motion_val[1][xy - 2];
2169  A = s->current_picture.f.motion_val[1][xy - wrap * 2];
2170  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2171  B = s->current_picture.f.motion_val[1][xy - wrap * 2 + off];
2172 
2173  if (!s->mb_x)
2174  C[0] = C[1] = 0;
2175  if (!s->first_slice_line) { // predictor A is not out of bounds
2176  if (s->mb_width == 1) {
2177  px = A[0];
2178  py = A[1];
2179  } else {
2180  px = mid_pred(A[0], B[0], C[0]);
2181  py = mid_pred(A[1], B[1], C[1]);
2182  }
2183  } else if (s->mb_x) { // predictor C is not out of bounds
2184  px = C[0];
2185  py = C[1];
2186  } else {
2187  px = py = 0;
2188  }
2189  /* Pullback MV as specified in 8.3.5.3.4 */
2190  {
2191  int qx, qy, X, Y;
2192  if (v->profile < PROFILE_ADVANCED) {
2193  qx = (s->mb_x << 5);
2194  qy = (s->mb_y << 5);
2195  X = (s->mb_width << 5) - 4;
2196  Y = (s->mb_height << 5) - 4;
2197  if (qx + px < -28) px = -28 - qx;
2198  if (qy + py < -28) py = -28 - qy;
2199  if (qx + px > X) px = X - qx;
2200  if (qy + py > Y) py = Y - qy;
2201  } else {
2202  qx = (s->mb_x << 6);
2203  qy = (s->mb_y << 6);
2204  X = (s->mb_width << 6) - 4;
2205  Y = (s->mb_height << 6) - 4;
2206  if (qx + px < -60) px = -60 - qx;
2207  if (qy + py < -60) py = -60 - qy;
2208  if (qx + px > X) px = X - qx;
2209  if (qy + py > Y) py = Y - qy;
2210  }
2211  }
2212  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2213  if (0 && !s->first_slice_line && s->mb_x) {
2214  if (is_intra[xy - wrap])
2215  sum = FFABS(px) + FFABS(py);
2216  else
2217  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2218  if (sum > 32) {
2219  if (get_bits1(&s->gb)) {
2220  px = A[0];
2221  py = A[1];
2222  } else {
2223  px = C[0];
2224  py = C[1];
2225  }
2226  } else {
2227  if (is_intra[xy - 2])
2228  sum = FFABS(px) + FFABS(py);
2229  else
2230  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2231  if (sum > 32) {
2232  if (get_bits1(&s->gb)) {
2233  px = A[0];
2234  py = A[1];
2235  } else {
2236  px = C[0];
2237  py = C[1];
2238  }
2239  }
2240  }
2241  }
2242  /* store MV using signed modulus of MV range defined in 4.11 */
2243 
2244  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2245  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2246  }
2247  s->current_picture.f.motion_val[0][xy][0] = s->mv[0][0][0];
2248  s->current_picture.f.motion_val[0][xy][1] = s->mv[0][0][1];
2249  s->current_picture.f.motion_val[1][xy][0] = s->mv[1][0][0];
2250  s->current_picture.f.motion_val[1][xy][1] = s->mv[1][0][1];
2251 }
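Both directions above follow the same recipe: take the component-wise median of the left, top and top-right candidates, clamp it so the predicted block stays inside the coded frame, and only then add the decoded differential. A stand-alone sketch of the median step (median3 stands in for FFmpeg's mid_pred):

/* Stand-in for mid_pred(): median of three candidate MV components. */
static inline int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b */
    if (b > c) b = c;                         /* b = min(max(a, b), c) */
    return a > b ? a : b;                     /* the middle value */
}
/* e.g. median3(-12, 3, 40) == 3: a single outlying neighbour cannot drag
 * the predictor far away from the other two. */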
2252 
2253 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2254 {
2255  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2256  MpegEncContext *s = &v->s;
2257  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2258 
2259  if (v->bmvtype == BMV_TYPE_DIRECT) {
2260  int total_opp, k, f;
2261  if (s->next_picture.f.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2262  s->mv[0][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2263  v->bfraction, 0, s->quarter_sample);
2264  s->mv[0][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2265  v->bfraction, 0, s->quarter_sample);
2266  s->mv[1][0][0] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0],
2267  v->bfraction, 1, s->quarter_sample);
2268  s->mv[1][0][1] = scale_mv(s->next_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1],
2269  v->bfraction, 1, s->quarter_sample);
2270 
2271  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2272  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2273  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2274  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2275  f = (total_opp > 2) ? 1 : 0;
2276  } else {
2277  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2278  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2279  f = 0;
2280  }
2281  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2282  for (k = 0; k < 4; k++) {
2283  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2284  s->current_picture.f.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2285  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2286  s->current_picture.f.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2287  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2288  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2289  }
2290  return;
2291  }
2292  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2293  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2294  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2295  return;
2296  }
2297  if (dir) { // backward
2298  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2299  if (n == 3 || mv1) {
2300  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2301  }
2302  } else { // forward
2303  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2304  if (n == 3 || mv1) {
2305  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2306  }
2307  }
2308 }
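The direct-mode branch above effectively lets the four co-located luma blocks vote on which field they referenced; a majority of opposite-field votes flips the reference field for the whole macroblock. A minimal restatement of that decision (names are illustrative):

/* Illustrative only: majority vote over the four co-located blocks. */
static inline int direct_ref_field(int opp_votes /* 0..4 */, int cur_field)
{
    int f = (opp_votes > 2) ? 1 : 0;   /* 3 or 4 votes: use the opposite field */
    return cur_field ^ f;              /* mirrors v->cur_field_type ^ f above */
}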
2309 
2319 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2320  int16_t **dc_val_ptr, int *dir_ptr)
2321 {
2322  int a, b, c, wrap, pred, scale;
2323  int16_t *dc_val;
2324  static const uint16_t dcpred[32] = {
2325  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2326  114, 102, 93, 85, 79, 73, 68, 64,
2327  60, 57, 54, 51, 49, 47, 45, 43,
2328  41, 39, 38, 37, 35, 34, 33
2329  };
2330 
2331  /* find prediction - wmv3_dc_scale always used here in fact */
2332  if (n < 4) scale = s->y_dc_scale;
2333  else scale = s->c_dc_scale;
2334 
2335  wrap = s->block_wrap[n];
2336  dc_val = s->dc_val[0] + s->block_index[n];
2337 
2338  /* B A
2339  * C X
2340  */
2341  c = dc_val[ - 1];
2342  b = dc_val[ - 1 - wrap];
2343  a = dc_val[ - wrap];
2344 
2345  if (pq < 9 || !overlap) {
2346  /* Set outer values */
2347  if (s->first_slice_line && (n != 2 && n != 3))
2348  b = a = dcpred[scale];
2349  if (s->mb_x == 0 && (n != 1 && n != 3))
2350  b = c = dcpred[scale];
2351  } else {
2352  /* Set outer values */
2353  if (s->first_slice_line && (n != 2 && n != 3))
2354  b = a = 0;
2355  if (s->mb_x == 0 && (n != 1 && n != 3))
2356  b = c = 0;
2357  }
2358 
2359  if (abs(a - b) <= abs(b - c)) {
2360  pred = c;
2361  *dir_ptr = 1; // left
2362  } else {
2363  pred = a;
2364  *dir_ptr = 0; // top
2365  }
2366 
2367  /* update predictor */
2368  *dc_val_ptr = &dc_val[0];
2369  return pred;
2370 }
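The gradient test above compares |a - b| (top against top-left) with |b - c| (top-left against left) and picks the predictor accordingly: the left neighbour when the first difference is not larger, the top neighbour otherwise. A small numeric sketch with hypothetical neighbour values:

#include <stdlib.h>

/* Hypothetical neighbour DC values: a = top, b = top-left, c = left. */
static int dc_direction_example(void)
{
    int a = 98, b = 100, c = 60;
    /* |a - b| = 2 <= |b - c| = 40, so the predictor is taken from the
     * left neighbour (pred = c, dir = 1). */
    return abs(a - b) <= abs(b - c) ? c : a;   /* == 60 */
}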
2371 
2372 
2384 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2385  int a_avail, int c_avail,
2386  int16_t **dc_val_ptr, int *dir_ptr)
2387 {
2388  int a, b, c, wrap, pred;
2389  int16_t *dc_val;
2390  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2391  int q1, q2 = 0;
2392  int dqscale_index;
2393 
2394  wrap = s->block_wrap[n];
2395  dc_val = s->dc_val[0] + s->block_index[n];
2396 
2397  /* B A
2398  * C X
2399  */
2400  c = dc_val[ - 1];
2401  b = dc_val[ - 1 - wrap];
2402  a = dc_val[ - wrap];
2403  /* scale predictors if needed */
2404  q1 = s->current_picture.f.qscale_table[mb_pos];
2405  dqscale_index = s->y_dc_scale_table[q1] - 1;
2406  if (dqscale_index < 0)
2407  return 0;
2408  if (c_avail && (n != 1 && n != 3)) {
2409  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2410  if (q2 && q2 != q1)
2411  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2412  }
2413  if (a_avail && (n != 2 && n != 3)) {
2414  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2415  if (q2 && q2 != q1)
2416  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2417  }
2418  if (a_avail && c_avail && (n != 3)) {
2419  int off = mb_pos;
2420  if (n != 1)
2421  off--;
2422  if (n != 2)
2423  off -= s->mb_stride;
2424  q2 = s->current_picture.f.qscale_table[off];
2425  if (q2 && q2 != q1)
2426  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2427  }
2428 
2429  if (a_avail && c_avail) {
2430  if (abs(a - b) <= abs(b - c)) {
2431  pred = c;
2432  *dir_ptr = 1; // left
2433  } else {
2434  pred = a;
2435  *dir_ptr = 0; // top
2436  }
2437  } else if (a_avail) {
2438  pred = a;
2439  *dir_ptr = 0; // top
2440  } else if (c_avail) {
2441  pred = c;
2442  *dir_ptr = 1; // left
2443  } else {
2444  pred = 0;
2445  *dir_ptr = 1; // left
2446  }
2447 
2448  /* update predictor */
2449  *dc_val_ptr = &dc_val[0];
2450  return pred;
2451 }
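When a neighbouring macroblock was coded with a different quantizer, its stored DC value is re-expressed relative to the current DC scale with a fixed-point reciprocal: the code above multiplies by the neighbour's scale and by an ff_vc1_dqscale entry, then rounds and shifts by 18. A sketch under the assumption that ff_vc1_dqscale[i] approximates 2^18 / (i + 1):

/* Illustrative only: re-express a quantized DC predictor at the current scale. */
static inline int rescale_dc_pred(int dc, int neighbour_scale, int current_scale)
{
    int inv = (1 << 18) / current_scale;   /* stands in for ff_vc1_dqscale[current_scale - 1];
                                            * the real code reads the reciprocal from a table
                                            * instead of dividing */
    return (dc * neighbour_scale * inv + 0x20000) >> 18;
}
/* e.g. rescale_dc_pred(40, 8, 10) == 32: level 40 * 8 = 320 becomes 320 / 10 = 32
 * at the current scale. */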
2452  // Block group
2454 
2461 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2462  uint8_t **coded_block_ptr)
2463 {
2464  int xy, wrap, pred, a, b, c;
2465 
2466  xy = s->block_index[n];
2467  wrap = s->b8_stride;
2468 
2469  /* B C
2470  * A X
2471  */
2472  a = s->coded_block[xy - 1 ];
2473  b = s->coded_block[xy - 1 - wrap];
2474  c = s->coded_block[xy - wrap];
2475 
2476  if (b == c) {
2477  pred = a;
2478  } else {
2479  pred = c;
2480  }
2481 
2482  /* store value */
2483  *coded_block_ptr = &s->coded_block[xy];
2484 
2485  return pred;
2486 }
2487 
2497 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2498  int *value, int codingset)
2499 {
2500  GetBitContext *gb = &v->s.gb;
2501  int index, escape, run = 0, level = 0, lst = 0;
2502 
2503  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2504  if (index != ff_vc1_ac_sizes[codingset] - 1) {
2505  run = vc1_index_decode_table[codingset][index][0];
2506  level = vc1_index_decode_table[codingset][index][1];
2507  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2508  if (get_bits1(gb))
2509  level = -level;
2510  } else {
2511  escape = decode210(gb);
2512  if (escape != 2) {
2513  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2514  run = vc1_index_decode_table[codingset][index][0];
2515  level = vc1_index_decode_table[codingset][index][1];
2516  lst = index >= vc1_last_decode_table[codingset];
2517  if (escape == 0) {
2518  if (lst)
2519  level += vc1_last_delta_level_table[codingset][run];
2520  else
2521  level += vc1_delta_level_table[codingset][run];
2522  } else {
2523  if (lst)
2524  run += vc1_last_delta_run_table[codingset][level] + 1;
2525  else
2526  run += vc1_delta_run_table[codingset][level] + 1;
2527  }
2528  if (get_bits1(gb))
2529  level = -level;
2530  } else {
2531  int sign;
2532  lst = get_bits1(gb);
2533  if (v->s.esc3_level_length == 0) {
2534  if (v->pq < 8 || v->dquantfrm) { // table 59
2535  v->s.esc3_level_length = get_bits(gb, 3);
2536  if (!v->s.esc3_level_length)
2537  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2538  } else { // table 60
2539  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2540  }
2541  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2542  }
2543  run = get_bits(gb, v->s.esc3_run_length);
2544  sign = get_bits1(gb);
2545  level = get_bits(gb, v->s.esc3_level_length);
2546  if (sign)
2547  level = -level;
2548  }
2549  }
2550 
2551  *last = lst;
2552  *skip = run;
2553  *value = level;
2554 }
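Each call above yields one (skip, value, last) triple; the block-decoding functions below consume them by walking a zig-zag scan, skipping "skip" positions before depositing each value, until "last" ends the block. A compact sketch of that consumer loop (the array arguments are illustrative):

#include <stdint.h>

/* Illustrative consumer of (skip, value) pairs in zig-zag order;
 * position 0 is the DC coefficient and is filled elsewhere. */
static void place_ac_coeffs(int16_t block[64], const uint8_t *zz_table,
                            const int *skips, const int16_t *values, int count)
{
    int i = 1;
    for (int n = 0; n < count; n++) {
        i += skips[n];
        if (i > 63)
            break;                      /* run past the block end: bail out */
        block[zz_table[i++]] = values[n];
    }
}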
2555 
2563 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2564  int coded, int codingset)
2565 {
2566  GetBitContext *gb = &v->s.gb;
2567  MpegEncContext *s = &v->s;
2568  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2569  int i;
2570  int16_t *dc_val;
2571  int16_t *ac_val, *ac_val2;
2572  int dcdiff;
2573 
2574  /* Get DC differential */
2575  if (n < 4) {
2576  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2577  } else {
2578  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2579  }
2580  if (dcdiff < 0) {
2581  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2582  return -1;
2583  }
2584  if (dcdiff) {
2585  if (dcdiff == 119 /* ESC index value */) {
2586  /* TODO: Optimize */
2587  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2588  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2589  else dcdiff = get_bits(gb, 8);
2590  } else {
2591  if (v->pq == 1)
2592  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2593  else if (v->pq == 2)
2594  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2595  }
2596  if (get_bits1(gb))
2597  dcdiff = -dcdiff;
2598  }
2599 
2600  /* Prediction */
2601  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2602  *dc_val = dcdiff;
2603 
2604  /* Store the quantized DC coeff, used for prediction */
2605  if (n < 4) {
2606  block[0] = dcdiff * s->y_dc_scale;
2607  } else {
2608  block[0] = dcdiff * s->c_dc_scale;
2609  }
2610  /* Skip ? */
2611  if (!coded) {
2612  goto not_coded;
2613  }
2614 
2615  // AC Decoding
2616  i = 1;
2617 
2618  {
2619  int last = 0, skip, value;
2620  const uint8_t *zz_table;
2621  int scale;
2622  int k;
2623 
2624  scale = v->pq * 2 + v->halfpq;
2625 
2626  if (v->s.ac_pred) {
2627  if (!dc_pred_dir)
2628  zz_table = v->zz_8x8[2];
2629  else
2630  zz_table = v->zz_8x8[3];
2631  } else
2632  zz_table = v->zz_8x8[1];
2633 
2634  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2635  ac_val2 = ac_val;
2636  if (dc_pred_dir) // left
2637  ac_val -= 16;
2638  else // top
2639  ac_val -= 16 * s->block_wrap[n];
2640 
2641  while (!last) {
2642  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2643  i += skip;
2644  if (i > 63)
2645  break;
2646  block[zz_table[i++]] = value;
2647  }
2648 
2649  /* apply AC prediction if needed */
2650  if (s->ac_pred) {
2651  if (dc_pred_dir) { // left
2652  for (k = 1; k < 8; k++)
2653  block[k << v->left_blk_sh] += ac_val[k];
2654  } else { // top
2655  for (k = 1; k < 8; k++)
2656  block[k << v->top_blk_sh] += ac_val[k + 8];
2657  }
2658  }
2659  /* save AC coeffs for further prediction */
2660  for (k = 1; k < 8; k++) {
2661  ac_val2[k] = block[k << v->left_blk_sh];
2662  ac_val2[k + 8] = block[k << v->top_blk_sh];
2663  }
2664 
2665  /* scale AC coeffs */
2666  for (k = 1; k < 64; k++)
2667  if (block[k]) {
2668  block[k] *= scale;
2669  if (!v->pquantizer)
2670  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2671  }
2672 
2673  if (s->ac_pred) i = 63;
2674  }
2675 
2676 not_coded:
2677  if (!coded) {
2678  int k, scale;
2679  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2680  ac_val2 = ac_val;
2681 
2682  i = 0;
2683  scale = v->pq * 2 + v->halfpq;
2684  memset(ac_val2, 0, 16 * 2);
2685  if (dc_pred_dir) { // left
2686  ac_val -= 16;
2687  if (s->ac_pred)
2688  memcpy(ac_val2, ac_val, 8 * 2);
2689  } else { // top
2690  ac_val -= 16 * s->block_wrap[n];
2691  if (s->ac_pred)
2692  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2693  }
2694 
2695  /* apply AC prediction if needed */
2696  if (s->ac_pred) {
2697  if (dc_pred_dir) { //left
2698  for (k = 1; k < 8; k++) {
2699  block[k << v->left_blk_sh] = ac_val[k] * scale;
2700  if (!v->pquantizer && block[k << v->left_blk_sh])
2701  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2702  }
2703  } else { // top
2704  for (k = 1; k < 8; k++) {
2705  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2706  if (!v->pquantizer && block[k << v->top_blk_sh])
2707  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2708  }
2709  }
2710  i = 63;
2711  }
2712  }
2713  s->block_last_index[n] = i;
2714 
2715  return 0;
2716 }
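The 16-entry ac_val context used above appears to hold, for every block, its first column in entries 1..7 and its first row in entries 9..15; AC prediction then simply adds the matching seven entries from the left or the top neighbour. A minimal sketch of that step under this assumption:

#include <stdint.h>

/* Illustrative only: add the neighbour's stored edge coefficients to this block's
 * first column (prediction from the left) or first row (prediction from the top). */
static void apply_ac_pred(int16_t *edge_coeffs /* 8 entries, [0] unused */,
                          const int16_t *neighbour_ac /* 16 entries */, int from_left)
{
    const int16_t *src = from_left ? neighbour_ac : neighbour_ac + 8;
    for (int k = 1; k < 8; k++)
        edge_coeffs[k] += src[k];
}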
2717 
2726 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2727  int coded, int codingset, int mquant)
2728 {
2729  GetBitContext *gb = &v->s.gb;
2730  MpegEncContext *s = &v->s;
2731  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2732  int i;
2733  int16_t *dc_val = NULL;
2734  int16_t *ac_val, *ac_val2;
2735  int dcdiff;
2736  int a_avail = v->a_avail, c_avail = v->c_avail;
2737  int use_pred = s->ac_pred;
2738  int scale;
2739  int q1, q2 = 0;
2740  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2741 
2742  /* Get DC differential */
2743  if (n < 4) {
2744  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2745  } else {
2746  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2747  }
2748  if (dcdiff < 0) {
2749  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2750  return -1;
2751  }
2752  if (dcdiff) {
2753  if (dcdiff == 119 /* ESC index value */) {
2754  /* TODO: Optimize */
2755  if (mquant == 1) dcdiff = get_bits(gb, 10);
2756  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2757  else dcdiff = get_bits(gb, 8);
2758  } else {
2759  if (mquant == 1)
2760  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2761  else if (mquant == 2)
2762  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2763  }
2764  if (get_bits1(gb))
2765  dcdiff = -dcdiff;
2766  }
2767 
2768  /* Prediction */
2769  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2770  *dc_val = dcdiff;
2771 
2772  /* Store the quantized DC coeff, used for prediction */
2773  if (n < 4) {
2774  block[0] = dcdiff * s->y_dc_scale;
2775  } else {
2776  block[0] = dcdiff * s->c_dc_scale;
2777  }
2778 
2779  //AC Decoding
2780  i = 1;
2781 
2782  /* check if AC is needed at all */
2783  if (!a_avail && !c_avail)
2784  use_pred = 0;
2785  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2786  ac_val2 = ac_val;
2787 
2788  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2789 
2790  if (dc_pred_dir) // left
2791  ac_val -= 16;
2792  else // top
2793  ac_val -= 16 * s->block_wrap[n];
2794 
2795  q1 = s->current_picture.f.qscale_table[mb_pos];
2796  if ( dc_pred_dir && c_avail && mb_pos)
2797  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
2798  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2799  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
2800  if ( dc_pred_dir && n == 1)
2801  q2 = q1;
2802  if (!dc_pred_dir && n == 2)
2803  q2 = q1;
2804  if (n == 3)
2805  q2 = q1;
2806 
2807  if (coded) {
2808  int last = 0, skip, value;
2809  const uint8_t *zz_table;
2810  int k;
2811 
2812  if (v->s.ac_pred) {
2813  if (!use_pred && v->fcm == ILACE_FRAME) {
2814  zz_table = v->zzi_8x8;
2815  } else {
2816  if (!dc_pred_dir) // top
2817  zz_table = v->zz_8x8[2];
2818  else // left
2819  zz_table = v->zz_8x8[3];
2820  }
2821  } else {
2822  if (v->fcm != ILACE_FRAME)
2823  zz_table = v->zz_8x8[1];
2824  else
2825  zz_table = v->zzi_8x8;
2826  }
2827 
2828  while (!last) {
2829  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2830  i += skip;
2831  if (i > 63)
2832  break;
2833  block[zz_table[i++]] = value;
2834  }
2835 
2836  /* apply AC prediction if needed */
2837  if (use_pred) {
2838  /* scale predictors if needed*/
2839  if (q2 && q1 != q2) {
2840  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2841  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2842 
2843  if (q1 < 1)
2844  return AVERROR_INVALIDDATA;
2845  if (dc_pred_dir) { // left
2846  for (k = 1; k < 8; k++)
2847  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2848  } else { // top
2849  for (k = 1; k < 8; k++)
2850  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2851  }
2852  } else {
2853  if (dc_pred_dir) { //left
2854  for (k = 1; k < 8; k++)
2855  block[k << v->left_blk_sh] += ac_val[k];
2856  } else { //top
2857  for (k = 1; k < 8; k++)
2858  block[k << v->top_blk_sh] += ac_val[k + 8];
2859  }
2860  }
2861  }
2862  /* save AC coeffs for further prediction */
2863  for (k = 1; k < 8; k++) {
2864  ac_val2[k ] = block[k << v->left_blk_sh];
2865  ac_val2[k + 8] = block[k << v->top_blk_sh];
2866  }
2867 
2868  /* scale AC coeffs */
2869  for (k = 1; k < 64; k++)
2870  if (block[k]) {
2871  block[k] *= scale;
2872  if (!v->pquantizer)
2873  block[k] += (block[k] < 0) ? -mquant : mquant;
2874  }
2875 
2876  if (use_pred) i = 63;
2877  } else { // no AC coeffs
2878  int k;
2879 
2880  memset(ac_val2, 0, 16 * 2);
2881  if (dc_pred_dir) { // left
2882  if (use_pred) {
2883  memcpy(ac_val2, ac_val, 8 * 2);
2884  if (q2 && q1 != q2) {
2885  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2886  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2887  if (q1 < 1)
2888  return AVERROR_INVALIDDATA;
2889  for (k = 1; k < 8; k++)
2890  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2891  }
2892  }
2893  } else { // top
2894  if (use_pred) {
2895  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2896  if (q2 && q1 != q2) {
2897  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2898  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2899  if (q1 < 1)
2900  return AVERROR_INVALIDDATA;
2901  for (k = 1; k < 8; k++)
2902  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2903  }
2904  }
2905  }
2906 
2907  /* apply AC prediction if needed */
2908  if (use_pred) {
2909  if (dc_pred_dir) { // left
2910  for (k = 1; k < 8; k++) {
2911  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2912  if (!v->pquantizer && block[k << v->left_blk_sh])
2913  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2914  }
2915  } else { // top
2916  for (k = 1; k < 8; k++) {
2917  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2918  if (!v->pquantizer && block[k << v->top_blk_sh])
2919  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2920  }
2921  }
2922  i = 63;
2923  }
2924  }
2925  s->block_last_index[n] = i;
2926 
2927  return 0;
2928 }
2929 
2938 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
2939  int coded, int mquant, int codingset)
2940 {
2941  GetBitContext *gb = &v->s.gb;
2942  MpegEncContext *s = &v->s;
2943  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2944  int i;
2945  int16_t *dc_val = NULL;
2946  int16_t *ac_val, *ac_val2;
2947  int dcdiff;
2948  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2949  int a_avail = v->a_avail, c_avail = v->c_avail;
2950  int use_pred = s->ac_pred;
2951  int scale;
2952  int q1, q2 = 0;
2953 
2954  s->dsp.clear_block(block);
2955 
2956  /* XXX: Guard against dumb values of mquant */
2957  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
2958 
2959  /* Set DC scale - y and c use the same */
2960  s->y_dc_scale = s->y_dc_scale_table[mquant];
2961  s->c_dc_scale = s->c_dc_scale_table[mquant];
2962 
2963  /* Get DC differential */
2964  if (n < 4) {
2965  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2966  } else {
2967  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2968  }
2969  if (dcdiff < 0) {
2970  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2971  return -1;
2972  }
2973  if (dcdiff) {
2974  if (dcdiff == 119 /* ESC index value */) {
2975  /* TODO: Optimize */
2976  if (mquant == 1) dcdiff = get_bits(gb, 10);
2977  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2978  else dcdiff = get_bits(gb, 8);
2979  } else {
2980  if (mquant == 1)
2981  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2982  else if (mquant == 2)
2983  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2984  }
2985  if (get_bits1(gb))
2986  dcdiff = -dcdiff;
2987  }
2988 
2989  /* Prediction */
2990  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
2991  *dc_val = dcdiff;
2992 
2993  /* Store the quantized DC coeff, used for prediction */
2994 
2995  if (n < 4) {
2996  block[0] = dcdiff * s->y_dc_scale;
2997  } else {
2998  block[0] = dcdiff * s->c_dc_scale;
2999  }
3000 
3001  //AC Decoding
3002  i = 1;
3003 
3004  /* check if AC is needed at all and adjust direction if needed */
3005  if (!a_avail) dc_pred_dir = 1;
3006  if (!c_avail) dc_pred_dir = 0;
3007  if (!a_avail && !c_avail) use_pred = 0;
3008  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3009  ac_val2 = ac_val;
3010 
3011  scale = mquant * 2 + v->halfpq;
3012 
3013  if (dc_pred_dir) //left
3014  ac_val -= 16;
3015  else //top
3016  ac_val -= 16 * s->block_wrap[n];
3017 
3018  q1 = s->current_picture.f.qscale_table[mb_pos];
3019  if (dc_pred_dir && c_avail && mb_pos)
3020  q2 = s->current_picture.f.qscale_table[mb_pos - 1];
3021  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3022  q2 = s->current_picture.f.qscale_table[mb_pos - s->mb_stride];
3023  if ( dc_pred_dir && n == 1)
3024  q2 = q1;
3025  if (!dc_pred_dir && n == 2)
3026  q2 = q1;
3027  if (n == 3) q2 = q1;
3028 
3029  if (coded) {
3030  int last = 0, skip, value;
3031  int k;
3032 
3033  while (!last) {
3034  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3035  i += skip;
3036  if (i > 63)
3037  break;
3038  if (v->fcm == PROGRESSIVE)
3039  block[v->zz_8x8[0][i++]] = value;
3040  else {
3041  if (use_pred && (v->fcm == ILACE_FRAME)) {
3042  if (!dc_pred_dir) // top
3043  block[v->zz_8x8[2][i++]] = value;
3044  else // left
3045  block[v->zz_8x8[3][i++]] = value;
3046  } else {
3047  block[v->zzi_8x8[i++]] = value;
3048  }
3049  }
3050  }
3051 
3052  /* apply AC prediction if needed */
3053  if (use_pred) {
3054  /* scale predictors if needed*/
3055  if (q2 && q1 != q2) {
3056  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3057  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3058 
3059  if (q1 < 1)
3060  return AVERROR_INVALIDDATA;
3061  if (dc_pred_dir) { // left
3062  for (k = 1; k < 8; k++)
3063  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3064  } else { //top
3065  for (k = 1; k < 8; k++)
3066  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3067  }
3068  } else {
3069  if (dc_pred_dir) { // left
3070  for (k = 1; k < 8; k++)
3071  block[k << v->left_blk_sh] += ac_val[k];
3072  } else { // top
3073  for (k = 1; k < 8; k++)
3074  block[k << v->top_blk_sh] += ac_val[k + 8];
3075  }
3076  }
3077  }
3078  /* save AC coeffs for further prediction */
3079  for (k = 1; k < 8; k++) {
3080  ac_val2[k ] = block[k << v->left_blk_sh];
3081  ac_val2[k + 8] = block[k << v->top_blk_sh];
3082  }
3083 
3084  /* scale AC coeffs */
3085  for (k = 1; k < 64; k++)
3086  if (block[k]) {
3087  block[k] *= scale;
3088  if (!v->pquantizer)
3089  block[k] += (block[k] < 0) ? -mquant : mquant;
3090  }
3091 
3092  if (use_pred) i = 63;
3093  } else { // no AC coeffs
3094  int k;
3095 
3096  memset(ac_val2, 0, 16 * 2);
3097  if (dc_pred_dir) { // left
3098  if (use_pred) {
3099  memcpy(ac_val2, ac_val, 8 * 2);
3100  if (q2 && q1 != q2) {
3101  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3102  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3103  if (q1 < 1)
3104  return AVERROR_INVALIDDATA;
3105  for (k = 1; k < 8; k++)
3106  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3107  }
3108  }
3109  } else { // top
3110  if (use_pred) {
3111  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3112  if (q2 && q1 != q2) {
3113  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3114  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3115  if (q1 < 1)
3116  return AVERROR_INVALIDDATA;
3117  for (k = 1; k < 8; k++)
3118  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3119  }
3120  }
3121  }
3122 
3123  /* apply AC prediction if needed */
3124  if (use_pred) {
3125  if (dc_pred_dir) { // left
3126  for (k = 1; k < 8; k++) {
3127  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3128  if (!v->pquantizer && block[k << v->left_blk_sh])
3129  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3130  }
3131  } else { // top
3132  for (k = 1; k < 8; k++) {
3133  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3134  if (!v->pquantizer && block[k << v->top_blk_sh])
3135  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3136  }
3137  }
3138  i = 63;
3139  }
3140  }
3141  s->block_last_index[n] = i;
3142 
3143  return 0;
3144 }
3145 
3148 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3149  int mquant, int ttmb, int first_block,
3150  uint8_t *dst, int linesize, int skip_block,
3151  int *ttmb_out)
3152 {
3153  MpegEncContext *s = &v->s;
3154  GetBitContext *gb = &s->gb;
3155  int i, j;
3156  int subblkpat = 0;
3157  int scale, off, idx, last, skip, value;
3158  int ttblk = ttmb & 7;
3159  int pat = 0;
3160 
3161  s->dsp.clear_block(block);
3162 
3163  if (ttmb == -1) {
3164  ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3165  }
3166  if (ttblk == TT_4X4) {
3167  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3168  }
3169  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3170  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3171  || (!v->res_rtm_flag && !first_block))) {
3172  subblkpat = decode012(gb);
3173  if (subblkpat)
3174  subblkpat ^= 3; // swap decoded pattern bits
3175  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3176  ttblk = TT_8X4;
3177  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3178  ttblk = TT_4X8;
3179  }
3180  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3181 
3182  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3183  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3184  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3185  ttblk = TT_8X4;
3186  }
3187  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3188  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3189  ttblk = TT_4X8;
3190  }
3191  switch (ttblk) {
3192  case TT_8X8:
3193  pat = 0xF;
3194  i = 0;
3195  last = 0;
3196  while (!last) {
3197  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3198  i += skip;
3199  if (i > 63)
3200  break;
3201  if (!v->fcm)
3202  idx = v->zz_8x8[0][i++];
3203  else
3204  idx = v->zzi_8x8[i++];
3205  block[idx] = value * scale;
3206  if (!v->pquantizer)
3207  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3208  }
3209  if (!skip_block) {
3210  if (i == 1)
3211  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3212  else {
3213  v->vc1dsp.vc1_inv_trans_8x8(block);
3214  s->dsp.add_pixels_clamped(block, dst, linesize);
3215  }
3216  }
3217  break;
3218  case TT_4X4:
3219  pat = ~subblkpat & 0xF;
3220  for (j = 0; j < 4; j++) {
3221  last = subblkpat & (1 << (3 - j));
3222  i = 0;
3223  off = (j & 1) * 4 + (j & 2) * 16;
3224  while (!last) {
3225  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3226  i += skip;
3227  if (i > 15)
3228  break;
3229  if (!v->fcm)
3230  idx = ff_vc1_simple_progressive_4x4_zigzag[i++];
3231  else
3232  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3233  block[idx + off] = value * scale;
3234  if (!v->pquantizer)
3235  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3236  }
3237  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3238  if (i == 1)
3239  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3240  else
3241  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3242  }
3243  }
3244  break;
3245  case TT_8X4:
3246  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3247  for (j = 0; j < 2; j++) {
3248  last = subblkpat & (1 << (1 - j));
3249  i = 0;
3250  off = j * 32;
3251  while (!last) {
3252  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3253  i += skip;
3254  if (i > 31)
3255  break;
3256  if (!v->fcm)
3257  idx = v->zz_8x4[i++] + off;
3258  else
3259  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3260  block[idx] = value * scale;
3261  if (!v->pquantizer)
3262  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3263  }
3264  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3265  if (i == 1)
3266  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3267  else
3268  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3269  }
3270  }
3271  break;
3272  case TT_4X8:
3273  pat = ~(subblkpat * 5) & 0xF;
3274  for (j = 0; j < 2; j++) {
3275  last = subblkpat & (1 << (1 - j));
3276  i = 0;
3277  off = j * 4;
3278  while (!last) {
3279  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3280  i += skip;
3281  if (i > 31)
3282  break;
3283  if (!v->fcm)
3284  idx = v->zz_4x8[i++] + off;
3285  else
3286  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3287  block[idx] = value * scale;
3288  if (!v->pquantizer)
3289  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3290  }
3291  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3292  if (i == 1)
3293  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3294  else
3295  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3296  }
3297  }
3298  break;
3299  }
3300  if (ttmb_out)
3301  *ttmb_out |= ttblk << (n * 4);
3302  return pat;
3303 }
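Both return channels of this function pack one nibble per block: the returned pattern contributes four coded-subblock bits, and ttmb_out records the chosen transform type shifted by 4 * n. The loop-filter code later unpacks them the same way; a one-line sketch of that unpacking:

/* Illustrative only: recover block n's transform type from the packed word,
 * matching the (v->ttblk[...] >> (block_num * 4)) & 0xF reads further down. */
static inline int unpack_block_tt(int packed_tt, int n)
{
    return (packed_tt >> (n * 4)) & 0xF;
}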
3304  // Macroblock group
3306 
3307 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3308 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3309 
3310 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3311 {
3312  MpegEncContext *s = &v->s;
3313  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3314  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3315  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3316  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3317  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3318  uint8_t *dst;
3319 
3320  if (block_num > 3) {
3321  dst = s->dest[block_num - 3];
3322  } else {
3323  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3324  }
3325  if (s->mb_y != s->end_mb_y || block_num < 2) {
3326  int16_t (*mv)[2];
3327  int mv_stride;
3328 
3329  if (block_num > 3) {
3330  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3331  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3332  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3333  mv_stride = s->mb_stride;
3334  } else {
3335  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3336  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3337  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3338  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3339  mv_stride = s->b8_stride;
3340  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3341  }
3342 
3343  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3344  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3345  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3346  } else {
3347  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3348  if (idx == 3) {
3349  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3350  } else if (idx) {
3351  if (idx == 1)
3352  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3353  else
3354  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3355  }
3356  }
3357  }
3358 
3359  dst -= 4 * linesize;
3360  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3361  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3362  idx = (block_cbp | (block_cbp >> 2)) & 3;
3363  if (idx == 3) {
3364  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3365  } else if (idx) {
3366  if (idx == 1)
3367  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3368  else
3369  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3370  }
3371  }
3372 }
3373 
3374 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3375 {
3376  MpegEncContext *s = &v->s;
3377  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3378  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3379  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3380  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3381  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3382  uint8_t *dst;
3383 
3384  if (block_num > 3) {
3385  dst = s->dest[block_num - 3] - 8 * linesize;
3386  } else {
3387  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3388  }
3389 
3390  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3391  int16_t (*mv)[2];
3392 
3393  if (block_num > 3) {
3394  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3395  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3396  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3397  } else {
3398  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3399  : (mb_cbp >> ((block_num + 1) * 4));
3400  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3401  : (mb_is_intra >> ((block_num + 1) * 4));
3402  mv = &s->current_picture.f.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3403  }
3404  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3405  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3406  } else {
3407  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3408  if (idx == 5) {
3409  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3410  } else if (idx) {
3411  if (idx == 1)
3412  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3413  else
3414  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3415  }
3416  }
3417  }
3418 
3419  dst -= 4;
3420  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3421  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3422  idx = (block_cbp | (block_cbp >> 1)) & 5;
3423  if (idx == 5) {
3424  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3425  } else if (idx) {
3426  if (idx == 1)
3427  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3428  else
3429  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3430  }
3431  }
3432 }
3433 
3434 static void vc1_apply_p_loop_filter(VC1Context *v)
3435 {
3436  MpegEncContext *s = &v->s;
3437  int i;
3438 
3439  for (i = 0; i < 6; i++) {
3440  vc1_apply_p_v_loop_filter(v, i);
3441  }
3442 
3443  /* V always precedes H, therefore we run H one MB before V;
3444  * at the end of a row, we catch up to complete the row */
3445  if (s->mb_x) {
3446  for (i = 0; i < 6; i++) {
3447  vc1_apply_p_h_loop_filter(v, i);
3448  }
3449  if (s->mb_x == s->mb_width - 1) {
3450  s->mb_x++;
3451  ff_update_block_index(s);
3452  for (i = 0; i < 6; i++) {
3453  vc1_apply_p_h_loop_filter(v, i);
3454  }
3455  }
3456  }
3457 }
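To make the staggering described in the comment above concrete, here is a toy model that prints the per-row schedule: vertical edges of the current macroblock first, horizontal edges one macroblock behind, with a catch-up step at the end of the row.

#include <stdio.h>

/* Toy model only: the filtering schedule for one macroblock row of width mb_width. */
static void show_filter_order(int mb_width)
{
    for (int x = 0; x < mb_width; x++) {
        printf("V(%d) ", x);             /* vertical edges of MB x */
        if (x)
            printf("H(%d) ", x - 1);     /* horizontal edges, one MB behind */
        if (x == mb_width - 1)
            printf("H(%d) ", x);         /* catch up on the last MB of the row */
    }
    printf("\n");                        /* e.g. width 3: V(0) V(1) H(0) V(2) H(1) H(2) */
}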
3458 
3461 static int vc1_decode_p_mb(VC1Context *v)
3462 {
3463  MpegEncContext *s = &v->s;
3464  GetBitContext *gb = &s->gb;
3465  int i, j;
3466  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3467  int cbp; /* cbp decoding stuff */
3468  int mqdiff, mquant; /* MB quantization */
3469  int ttmb = v->ttfrm; /* MB Transform type */
3470 
3471  int mb_has_coeffs = 1; /* last_flag */
3472  int dmv_x, dmv_y; /* Differential MV components */
3473  int index, index1; /* LUT indexes */
3474  int val, sign; /* temp values */
3475  int first_block = 1;
3476  int dst_idx, off;
3477  int skipped, fourmv;
3478  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3479 
3480  mquant = v->pq; /* lossy initialization */
3481 
3482  if (v->mv_type_is_raw)
3483  fourmv = get_bits1(gb);
3484  else
3485  fourmv = v->mv_type_mb_plane[mb_pos];
3486  if (v->skip_is_raw)
3487  skipped = get_bits1(gb);
3488  else
3489  skipped = v->s.mbskip_table[mb_pos];
3490 
3491  if (!fourmv) { /* 1MV mode */
3492  if (!skipped) {
3493  GET_MVDATA(dmv_x, dmv_y);
3494 
3495  if (s->mb_intra) {
3496  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3497  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3498  }
3499  s->current_picture.f.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3500  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3501 
3502  /* FIXME Set DC val for inter block ? */
3503  if (s->mb_intra && !mb_has_coeffs) {
3504  GET_MQUANT();
3505  s->ac_pred = get_bits1(gb);
3506  cbp = 0;
3507  } else if (mb_has_coeffs) {
3508  if (s->mb_intra)
3509  s->ac_pred = get_bits1(gb);
3510  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3511  GET_MQUANT();
3512  } else {
3513  mquant = v->pq;
3514  cbp = 0;
3515  }
3516  s->current_picture.f.qscale_table[mb_pos] = mquant;
3517 
3518  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3519  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3520  VC1_TTMB_VLC_BITS, 2);
3521  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3522  dst_idx = 0;
3523  for (i = 0; i < 6; i++) {
3524  s->dc_val[0][s->block_index[i]] = 0;
3525  dst_idx += i >> 2;
3526  val = ((cbp >> (5 - i)) & 1);
3527  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3528  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3529  if (s->mb_intra) {
3530  /* check if prediction blocks A and C are available */
3531  v->a_avail = v->c_avail = 0;
3532  if (i == 2 || i == 3 || !s->first_slice_line)
3533  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3534  if (i == 1 || i == 3 || s->mb_x)
3535  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3536 
3537  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3538  (i & 4) ? v->codingset2 : v->codingset);
3539  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3540  continue;
3541  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3542  if (v->rangeredfrm)
3543  for (j = 0; j < 64; j++)
3544  s->block[i][j] <<= 1;
3545  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3546  if (v->pq >= 9 && v->overlap) {
3547  if (v->c_avail)
3548  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3549  if (v->a_avail)
3550  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3551  }
3552  block_cbp |= 0xF << (i << 2);
3553  block_intra |= 1 << i;
3554  } else if (val) {
3555  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3556  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3557  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3558  block_cbp |= pat << (i << 2);
3559  if (!v->ttmbf && ttmb < 8)
3560  ttmb = -1;
3561  first_block = 0;
3562  }
3563  }
3564  } else { // skipped
3565  s->mb_intra = 0;
3566  for (i = 0; i < 6; i++) {
3567  v->mb_type[0][s->block_index[i]] = 0;
3568  s->dc_val[0][s->block_index[i]] = 0;
3569  }
3570  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3571  s->current_picture.f.qscale_table[mb_pos] = 0;
3572  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3573  vc1_mc_1mv(v, 0);
3574  }
3575  } else { // 4MV mode
3576  if (!skipped /* unskipped MB */) {
3577  int intra_count = 0, coded_inter = 0;
3578  int is_intra[6], is_coded[6];
3579  /* Get CBPCY */
3580  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3581  for (i = 0; i < 6; i++) {
3582  val = ((cbp >> (5 - i)) & 1);
3583  s->dc_val[0][s->block_index[i]] = 0;
3584  s->mb_intra = 0;
3585  if (i < 4) {
3586  dmv_x = dmv_y = 0;
3587  s->mb_intra = 0;
3588  mb_has_coeffs = 0;
3589  if (val) {
3590  GET_MVDATA(dmv_x, dmv_y);
3591  }
3592  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3593  if (!s->mb_intra)
3594  vc1_mc_4mv_luma(v, i, 0);
3595  intra_count += s->mb_intra;
3596  is_intra[i] = s->mb_intra;
3597  is_coded[i] = mb_has_coeffs;
3598  }
3599  if (i & 4) {
3600  is_intra[i] = (intra_count >= 3);
3601  is_coded[i] = val;
3602  }
3603  if (i == 4)
3604  vc1_mc_4mv_chroma(v, 0);
3605  v->mb_type[0][s->block_index[i]] = is_intra[i];
3606  if (!coded_inter)
3607  coded_inter = !is_intra[i] & is_coded[i];
3608  }
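 /* In 4MV mode each luma block signals intra individually through its MV data;
  * the chroma blocks are treated as intra only when at least three of the four
  * luma blocks are intra (intra_count >= 3). */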
3609  // if there are no coded blocks then don't do anything more
3610  dst_idx = 0;
3611  if (!intra_count && !coded_inter)
3612  goto end;
3613  GET_MQUANT();
3614  s->current_picture.f.qscale_table[mb_pos] = mquant;
3615  /* test if block is intra and has pred */
3616  {
3617  int intrapred = 0;
3618  for (i = 0; i < 6; i++)
3619  if (is_intra[i]) {
3620  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3621  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3622  intrapred = 1;
3623  break;
3624  }
3625  }
3626  if (intrapred)
3627  s->ac_pred = get_bits1(gb);
3628  else
3629  s->ac_pred = 0;
3630  }
3631  if (!v->ttmbf && coded_inter)
3632  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3633  for (i = 0; i < 6; i++) {
3634  dst_idx += i >> 2;
3635  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3636  s->mb_intra = is_intra[i];
3637  if (is_intra[i]) {
3638  /* check if prediction blocks A and C are available */
3639  v->a_avail = v->c_avail = 0;
3640  if (i == 2 || i == 3 || !s->first_slice_line)
3641  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3642  if (i == 1 || i == 3 || s->mb_x)
3643  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3644 
3645  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3646  (i & 4) ? v->codingset2 : v->codingset);
3647  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3648  continue;
3649  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3650  if (v->rangeredfrm)
3651  for (j = 0; j < 64; j++)
3652  s->block[i][j] <<= 1;
3653  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3654  (i & 4) ? s->uvlinesize : s->linesize);
3655  if (v->pq >= 9 && v->overlap) {
3656  if (v->c_avail)
3657  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3658  if (v->a_avail)
3659  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3660  }
3661  block_cbp |= 0xF << (i << 2);
3662  block_intra |= 1 << i;
3663  } else if (is_coded[i]) {
3664  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3665  first_block, s->dest[dst_idx] + off,
3666  (i & 4) ? s->uvlinesize : s->linesize,
3667  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3668  &block_tt);
3669  block_cbp |= pat << (i << 2);
3670  if (!v->ttmbf && ttmb < 8)
3671  ttmb = -1;
3672  first_block = 0;
3673  }
3674  }
3675  } else { // skipped MB
3676  s->mb_intra = 0;
3677  s->current_picture.f.qscale_table[mb_pos] = 0;
3678  for (i = 0; i < 6; i++) {
3679  v->mb_type[0][s->block_index[i]] = 0;
3680  s->dc_val[0][s->block_index[i]] = 0;
3681  }
3682  for (i = 0; i < 4; i++) {
3683  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3684  vc1_mc_4mv_luma(v, i, 0);
3685  }
3686  vc1_mc_4mv_chroma(v, 0);
3687  s->current_picture.f.qscale_table[mb_pos] = 0;
3688  }
3689  }
3690 end:
3691  v->cbp[s->mb_x] = block_cbp;
3692  v->ttblk[s->mb_x] = block_tt;
3693  v->is_intra[s->mb_x] = block_intra;
3694 
3695  return 0;
3696 }
3697 
3698 /* Decode one macroblock in an interlaced frame p picture */
3699 
3700 static int vc1_decode_p_mb_intfr(VC1Context *v)
3701 {
3702  MpegEncContext *s = &v->s;
3703  GetBitContext *gb = &s->gb;
3704  int i;
3705  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3706  int cbp = 0; /* cbp decoding stuff */
3707  int mqdiff, mquant; /* MB quantization */
3708  int ttmb = v->ttfrm; /* MB Transform type */
3709 
3710  int mb_has_coeffs = 1; /* last_flag */
3711  int dmv_x, dmv_y; /* Differential MV components */
3712  int val; /* temp value */
3713  int first_block = 1;
3714  int dst_idx, off;
3715  int skipped, fourmv = 0, twomv = 0;
3716  int block_cbp = 0, pat, block_tt = 0;
3717  int idx_mbmode = 0, mvbp;
3718  int stride_y, fieldtx;
3719 
3720  mquant = v->pq; /* Lossy initialization */
3721 
3722  if (v->skip_is_raw)
3723  skipped = get_bits1(gb);
3724  else
3725  skipped = v->s.mbskip_table[mb_pos];
3726  if (!skipped) {
3727  if (v->fourmvswitch)
3728  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3729  else
3730  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3731  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3732  /* store the motion vector type in a flag (useful later) */
3733  case MV_PMODE_INTFR_4MV:
3734  fourmv = 1;
3735  v->blk_mv_type[s->block_index[0]] = 0;
3736  v->blk_mv_type[s->block_index[1]] = 0;
3737  v->blk_mv_type[s->block_index[2]] = 0;
3738  v->blk_mv_type[s->block_index[3]] = 0;
3739  break;
3740  case MV_PMODE_INTFR_4MV_FIELD:
3741  fourmv = 1;
3742  v->blk_mv_type[s->block_index[0]] = 1;
3743  v->blk_mv_type[s->block_index[1]] = 1;
3744  v->blk_mv_type[s->block_index[2]] = 1;
3745  v->blk_mv_type[s->block_index[3]] = 1;
3746  break;
3747  case MV_PMODE_INTFR_2MV_FIELD:
3748  twomv = 1;
3749  v->blk_mv_type[s->block_index[0]] = 1;
3750  v->blk_mv_type[s->block_index[1]] = 1;
3751  v->blk_mv_type[s->block_index[2]] = 1;
3752  v->blk_mv_type[s->block_index[3]] = 1;
3753  break;
3754  case MV_PMODE_INTFR_1MV:
3755  v->blk_mv_type[s->block_index[0]] = 0;
3756  v->blk_mv_type[s->block_index[1]] = 0;
3757  v->blk_mv_type[s->block_index[2]] = 0;
3758  v->blk_mv_type[s->block_index[3]] = 0;
3759  break;
3760  }
3761  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3762  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
3763  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
3764  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
3765  s->mb_intra = v->is_intra[s->mb_x] = 1;
3766  for (i = 0; i < 6; i++)
3767  v->mb_type[0][s->block_index[i]] = 1;
3768  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3769  mb_has_coeffs = get_bits1(gb);
3770  if (mb_has_coeffs)
3771  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3772  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3773  GET_MQUANT();
3774  s->current_picture.f.qscale_table[mb_pos] = mquant;
3775  /* Set DC scale - y and c use the same (not sure if necessary here) */
3776  s->y_dc_scale = s->y_dc_scale_table[mquant];
3777  s->c_dc_scale = s->c_dc_scale_table[mquant];
3778  dst_idx = 0;
3779  for (i = 0; i < 6; i++) {
3780  s->dc_val[0][s->block_index[i]] = 0;
3781  dst_idx += i >> 2;
3782  val = ((cbp >> (5 - i)) & 1);
3783  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3784  v->a_avail = v->c_avail = 0;
3785  if (i == 2 || i == 3 || !s->first_slice_line)
3786  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3787  if (i == 1 || i == 3 || s->mb_x)
3788  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3789 
3790  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3791  (i & 4) ? v->codingset2 : v->codingset);
3792  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3793  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3794  if (i < 4) {
3795  stride_y = s->linesize << fieldtx;
3796  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3797  } else {
3798  stride_y = s->uvlinesize;
3799  off = 0;
3800  }
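 /* With FIELDTX set the luma rows of this MB are stored field-interleaved, so
  * the effective luma stride is doubled and the lower blocks start one line
  * (instead of eight lines) below the upper ones. */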
3801  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3802  //TODO: loop filter
3803  }
3804 
3805  } else { // inter MB
3806  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3807  if (mb_has_coeffs)
3808  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3809  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3810  v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3811  } else {
3812  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3813  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3814  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3815  }
3816  }
3817  s->mb_intra = v->is_intra[s->mb_x] = 0;
3818  for (i = 0; i < 6; i++)
3819  v->mb_type[0][s->block_index[i]] = 0;
3820  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3821  /* for all motion vector read MVDATA and motion compensate each block */
3822  dst_idx = 0;
3823  if (fourmv) {
3824  mvbp = v->fourmvbp;
3825  for (i = 0; i < 6; i++) {
3826  if (i < 4) {
3827  dmv_x = dmv_y = 0;
3828  val = ((mvbp >> (3 - i)) & 1);
3829  if (val) {
3830  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3831  }
3832  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0]);
3833  vc1_mc_4mv_luma(v, i, 0);
3834  } else if (i == 4) {
3835  vc1_mc_4mv_chroma4(v);
3836  }
3837  }
3838  } else if (twomv) {
3839  mvbp = v->twomvbp;
3840  dmv_x = dmv_y = 0;
3841  if (mvbp & 2) {
3842  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3843  }
3844  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3845  vc1_mc_4mv_luma(v, 0, 0);
3846  vc1_mc_4mv_luma(v, 1, 0);
3847  dmv_x = dmv_y = 0;
3848  if (mvbp & 1) {
3849  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3850  }
3851  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0]);
3852  vc1_mc_4mv_luma(v, 2, 0);
3853  vc1_mc_4mv_luma(v, 3, 0);
3854  vc1_mc_4mv_chroma4(v);
3855  } else {
3856  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3857  dmv_x = dmv_y = 0;
3858  if (mvbp) {
3859  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3860  }
3861  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0]);
3862  vc1_mc_1mv(v, 0);
3863  }
3864  if (cbp)
3865  GET_MQUANT(); // p. 227
3866  s->current_picture.f.qscale_table[mb_pos] = mquant;
3867  if (!v->ttmbf && cbp)
3868  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3869  for (i = 0; i < 6; i++) {
3870  s->dc_val[0][s->block_index[i]] = 0;
3871  dst_idx += i >> 2;
3872  val = ((cbp >> (5 - i)) & 1);
3873  if (!fieldtx)
3874  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3875  else
3876  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3877  if (val) {
3878  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3879  first_block, s->dest[dst_idx] + off,
3880  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3881  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3882  block_cbp |= pat << (i << 2);
3883  if (!v->ttmbf && ttmb < 8)
3884  ttmb = -1;
3885  first_block = 0;
3886  }
3887  }
3888  }
3889  } else { // skipped
3890  s->mb_intra = v->is_intra[s->mb_x] = 0;
3891  for (i = 0; i < 6; i++) {
3892  v->mb_type[0][s->block_index[i]] = 0;
3893  s->dc_val[0][s->block_index[i]] = 0;
3894  }
3895  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_SKIP;
3896  s->current_picture.f.qscale_table[mb_pos] = 0;
3897  v->blk_mv_type[s->block_index[0]] = 0;
3898  v->blk_mv_type[s->block_index[1]] = 0;
3899  v->blk_mv_type[s->block_index[2]] = 0;
3900  v->blk_mv_type[s->block_index[3]] = 0;
3901  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0]);
3902  vc1_mc_1mv(v, 0);
3903  }
3904  if (s->mb_x == s->mb_width - 1)
3905  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3906  return 0;
3907 }
3908 
3909 static int vc1_decode_p_mb_intfi(VC1Context *v)
3910 {
3911  MpegEncContext *s = &v->s;
3912  GetBitContext *gb = &s->gb;
3913  int i;
3914  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3915  int cbp = 0; /* cbp decoding stuff */
3916  int mqdiff, mquant; /* MB quantization */
3917  int ttmb = v->ttfrm; /* MB Transform type */
3918 
3919  int mb_has_coeffs = 1; /* last_flag */
3920  int dmv_x, dmv_y; /* Differential MV components */
3921  int val; /* temp values */
3922  int first_block = 1;
3923  int dst_idx, off;
3924  int pred_flag = 0;
3925  int block_cbp = 0, pat, block_tt = 0;
3926  int idx_mbmode = 0;
3927 
3928  mquant = v->pq; /* Lossy initialization */
3929 
3930  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
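 /* The interlaced-field MBMODE index folds several flags together: values 0-1
  * select an intra MB, 2-5 a 1MV MB and the rest the 4MV variants, with the
  * low bits indicating whether MVDATA and CBPCY are present. */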
3931  if (idx_mbmode <= 1) { // intra MB
3932  s->mb_intra = v->is_intra[s->mb_x] = 1;
3933  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
3934  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
3935  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
3936  GET_MQUANT();
3937  s->current_picture.f.qscale_table[mb_pos] = mquant;
3938  /* Set DC scale - y and c use the same (not sure if necessary here) */
3939  s->y_dc_scale = s->y_dc_scale_table[mquant];
3940  s->c_dc_scale = s->c_dc_scale_table[mquant];
3941  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3942  mb_has_coeffs = idx_mbmode & 1;
3943  if (mb_has_coeffs)
3944  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
3945  dst_idx = 0;
3946  for (i = 0; i < 6; i++) {
3947  s->dc_val[0][s->block_index[i]] = 0;
3948  v->mb_type[0][s->block_index[i]] = 1;
3949  dst_idx += i >> 2;
3950  val = ((cbp >> (5 - i)) & 1);
3951  v->a_avail = v->c_avail = 0;
3952  if (i == 2 || i == 3 || !s->first_slice_line)
3953  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3954  if (i == 1 || i == 3 || s->mb_x)
3955  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3956 
3957  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3958  (i & 4) ? v->codingset2 : v->codingset);
3959  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3960  continue;
3961  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3962  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3963  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
3964  // TODO: loop filter
3965  }
3966  } else {
3967  s->mb_intra = v->is_intra[s->mb_x] = 0;
3968  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
3969  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
3970  if (idx_mbmode <= 5) { // 1-MV
3971  dmv_x = dmv_y = pred_flag = 0;
3972  if (idx_mbmode & 1) {
3973  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3974  }
3975  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3976  vc1_mc_1mv(v, 0);
3977  mb_has_coeffs = !(idx_mbmode & 2);
3978  } else { // 4-MV
3979  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3980  for (i = 0; i < 6; i++) {
3981  if (i < 4) {
3982  dmv_x = dmv_y = pred_flag = 0;
3983  val = ((v->fourmvbp >> (3 - i)) & 1);
3984  if (val) {
3985  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
3986  }
3987  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
3988  vc1_mc_4mv_luma(v, i, 0);
3989  } else if (i == 4)
3990  vc1_mc_4mv_chroma(v, 0);
3991  }
3992  mb_has_coeffs = idx_mbmode & 1;
3993  }
3994  if (mb_has_coeffs)
3995  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3996  if (cbp) {
3997  GET_MQUANT();
3998  }
3999  s->current_picture.f.qscale_table[mb_pos] = mquant;
4000  if (!v->ttmbf && cbp) {
4001  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4002  }
4003  dst_idx = 0;
4004  for (i = 0; i < 6; i++) {
4005  s->dc_val[0][s->block_index[i]] = 0;
4006  dst_idx += i >> 2;
4007  val = ((cbp >> (5 - i)) & 1);
4008  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4009  if (val) {
4010  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4011  first_block, s->dest[dst_idx] + off,
4012  (i & 4) ? s->uvlinesize : s->linesize,
4013  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4014  &block_tt);
4015  block_cbp |= pat << (i << 2);
4016  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4017  first_block = 0;
4018  }
4019  }
4020  }
4021  if (s->mb_x == s->mb_width - 1)
4022  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4023  return 0;
4024 }
4025 
4026 /** Decode one B-frame MB (in Main profile)
4027  */
4028 static void vc1_decode_b_mb(VC1Context *v)
4029 {
4030  MpegEncContext *s = &v->s;
4031  GetBitContext *gb = &s->gb;
4032  int i, j;
4033  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4034  int cbp = 0; /* cbp decoding stuff */
4035  int mqdiff, mquant; /* MB quantization */
4036  int ttmb = v->ttfrm; /* MB Transform type */
4037  int mb_has_coeffs = 0; /* last_flag */
4038  int index, index1; /* LUT indexes */
4039  int val, sign; /* temp values */
4040  int first_block = 1;
4041  int dst_idx, off;
4042  int skipped, direct;
4043  int dmv_x[2], dmv_y[2];
4044  int bmvtype = BMV_TYPE_BACKWARD;
4045 
4046  mquant = v->pq; /* lossy initialization */
4047  s->mb_intra = 0;
4048 
4049  if (v->dmb_is_raw)
4050  direct = get_bits1(gb);
4051  else
4052  direct = v->direct_mb_plane[mb_pos];
4053  if (v->skip_is_raw)
4054  skipped = get_bits1(gb);
4055  else
4056  skipped = v->s.mbskip_table[mb_pos];
4057 
4058  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4059  for (i = 0; i < 6; i++) {
4060  v->mb_type[0][s->block_index[i]] = 0;
4061  s->dc_val[0][s->block_index[i]] = 0;
4062  }
4063  s->current_picture.f.qscale_table[mb_pos] = 0;
4064 
4065  if (!direct) {
4066  if (!skipped) {
4067  GET_MVDATA(dmv_x[0], dmv_y[0]);
4068  dmv_x[1] = dmv_x[0];
4069  dmv_y[1] = dmv_y[0];
4070  }
4071  if (skipped || !s->mb_intra) {
4072  bmvtype = decode012(gb);
4073  switch (bmvtype) {
4074  case 0:
4075  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4076  break;
4077  case 1:
4078  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4079  break;
4080  case 2:
4081  bmvtype = BMV_TYPE_INTERPOLATED;
4082  dmv_x[0] = dmv_y[0] = 0;
4083  }
4084  }
4085  }
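 /* For non-direct MBs the short BMV-type codes are remapped using BFRACTION:
  * code 0 selects backward prediction when bfraction >= 1/2 and forward
  * otherwise, code 1 is the opposite, and code 2 selects interpolated
  * prediction. */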
4086  for (i = 0; i < 6; i++)
4087  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4088 
4089  if (skipped) {
4090  if (direct)
4091  bmvtype = BMV_TYPE_INTERPOLATED;
4092  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4093  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4094  return;
4095  }
4096  if (direct) {
4097  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4098  GET_MQUANT();
4099  s->mb_intra = 0;
4100  s->current_picture.f.qscale_table[mb_pos] = mquant;
4101  if (!v->ttmbf)
4102  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4103  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4104  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4105  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4106  } else {
4107  if (!mb_has_coeffs && !s->mb_intra) {
4108  /* no coded blocks - effectively skipped */
4109  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4110  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4111  return;
4112  }
4113  if (s->mb_intra && !mb_has_coeffs) {
4114  GET_MQUANT();
4115  s->current_picture.f.qscale_table[mb_pos] = mquant;
4116  s->ac_pred = get_bits1(gb);
4117  cbp = 0;
4118  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4119  } else {
4120  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4121  GET_MVDATA(dmv_x[0], dmv_y[0]);
4122  if (!mb_has_coeffs) {
4123  /* interpolated skipped block */
4124  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4125  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4126  return;
4127  }
4128  }
4129  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4130  if (!s->mb_intra) {
4131  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4132  }
4133  if (s->mb_intra)
4134  s->ac_pred = get_bits1(gb);
4135  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4136  GET_MQUANT();
4137  s->current_picture.f.qscale_table[mb_pos] = mquant;
4138  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4139  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4140  }
4141  }
4142  dst_idx = 0;
4143  for (i = 0; i < 6; i++) {
4144  s->dc_val[0][s->block_index[i]] = 0;
4145  dst_idx += i >> 2;
4146  val = ((cbp >> (5 - i)) & 1);
4147  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4148  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4149  if (s->mb_intra) {
4150  /* check if prediction blocks A and C are available */
4151  v->a_avail = v->c_avail = 0;
4152  if (i == 2 || i == 3 || !s->first_slice_line)
4153  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4154  if (i == 1 || i == 3 || s->mb_x)
4155  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4156 
4157  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4158  (i & 4) ? v->codingset2 : v->codingset);
4159  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4160  continue;
4161  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4162  if (v->rangeredfrm)
4163  for (j = 0; j < 64; j++)
4164  s->block[i][j] <<= 1;
4165  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4166  } else if (val) {
4167  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4168  first_block, s->dest[dst_idx] + off,
4169  (i & 4) ? s->uvlinesize : s->linesize,
4170  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4171  if (!v->ttmbf && ttmb < 8)
4172  ttmb = -1;
4173  first_block = 0;
4174  }
4175  }
4176 }
4177 
4178 /** Decode one B-frame MB (in interlaced field B picture)
4179  */
4180 static void vc1_decode_b_mb_intfi(VC1Context *v)
4181 {
4182  MpegEncContext *s = &v->s;
4183  GetBitContext *gb = &s->gb;
4184  int i, j;
4185  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4186  int cbp = 0; /* cbp decoding stuff */
4187  int mqdiff, mquant; /* MB quantization */
4188  int ttmb = v->ttfrm; /* MB Transform type */
4189  int mb_has_coeffs = 0; /* last_flag */
4190  int val; /* temp value */
4191  int first_block = 1;
4192  int dst_idx, off;
4193  int fwd;
4194  int dmv_x[2], dmv_y[2], pred_flag[2];
4195  int bmvtype = BMV_TYPE_BACKWARD;
4196  int idx_mbmode, interpmvp;
4197 
4198  mquant = v->pq; /* Lossy initialization */
4199  s->mb_intra = 0;
4200 
4201  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4202  if (idx_mbmode <= 1) { // intra MB
4203  s->mb_intra = v->is_intra[s->mb_x] = 1;
4204  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4205  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4206  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4207  GET_MQUANT();
4208  s->current_picture.f.qscale_table[mb_pos] = mquant;
4209  /* Set DC scale - y and c use the same (not sure if necessary here) */
4210  s->y_dc_scale = s->y_dc_scale_table[mquant];
4211  s->c_dc_scale = s->c_dc_scale_table[mquant];
4212  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4213  mb_has_coeffs = idx_mbmode & 1;
4214  if (mb_has_coeffs)
4215  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4216  dst_idx = 0;
4217  for (i = 0; i < 6; i++) {
4218  s->dc_val[0][s->block_index[i]] = 0;
4219  dst_idx += i >> 2;
4220  val = ((cbp >> (5 - i)) & 1);
4221  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4222  v->a_avail = v->c_avail = 0;
4223  if (i == 2 || i == 3 || !s->first_slice_line)
4224  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4225  if (i == 1 || i == 3 || s->mb_x)
4226  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4227 
4228  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4229  (i & 4) ? v->codingset2 : v->codingset);
4230  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4231  continue;
4232  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4233  if (v->rangeredfrm)
4234  for (j = 0; j < 64; j++)
4235  s->block[i][j] <<= 1;
4236  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4237  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4238  // TODO: yet to perform loop filter
4239  }
4240  } else {
4241  s->mb_intra = v->is_intra[s->mb_x] = 0;
4242  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4243  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4244  if (v->fmb_is_raw)
4245  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4246  else
4247  fwd = v->forward_mb_plane[mb_pos];
4248  if (idx_mbmode <= 5) { // 1-MV
4249  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4250  pred_flag[0] = pred_flag[1] = 0;
4251  if (fwd)
4252  bmvtype = BMV_TYPE_FORWARD;
4253  else {
4254  bmvtype = decode012(gb);
4255  switch (bmvtype) {
4256  case 0:
4257  bmvtype = BMV_TYPE_BACKWARD;
4258  break;
4259  case 1:
4260  bmvtype = BMV_TYPE_DIRECT;
4261  break;
4262  case 2:
4263  bmvtype = BMV_TYPE_INTERPOLATED;
4264  interpmvp = get_bits1(gb);
4265  }
4266  }
4267  v->bmvtype = bmvtype;
4268  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4269  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4270  }
4271  if (bmvtype == BMV_TYPE_INTERPOLATED && interpmvp) {
4272  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4273  }
4274  if (bmvtype == BMV_TYPE_DIRECT) {
4275  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4276  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4277  }
4278  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4279  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4280  mb_has_coeffs = !(idx_mbmode & 2);
4281  } else { // 4-MV
4282  if (fwd)
4283  bmvtype = BMV_TYPE_FORWARD;
4284  v->bmvtype = bmvtype;
4285  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4286  for (i = 0; i < 6; i++) {
4287  if (i < 4) {
4288  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4289  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4290  val = ((v->fourmvbp >> (3 - i)) & 1);
4291  if (val) {
4292  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4293  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4294  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4295  }
4296  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4297  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD);
4298  } else if (i == 4)
4299  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4300  }
4301  mb_has_coeffs = idx_mbmode & 1;
4302  }
4303  if (mb_has_coeffs)
4304  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4305  if (cbp) {
4306  GET_MQUANT();
4307  }
4308  s->current_picture.f.qscale_table[mb_pos] = mquant;
4309  if (!v->ttmbf && cbp) {
4310  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4311  }
4312  dst_idx = 0;
4313  for (i = 0; i < 6; i++) {
4314  s->dc_val[0][s->block_index[i]] = 0;
4315  dst_idx += i >> 2;
4316  val = ((cbp >> (5 - i)) & 1);
4317  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4318  if (val) {
4319  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4320  first_block, s->dest[dst_idx] + off,
4321  (i & 4) ? s->uvlinesize : s->linesize,
4322  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4323  if (!v->ttmbf && ttmb < 8)
4324  ttmb = -1;
4325  first_block = 0;
4326  }
4327  }
4328  }
4329 }
4330 
4331 /** Decode blocks of I-frame
4332  */
4333 static void vc1_decode_i_blocks(VC1Context *v)
4334 {
4335  int k, j;
4336  MpegEncContext *s = &v->s;
4337  int cbp, val;
4338  uint8_t *coded_val;
4339  int mb_pos;
4340 
4341  /* select codingmode used for VLC tables selection */
4342  switch (v->y_ac_table_index) {
4343  case 0:
4344  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4345  break;
4346  case 1:
4347  v->codingset = CS_HIGH_MOT_INTRA;
4348  break;
4349  case 2:
4350  v->codingset = CS_MID_RATE_INTRA;
4351  break;
4352  }
4353 
4354  switch (v->c_ac_table_index) {
4355  case 0:
4356  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4357  break;
4358  case 1:
4359  v->codingset2 = CS_HIGH_MOT_INTER;
4360  break;
4361  case 2:
4362  v->codingset2 = CS_MID_RATE_INTER;
4363  break;
4364  }
4365 
4366  /* Set DC scale - y and c use the same */
4367  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4368  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4369 
4370  //do frame decode
4371  s->mb_x = s->mb_y = 0;
4372  s->mb_intra = 1;
4373  s->first_slice_line = 1;
4374  for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4375  s->mb_x = 0;
4376  init_block_index(v);
4377  for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4378  uint8_t *dst[6];
4379  ff_update_block_index(s);
4380  dst[0] = s->dest[0];
4381  dst[1] = dst[0] + 8;
4382  dst[2] = s->dest[0] + s->linesize * 8;
4383  dst[3] = dst[2] + 8;
4384  dst[4] = s->dest[1];
4385  dst[5] = s->dest[2];
4386  s->dsp.clear_blocks(s->block[0]);
4387  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4388  s->current_picture.f.mb_type[mb_pos] = MB_TYPE_INTRA;
4389  s->current_picture.f.qscale_table[mb_pos] = v->pq;
4390  s->current_picture.f.motion_val[1][s->block_index[0]][0] = 0;
4391  s->current_picture.f.motion_val[1][s->block_index[0]][1] = 0;
4392 
4393  // do actual MB decoding and displaying
4394  cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4395  v->s.ac_pred = get_bits1(&v->s.gb);
4396 
4397  for (k = 0; k < 6; k++) {
4398  val = ((cbp >> (5 - k)) & 1);
4399 
4400  if (k < 4) {
4401  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4402  val = val ^ pred;
4403  *coded_val = val;
4404  }
4405  cbp |= val << (5 - k);
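 /* For the four luma blocks the coded flag is transmitted as a prediction
  * residual: the decoded bit is XORed with the prediction from the neighbouring
  * coded_block entries and the corrected value is folded back into cbp. */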
4406 
4407  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4408 
4409  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4410  continue;
4411  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4412  if (v->pq >= 9 && v->overlap) {
4413  if (v->rangeredfrm)
4414  for (j = 0; j < 64; j++)
4415  s->block[k][j] <<= 1;
4416  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4417  } else {
4418  if (v->rangeredfrm)
4419  for (j = 0; j < 64; j++)
4420  s->block[k][j] = (s->block[k][j] - 64) << 1;
4421  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4422  }
4423  }
4424 
4425  if (v->pq >= 9 && v->overlap) {
4426  if (s->mb_x) {
4427  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4428  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4429  if (!(s->flags & CODEC_FLAG_GRAY)) {
4430  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4431  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4432  }
4433  }
4434  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4435  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4436  if (!s->first_slice_line) {
4437  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4438  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4439  if (!(s->flags & CODEC_FLAG_GRAY)) {
4440  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4441  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4442  }
4443  }
4444  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4445  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4446  }
4447  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4448 
4449  if (get_bits_count(&s->gb) > v->bits) {
4450  ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4451  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4452  get_bits_count(&s->gb), v->bits);
4453  return;
4454  }
4455  }
4456  if (!v->s.loop_filter)
4457  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4458  else if (s->mb_y)
4459  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4460 
4461  s->first_slice_line = 0;
4462  }
4463  if (v->s.loop_filter)
4464  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4465 
4466  /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4467  * profile, these only differ when decoding MSS2 rectangles. */
4468  ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4469 }
4470 
4471 /** Decode blocks of I-frame for advanced profile
4472  */
4473 static void vc1_decode_i_blocks_adv(VC1Context *v)
4474 {
4475  int k;
4476  MpegEncContext *s = &v->s;
4477  int cbp, val;
4478  uint8_t *coded_val;
4479  int mb_pos;
4480  int mquant = v->pq;
4481  int mqdiff;
4482  GetBitContext *gb = &s->gb;
4483 
4484  /* select codingmode used for VLC tables selection */
4485  switch (v->y_ac_table_index) {
4486  case 0:
4487  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4488  break;
4489  case 1:
4490  v->codingset = CS_HIGH_MOT_INTRA;
4491  break;
4492  case 2:
4493  v->codingset = CS_MID_RATE_INTRA;
4494  break;
4495  }
4496 
4497  switch (v->c_ac_table_index) {
4498  case 0:
4499  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4500  break;
4501  case 1:
4502  v->codingset2 = CS_HIGH_MOT_INTER;
4503  break;
4504  case 2:
4505  v->codingset2 = CS_MID_RATE_INTER;
4506  break;
4507  }
4508 
4509  // do frame decode
4510  s->mb_x = s->mb_y = 0;
4511  s->mb_intra = 1;
4512  s->first_slice_line = 1;
4513  s->mb_y = s->start_mb_y;
4514  if (s->start_mb_y) {
4515  s->mb_x = 0;
4516  init_block_index(v);
4517  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4518  (1 + s->b8_stride) * sizeof(*s->coded_block));
4519  }
4520  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4521  s->mb_x = 0;
4522  init_block_index(v);
4523  for (;s->mb_x < s->mb_width; s->mb_x++) {
4524  int16_t (*block)[64] = v->block[v->cur_blk_idx];
4525  ff_update_block_index(s);
4526  s->dsp.clear_blocks(block[0]);
4527  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4528  s->current_picture.f.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4529  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4530  s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4531 
4532  // do actual MB decoding and displaying
4533  if (v->fieldtx_is_raw)
4534  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4535  cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4536  if (v->acpred_is_raw)
4537  v->s.ac_pred = get_bits1(&v->s.gb);
4538  else
4539  v->s.ac_pred = v->acpred_plane[mb_pos];
4540 
4541  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4542  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4543 
4544  GET_MQUANT();
4545 
4546  s->current_picture.f.qscale_table[mb_pos] = mquant;
4547  /* Set DC scale - y and c use the same */
4548  s->y_dc_scale = s->y_dc_scale_table[mquant];
4549  s->c_dc_scale = s->c_dc_scale_table[mquant];
4550 
4551  for (k = 0; k < 6; k++) {
4552  val = ((cbp >> (5 - k)) & 1);
4553 
4554  if (k < 4) {
4555  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4556  val = val ^ pred;
4557  *coded_val = val;
4558  }
4559  cbp |= val << (5 - k);
4560 
4561  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4562  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4563 
4564  vc1_decode_i_block_adv(v, block[k], k, val,
4565  (k < 4) ? v->codingset : v->codingset2, mquant);
4566 
4567  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4568  continue;
4569  v->vc1dsp.vc1_inv_trans_8x8(block[k]);
4570  }
4571 
4572  vc1_smooth_overlap_filter_iblk(v);
4573  vc1_put_signed_blocks_clamped(v);
4574  if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4575 
4576  if (get_bits_count(&s->gb) > v->bits) {
4577  // TODO: may need modification to handle slice coding
4578  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4579  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4580  get_bits_count(&s->gb), v->bits);
4581  return;
4582  }
4583  }
4584  if (!v->s.loop_filter)
4585  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4586  else if (s->mb_y)
4587  ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
4588  s->first_slice_line = 0;
4589  }
4590 
4591  /* raw bottom MB row */
4592  s->mb_x = 0;
4593  init_block_index(v);
4594 
4595  for (;s->mb_x < s->mb_width; s->mb_x++) {
4596  ff_update_block_index(s);
4597  vc1_put_signed_blocks_clamped(v);
4598  if (v->s.loop_filter)
4599  vc1_loop_filter_iblk_delayed(v, v->pq);
4600  }
4601  if (v->s.loop_filter)
4602  ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
4603  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4604  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4605 }
4606 
4607 static void vc1_decode_p_blocks(VC1Context *v)
4608 {
4609  MpegEncContext *s = &v->s;
4610  int apply_loop_filter;
4611 
4612  /* select codingmode used for VLC tables selection */
4613  switch (v->c_ac_table_index) {
4614  case 0:
4615  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4616  break;
4617  case 1:
4618  v->codingset = CS_HIGH_MOT_INTRA;
4619  break;
4620  case 2:
4621  v->codingset = CS_MID_RATE_INTRA;
4622  break;
4623  }
4624 
4625  switch (v->c_ac_table_index) {
4626  case 0:
4627  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4628  break;
4629  case 1:
4630  v->codingset2 = CS_HIGH_MOT_INTER;
4631  break;
4632  case 2:
4633  v->codingset2 = CS_MID_RATE_INTER;
4634  break;
4635  }
4636 
4637  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY);
4638  s->first_slice_line = 1;
4639  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
4640  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4641  s->mb_x = 0;
4642  init_block_index(v);
4643  for (; s->mb_x < s->mb_width; s->mb_x++) {
4644  ff_update_block_index(s);
4645 
4646  if (v->fcm == ILACE_FIELD)
4647  vc1_decode_p_mb_intfi(v);
4648  else if (v->fcm == ILACE_FRAME)
4649  vc1_decode_p_mb_intfr(v);
4650  else vc1_decode_p_mb(v);
4651  if (s->mb_y != s->start_mb_y && apply_loop_filter && v->fcm == PROGRESSIVE)
4652  vc1_apply_p_loop_filter(v);
4653  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4654  // TODO: may need modification to handle slice coding
4655  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4656  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4657  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4658  return;
4659  }
4660  }
4661  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
4662  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
4663  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4664  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
4665  if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4666  s->first_slice_line = 0;
4667  }
4668  if (apply_loop_filter && v->fcm == PROGRESSIVE) {
4669  s->mb_x = 0;
4670  init_block_index(v);
4671  for (; s->mb_x < s->mb_width; s->mb_x++) {
4672  ff_update_block_index(s);
4673  vc1_apply_p_loop_filter(v);
4674  }
4675  }
4676  if (s->end_mb_y >= s->start_mb_y)
4677  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4678  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4679  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4680 }
4681 
4682 static void vc1_decode_b_blocks(VC1Context *v)
4683 {
4684  MpegEncContext *s = &v->s;
4685 
4686  /* select codingmode used for VLC tables selection */
4687  switch (v->c_ac_table_index) {
4688  case 0:
4689  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4690  break;
4691  case 1:
4692  v->codingset = CS_HIGH_MOT_INTRA;
4693  break;
4694  case 2:
4695  v->codingset = CS_MID_RATE_INTRA;
4696  break;
4697  }
4698 
4699  switch (v->c_ac_table_index) {
4700  case 0:
4701  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4702  break;
4703  case 1:
4704  v->codingset2 = CS_HIGH_MOT_INTER;
4705  break;
4706  case 2:
4707  v->codingset2 = CS_MID_RATE_INTER;
4708  break;
4709  }
4710 
4711  s->first_slice_line = 1;
4712  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4713  s->mb_x = 0;
4714  init_block_index(v);
4715  for (; s->mb_x < s->mb_width; s->mb_x++) {
4716  ff_update_block_index(s);
4717 
4718  if (v->fcm == ILACE_FIELD)
4719  vc1_decode_b_mb_intfi(v);
4720  else
4721  vc1_decode_b_mb(v);
4722  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
4723  // TODO: may need modification to handle slice coding
4724  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4725  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
4726  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
4727  return;
4728  }
4729  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4730  }
4731  if (!v->s.loop_filter)
4732  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4733  else if (s->mb_y)
4734  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4735  s->first_slice_line = 0;
4736  }
4737  if (v->s.loop_filter)
4738  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4739  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
4740  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
4741 }
4742 
4743 static void vc1_decode_skip_blocks(VC1Context *v)
4744 {
4745  MpegEncContext *s = &v->s;
4746 
4747  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
4748  s->first_slice_line = 1;
4749  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
4750  s->mb_x = 0;
4751  init_block_index(v);
4752  ff_update_block_index(s);
4753  if (s->last_picture.f.data[0]) {
4754  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
4755  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4756  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
4757  }
4758  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4759  s->first_slice_line = 0;
4760  }
4761  s->pict_type = AV_PICTURE_TYPE_P;
4762 }
4763 
4764 void ff_vc1_decode_blocks(VC1Context *v)
4765 {
4766 
4767  v->s.esc3_level_length = 0;
4768  if (v->x8_type) {
4769  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
4770  } else {
4771  v->cur_blk_idx = 0;
4772  v->left_blk_idx = -1;
4773  v->topleft_blk_idx = 1;
4774  v->top_blk_idx = 2;
4775  switch (v->s.pict_type) {
4776  case AV_PICTURE_TYPE_I:
4777  if (v->profile == PROFILE_ADVANCED)
4778  vc1_decode_i_blocks_adv(v);
4779  else
4780  vc1_decode_i_blocks(v);
4781  break;
4782  case AV_PICTURE_TYPE_P:
4783  if (v->p_frame_skipped)
4784  vc1_decode_skip_blocks(v);
4785  else
4786  vc1_decode_p_blocks(v);
4787  break;
4788  case AV_PICTURE_TYPE_B:
4789  if (v->bi_type) {
4790  if (v->profile == PROFILE_ADVANCED)
4791  vc1_decode_i_blocks_adv(v);
4792  else
4793  vc1_decode_i_blocks(v);
4794  } else
4795  vc1_decode_b_blocks(v);
4796  break;
4797  }
4798  }
4799 }
4800 
4801 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
4802 
4803 typedef struct {
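 /* Per-sprite affine transform coefficients in 16.16 fixed point, as parsed by
  * vc1_sprite_parse_transform(): x scale, rotation terms (expected to be zero),
  * x offset, y scale, y offset and the blend factor. */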
4815  int coefs[2][7];
4816 
4817  int effect_type, effect_flag;
4818  int effect_pcount1, effect_pcount2;
4819  int effect_params1[15], effect_params2[10];
4820 } SpriteData;
4821 
4822 static inline int get_fp_val(GetBitContext* gb)
4823 {
4824  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
4825 }
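/* Sprite transform values are 16.16 fixed point: the 30-bit field is re-centred
 * around zero and shifted left once, i.e. value = (raw - 2^29) * 2. */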
4826 
4827 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
4828 {
4829  c[1] = c[3] = 0;
4830 
4831  switch (get_bits(gb, 2)) {
4832  case 0:
4833  c[0] = 1 << 16;
4834  c[2] = get_fp_val(gb);
4835  c[4] = 1 << 16;
4836  break;
4837  case 1:
4838  c[0] = c[4] = get_fp_val(gb);
4839  c[2] = get_fp_val(gb);
4840  break;
4841  case 2:
4842  c[0] = get_fp_val(gb);
4843  c[2] = get_fp_val(gb);
4844  c[4] = get_fp_val(gb);
4845  break;
4846  case 3:
4847  c[0] = get_fp_val(gb);
4848  c[1] = get_fp_val(gb);
4849  c[2] = get_fp_val(gb);
4850  c[3] = get_fp_val(gb);
4851  c[4] = get_fp_val(gb);
4852  break;
4853  }
4854  c[5] = get_fp_val(gb);
4855  if (get_bits1(gb))
4856  c[6] = get_fp_val(gb);
4857  else
4858  c[6] = 1 << 16;
4859 }
4860 
4861 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
4862 {
4863  AVCodecContext *avctx = v->s.avctx;
4864  int sprite, i;
4865 
4866  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4867  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
4868  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
4869  av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
4870  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
4871  for (i = 0; i < 7; i++)
4872  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
4873  sd->coefs[sprite][i] / (1<<16),
4874  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
4875  av_log(avctx, AV_LOG_DEBUG, "\n");
4876  }
4877 
4878  skip_bits(gb, 2);
4879  if (sd->effect_type = get_bits_long(gb, 30)) {
4880  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
4881  case 7:
4882  vc1_sprite_parse_transform(gb, sd->effect_params1);
4883  break;
4884  case 14:
4885  vc1_sprite_parse_transform(gb, sd->effect_params1);
4886  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
4887  break;
4888  default:
4889  for (i = 0; i < sd->effect_pcount1; i++)
4890  sd->effect_params1[i] = get_fp_val(gb);
4891  }
4892  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
4893  // effect 13 is simple alpha blending and matches the opacity above
4894  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
4895  for (i = 0; i < sd->effect_pcount1; i++)
4896  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4897  sd->effect_params1[i] / (1 << 16),
4898  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
4899  av_log(avctx, AV_LOG_DEBUG, "\n");
4900  }
4901 
4902  sd->effect_pcount2 = get_bits(gb, 16);
4903  if (sd->effect_pcount2 > 10) {
4904  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
4905  return;
4906  } else if (sd->effect_pcount2) {
4907  i = -1;
4908  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
4909  while (++i < sd->effect_pcount2) {
4910  sd->effect_params2[i] = get_fp_val(gb);
4911  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
4912  sd->effect_params2[i] / (1 << 16),
4913  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
4914  }
4915  av_log(avctx, AV_LOG_DEBUG, "\n");
4916  }
4917  }
4918  if (sd->effect_flag = get_bits1(gb))
4919  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
4920 
4921  if (get_bits_count(gb) >= gb->size_in_bits +
4922  (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
4923  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
4924  if (get_bits_count(gb) < gb->size_in_bits - 8)
4925  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
4926 }
4927 
4928 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
4929 {
4930  int i, plane, row, sprite;
4931  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
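 /* sr_cache remembers which source lines are currently held in v->sr_rows for
  * each sprite, so horizontally scaled rows are only recomputed when needed. */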
4932  uint8_t* src_h[2][2];
4933  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
4934  int ysub[2];
4935  MpegEncContext *s = &v->s;
4936 
4937  for (i = 0; i < 2; i++) {
4938  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
4939  xadv[i] = sd->coefs[i][0];
4940  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
4941  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
4942 
4943  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
4944  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
4945  }
4946  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
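 /* alpha is the 16.16 blend factor applied when the two sprites are combined
  * in the per-row loop below. */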
4947 
4948  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
4949  int width = v->output_width>>!!plane;
4950 
4951  for (row = 0; row < v->output_height>>!!plane; row++) {
4952  uint8_t *dst = v->sprite_output_frame.data[plane] +
4953  v->sprite_output_frame.linesize[plane] * row;
4954 
4955  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
4956  uint8_t *iplane = s->current_picture.f.data[plane];
4957  int iline = s->current_picture.f.linesize[plane];
4958  int ycoord = yoff[sprite] + yadv[sprite] * row;
4959  int yline = ycoord >> 16;
4960  int next_line;
4961  ysub[sprite] = ycoord & 0xFFFF;
4962  if (sprite) {
4963  iplane = s->last_picture.f.data[plane];
4964  iline = s->last_picture.f.linesize[plane];
4965  }
4966  next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
4967  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
4968  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
4969  if (ysub[sprite])
4970  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
4971  } else {
4972  if (sr_cache[sprite][0] != yline) {
4973  if (sr_cache[sprite][1] == yline) {
4974  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
4975  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
4976  } else {
4977  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
4978  sr_cache[sprite][0] = yline;
4979  }
4980  }
4981  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
4982  v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
4983  iplane + next_line, xoff[sprite],
4984  xadv[sprite], width);
4985  sr_cache[sprite][1] = yline + 1;
4986  }
4987  src_h[sprite][0] = v->sr_rows[sprite][0];
4988  src_h[sprite][1] = v->sr_rows[sprite][1];
4989  }
4990  }
4991 
4992  if (!v->two_sprites) {
4993  if (ysub[0]) {
4994  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
4995  } else {
4996  memcpy(dst, src_h[0][0], width);
4997  }
4998  } else {
4999  if (ysub[0] && ysub[1]) {
5000  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5001  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5002  } else if (ysub[0]) {
5003  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5004  src_h[1][0], alpha, width);
5005  } else if (ysub[1]) {
5006  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5007  src_h[0][0], (1<<16)-1-alpha, width);
5008  } else {
5009  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5010  }
5011  }
5012  }
5013 
5014  if (!plane) {
5015  for (i = 0; i < 2; i++) {
5016  xoff[i] >>= 1;
5017  yoff[i] >>= 1;
5018  }
5019  }
5020 
5021  }
5022 }
5023 
5024 
5025 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5026 {
5027  MpegEncContext *s = &v->s;
5028  AVCodecContext *avctx = s->avctx;
5029  SpriteData sd;
5030 
5031  vc1_parse_sprites(v, gb, &sd);
5032 
5033  if (!s->current_picture.f.data[0]) {
5034  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5035  return -1;
5036  }
5037 
5038  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5039  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5040  v->two_sprites = 0;
5041  }
5042 
5043  if (v->sprite_output_frame.data[0])
5044  avctx->release_buffer(avctx, &v->sprite_output_frame);
5045 
5048  if (ff_get_buffer(avctx, &v->sprite_output_frame) < 0) {
5049  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
5050  return -1;
5051  }
5052 
5053  vc1_draw_sprites(v, &sd);
5054 
5055  return 0;
5056 }
5057 
5058 static void vc1_sprite_flush(AVCodecContext *avctx)
5059 {
5060  VC1Context *v = avctx->priv_data;
5061  MpegEncContext *s = &v->s;
5062  AVFrame *f = &s->current_picture.f;
5063  int plane, i;
5064 
5065  /* Windows Media Image codecs have a convergence interval of two keyframes.
5066  Since we can't enforce it, clear to black the missing sprite. This is
5067  wrong but it looks better than doing nothing. */
5068 
5069  if (f->data[0])
5070  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5071  for (i = 0; i < v->sprite_height>>!!plane; i++)
5072  memset(f->data[plane] + i * f->linesize[plane],
5073  plane ? 128 : 0, f->linesize[plane]);
5074 }
5075 
5076 #endif
5077 
5078 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
5079 {
5080  MpegEncContext *s = &v->s;
5081  int i;
5082 
5083  /* Allocate mb bitplanes */
5084  v->mv_type_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5085  v->direct_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5086  v->forward_mb_plane = av_malloc (s->mb_stride * s->mb_height);
5087  v->fieldtx_plane = av_mallocz(s->mb_stride * s->mb_height);
5088  v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5089  v->over_flags_plane = av_mallocz(s->mb_stride * s->mb_height);
5090 
5091  v->n_allocated_blks = s->mb_width + 2;
5092  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5093  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5094  v->cbp = v->cbp_base + s->mb_stride;
5095  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5096  v->ttblk = v->ttblk_base + s->mb_stride;
5097  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5098  v->is_intra = v->is_intra_base + s->mb_stride;
5099  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5100  v->luma_mv = v->luma_mv_base + s->mb_stride;
5101 
5102  /* allocate block type info in that way so it could be used with s->block_index[] */
5103  v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5104  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5105  v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5106  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
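 /* The single mb_type_base allocation holds a luma map with b8_stride
  * granularity followed by two chroma maps with mb_stride granularity; the +1
  * offsets leave a guard row/column for the top/left edge so s->block_index[]
  * can be used to address it directly. */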
5107 
5108  /* allocate memory to store block level MV info */
5109  v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5110  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5111  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5112  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5113  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5114  v->mv_f_last_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5115  v->mv_f_last[0] = v->mv_f_last_base + s->b8_stride + 1;
5116  v->mv_f_last[1] = v->mv_f_last[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5117  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5118  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5119  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5120 
5121  /* Init coded blocks info */
5122  if (v->profile == PROFILE_ADVANCED) {
5123 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5124 // return -1;
5125 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5126 // return -1;
5127  }
5128 
5129  ff_intrax8_common_init(&v->x8,s);
5130 
5131  if (s->avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || s->avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5132  for (i = 0; i < 4; i++)
5133  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width))) return -1;
5134  }
5135 
5136  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5137  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5138  !v->mb_type_base)
5139  return -1;
5140 
5141  return 0;
5142 }
5143 
5144 static void vc1_init_transposed_scantables(VC1Context *v)
5145 {
5146  int i;
5147  for (i = 0; i < 64; i++) {
5148 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
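 /* transpose() swaps the row and column of a position inside an 8x8 block,
  * turning the row-first scan tables into their column-first equivalents. */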
5149  v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5150  v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5151  v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5152  v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5153  v->zzi_8x8[i] = transpose(ff_vc1_adv_interlaced_8x8_zz[i]);
5154  }
5155  v->left_blk_sh = 0;
5156  v->top_blk_sh = 3;
5157 }
5158 
5163 static av_cold int vc1_decode_init(AVCodecContext *avctx)
5164 {
5165  VC1Context *v = avctx->priv_data;
5166  MpegEncContext *s = &v->s;
5167  GetBitContext gb;
5168 
5169  /* save the container output size for WMImage */
5170  v->output_width = avctx->width;
5171  v->output_height = avctx->height;
5172 
5173  if (!avctx->extradata_size || !avctx->extradata)
5174  return -1;
5175  if (!(avctx->flags & CODEC_FLAG_GRAY))
5176  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5177  else
5178  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5179  avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5180  v->s.avctx = avctx;
5181  avctx->flags |= CODEC_FLAG_EMU_EDGE;
5182  v->s.flags |= CODEC_FLAG_EMU_EDGE;
5183 
5184  if (ff_vc1_init_common(v) < 0)
5185  return -1;
5186  // ensure static VLC tables are initialized
5187  if (ff_msmpeg4_decode_init(avctx) < 0)
5188  return -1;
5189  if (ff_vc1_decode_init_alloc_tables(v) < 0)
5190  return -1;
5191  // Hack to ensure the above functions will be called
5192  // again once we know all necessary settings.
5193  // That this is necessary might indicate a bug.
5194  ff_vc1_decode_end(avctx);
5195 
5196  ff_h264chroma_init(&v->h264chroma, 8);
5197  ff_vc1dsp_init(&v->vc1dsp);
5198 
5199  if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5200  int count = 0;
5201 
5202  // looks like WMV3 has a sequence header stored in the extradata
5203  // advanced sequence header may be before the first frame
5204  // the last byte of the extradata is a version number, 1 for the
5205  // samples we can decode
5206 
5207  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5208 
5209  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0)
5210  return -1;
5211 
5212  count = avctx->extradata_size*8 - get_bits_count(&gb);
5213  if (count > 0) {
5214  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5215  count, get_bits(&gb, count));
5216  } else if (count < 0) {
5217  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5218  }
5219  } else { // VC1/WVC1/WVP2
5220  const uint8_t *start = avctx->extradata;
5221  uint8_t *end = avctx->extradata + avctx->extradata_size;
5222  const uint8_t *next;
5223  int size, buf2_size;
5224  uint8_t *buf2 = NULL;
5225  int seq_initialized = 0, ep_initialized = 0;
5226 
5227  if (avctx->extradata_size < 16) {
5228  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5229  return -1;
5230  }
5231 
5232  buf2 = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
5233  start = find_next_marker(start, end); // in WVC1 extradata first byte is its size, but can be 0 in mkv
5234  next = start;
5235  for (; next < end; start = next) {
5236  next = find_next_marker(start + 4, end);
5237  size = next - start - 4;
5238  if (size <= 0)
5239  continue;
5240  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5241  init_get_bits(&gb, buf2, buf2_size * 8);
5242  switch (AV_RB32(start)) {
5243  case VC1_CODE_SEQHDR:
5244  if (ff_vc1_decode_sequence_header(avctx, v, &gb) < 0) {
5245  av_free(buf2);
5246  return -1;
5247  }
5248  seq_initialized = 1;
5249  break;
5250  case VC1_CODE_ENTRYPOINT:
5251  if (ff_vc1_decode_entry_point(avctx, v, &gb) < 0) {
5252  av_free(buf2);
5253  return -1;
5254  }
5255  ep_initialized = 1;
5256  break;
5257  }
5258  }
5259  av_free(buf2);
5260  if (!seq_initialized || !ep_initialized) {
5261  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5262  return -1;
5263  }
5264  v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5265  }
5266 
5267  avctx->profile = v->profile;
5268  if (v->profile == PROFILE_ADVANCED)
5269  avctx->level = v->level;
5270 
5271  avctx->has_b_frames = !!avctx->max_b_frames;
5272 
5273  s->mb_width = (avctx->coded_width + 15) >> 4;
5274  s->mb_height = (avctx->coded_height + 15) >> 4;
5275 
5276  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5277  vc1_init_transposed_scantables(v);
5278  } else {
5279  memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5280  v->left_blk_sh = 3;
5281  v->top_blk_sh = 0;
5282  }
5283 
5284  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5285  v->sprite_width = avctx->coded_width;
5286  v->sprite_height = avctx->coded_height;
5287 
5288  avctx->coded_width = avctx->width = v->output_width;
5289  avctx->coded_height = avctx->height = v->output_height;
5290 
5291  // prevent 16.16 overflows
5292  if (v->sprite_width > 1 << 14 ||
5293  v->sprite_height > 1 << 14 ||
5294  v->output_width > 1 << 14 ||
5295  v->output_height > 1 << 14) return -1;
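 // The sprite warp parameters are handled as 16.16 fixed point, so the
 // 1 << 14 cap presumably keeps coefficient * coordinate products within
 // 32 bits.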
5296 
5297  if ((v->sprite_width&1) || (v->sprite_height&1)) {
5298  av_log_ask_for_sample(avctx, "odd sprites are not supported\n");
5299  return AVERROR_PATCHWELCOME;
5300  }
5301  }
5302  return 0;
5303 }
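/* Illustrative sketch (not used by the decoder, helper name made up):
 * roughly what the vc1_unescape_buffer() calls in this file do.  Inside a
 * BDU the encoder inserts a 0x03 emulation prevention byte after every
 * 0x00 0x00 pair that is followed by a byte < 4, so payload bytes can
 * never form a 0x000001xx start code; unescaping drops that 0x03 again. */
static int vc1_unescape_sketch(const uint8_t *src, int size, uint8_t *dst)
{
    int dsize = 0, i;

    for (i = 0; i < size; i++) {
        if (i >= 2 && i + 1 < size && src[i] == 0x03 &&
            !src[i - 1] && !src[i - 2] && src[i + 1] < 4) {
            dst[dsize++] = src[++i]; /* drop the 0x03, keep the next byte */
        } else {
            dst[dsize++] = src[i];
        }
    }
    return dsize; /* number of unescaped bytes written to dst */
}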
5304 
5308 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
5309 {
5310  VC1Context *v = avctx->priv_data;
5311  int i;
5312 
5313  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5314  && v->sprite_output_frame.data[0])
5315  avctx->release_buffer(avctx, &v->sprite_output_frame);
5316  for (i = 0; i < 4; i++)
5317  av_freep(&v->sr_rows[i >> 1][i & 1]);
5318  av_freep(&v->hrd_rate);
5319  av_freep(&v->hrd_buffer);
5320  ff_MPV_common_end(&v->s);
5321  av_freep(&v->mv_type_mb_plane);
5322  av_freep(&v->direct_mb_plane);
5323  av_freep(&v->forward_mb_plane);
5324  av_freep(&v->fieldtx_plane);
5325  av_freep(&v->acpred_plane);
5326  av_freep(&v->over_flags_plane);
5327  av_freep(&v->mb_type_base);
5328  av_freep(&v->blk_mv_type_base);
5329  av_freep(&v->mv_f_base);
5330  av_freep(&v->mv_f_last_base);
5331  av_freep(&v->mv_f_next_base);
5332  av_freep(&v->block);
5333  av_freep(&v->cbp_base);
5334  av_freep(&v->ttblk_base);
5335  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5336  av_freep(&v->luma_mv_base);
5337  ff_intrax8_common_end(&v->x8);
5338  return 0;
5339 }
5340 
5341 
5345 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5346  int *got_frame, AVPacket *avpkt)
5347 {
5348  const uint8_t *buf = avpkt->data;
5349  int buf_size = avpkt->size, n_slices = 0, i;
5350  VC1Context *v = avctx->priv_data;
5351  MpegEncContext *s = &v->s;
5352  AVFrame *pict = data;
5353  uint8_t *buf2 = NULL;
5354  const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5355  int mb_height, n_slices1=-1;
5356  struct {
5357  uint8_t *buf;
5358  GetBitContext gb;
5359  int mby_start;
5360  } *slices = NULL, *tmp;
5361 
5362  v->second_field = 0;
5363 
5364  if(s->flags & CODEC_FLAG_LOW_DELAY)
5365  s->low_delay = 1;
5366 
5367  /* no supplementary picture */
5368  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5369  /* special case for last picture */
5370  if (s->low_delay == 0 && s->next_picture_ptr) {
5371  *pict = s->next_picture_ptr->f;
5372  s->next_picture_ptr = NULL;
5373 
5374  *got_frame = 1;
5375  }
5376 
5377  return buf_size;
5378  }
5379 
5380  if (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
5381  if (v->profile < PROFILE_ADVANCED)
5382  avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5383  else
5384  avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5385  }
5386 
5387  //for advanced profile we may need to parse and unescape data
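 // (Simple/Main profile WMV3 packets carry the frame layer directly,
 // without start codes or escaping, hence the plain init_get_bits()
 // in the else branch below.)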
5388  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5389  int buf_size2 = 0;
5390  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5391 
5392  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5393  const uint8_t *start, *end, *next;
5394  int size;
5395 
5396  next = buf;
5397  for (start = buf, end = buf + buf_size; next < end; start = next) {
5398  next = find_next_marker(start + 4, end);
5399  size = next - start - 4;
5400  if (size <= 0) continue;
5401  switch (AV_RB32(start)) {
5402  case VC1_CODE_FRAME:
5403  if (avctx->hwaccel ||
5404  s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
5405  buf_start = start;
5406  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5407  break;
5408  case VC1_CODE_FIELD: {
5409  int buf_size3;
5410  if (avctx->hwaccel ||
5411  s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
5412  buf_start_second_field = start;
5413  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5414  if (!tmp)
5415  goto err;
5416  slices = tmp;
5417  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5418  if (!slices[n_slices].buf)
5419  goto err;
5420  buf_size3 = vc1_unescape_buffer(start + 4, size,
5421  slices[n_slices].buf);
5422  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5423  buf_size3 << 3);
5424  /* assume the field marker sits at the exact vertical middle of the
5425  picture (mb_height / 2 rows); this is not guaranteed to be correct */
5426  slices[n_slices].mby_start = s->mb_height >> 1;
5427  n_slices1 = n_slices - 1; // index of the last slice of the first field
5428  n_slices++;
5429  break;
5430  }
5431  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5432  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5433  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5434  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5435  break;
5436  case VC1_CODE_SLICE: {
5437  int buf_size3;
5438  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5439  if (!tmp)
5440  goto err;
5441  slices = tmp;
5442  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5443  if (!slices[n_slices].buf)
5444  goto err;
5445  buf_size3 = vc1_unescape_buffer(start + 4, size,
5446  slices[n_slices].buf);
5447  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5448  buf_size3 << 3);
5449  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
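 /* the 9 bits read above are SLICE_ADDR, the macroblock row at which
  * this slice starts (SMPTE 421M slice layer) */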
5450  n_slices++;
5451  break;
5452  }
5453  }
5454  }
5455  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields, separated by a marker */
5456  const uint8_t *divider;
5457  int buf_size3;
5458 
5459  divider = find_next_marker(buf, buf + buf_size);
5460  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5461  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5462  goto err;
5463  } else { // found field marker, unescape second field
5464  if (avctx->hwaccel ||
5465  s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
5466  buf_start_second_field = divider;
5467  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5468  if (!tmp)
5469  goto err;
5470  slices = tmp;
5471  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5472  if (!slices[n_slices].buf)
5473  goto err;
5474  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5475  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5476  buf_size3 << 3);
5477  slices[n_slices].mby_start = s->mb_height >> 1;
5478  n_slices1 = n_slices - 1;
5479  n_slices++;
5480  }
5481  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5482  } else {
5483  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5484  }
5485  init_get_bits(&s->gb, buf2, buf_size2*8);
5486  } else
5487  init_get_bits(&s->gb, buf, buf_size*8);
5488 
5489  if (v->res_sprite) {
5490  v->new_sprite = !get_bits1(&s->gb);
5491  v->two_sprites = get_bits1(&s->gb);
5492  /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5493  we're using the sprite compositor. These are intentionally kept separate
5494  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5495  the vc1 one for WVP2 */
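 /* e.g. forcing the decoder on the command line, something like
  "ffmpeg -c:v wmv3 -i file.wmv ...", should yield the raw sprites for
  WMVP content (hypothetical invocation, not taken from this code) */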
5496  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5497  if (v->new_sprite) {
5498  // switch AVCodecContext parameters to those of the sprites
5499  avctx->width = avctx->coded_width = v->sprite_width;
5500  avctx->height = avctx->coded_height = v->sprite_height;
5501  } else {
5502  goto image;
5503  }
5504  }
5505  }
5506 
5507  if (s->context_initialized &&
5508  (s->width != avctx->coded_width ||
5509  s->height != avctx->coded_height)) {
5510  ff_vc1_decode_end(avctx);
5511  }
5512 
5513  if (!s->context_initialized) {
5514  if (ff_msmpeg4_decode_init(avctx) < 0 || ff_vc1_decode_init_alloc_tables(v) < 0)
5515  goto err;
5516 
5517  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5518 
5519  if (v->profile == PROFILE_ADVANCED) {
5520  if(avctx->coded_width<=1 || avctx->coded_height<=1)
5521  goto err;
5522  s->h_edge_pos = avctx->coded_width;
5523  s->v_edge_pos = avctx->coded_height;
5524  }
5525  }
5526 
5527  /* We need to set current_picture_ptr before reading the header,
5528  * otherwise we cannot store anything in there. */
5529  if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5530  int i = ff_find_unused_picture(s, 0);
5531  if (i < 0)
5532  goto err;
5533  s->current_picture_ptr = &s->picture[i];
5534  }
5535 
5536  // do parse frame header
5537  v->pic_header_flag = 0;
5538  v->first_pic_header_flag = 1;
5539  if (v->profile < PROFILE_ADVANCED) {
5540  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5541  goto err;
5542  }
5543  } else {
5544  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5545  goto err;
5546  }
5547  }
5548  v->first_pic_header_flag = 0;
5549 
5550  if (avctx->debug & FF_DEBUG_PICT_INFO)
5551  av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
5552 
5553  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5554  && s->pict_type != AV_PICTURE_TYPE_I) {
5555  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5556  goto err;
5557  }
5558 
5559  if ((s->mb_height >> v->field_mode) == 0) {
5560  av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
5561  goto err;
5562  }
5563 
5564  // process pulldown flags
5565  s->current_picture_ptr->f.repeat_pict = 0;
5566  // Pulldown flags are only valid when 'broadcast' has been set,
5567  // in which case ticks_per_frame is 2.
5568  if (v->rff) {
5569  // repeat field
5570  s->current_picture_ptr->f.repeat_pict = 1;
5571  } else if (v->rptfrm) {
5572  // repeat frames
5573  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
5574  }
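 // repeat_pict extends the display duration by repeat_pict / 2 frame
 // periods: 1 means one extra field (RFF), rptfrm * 2 means rptfrm extra
 // whole frames.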
5575 
5576  // for skipping the frame
5577  s->current_picture.f.pict_type = s->pict_type;
5578  s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
5579 
5580  /* skip B-frames if we don't have reference frames */
5581  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
5582  goto err;
5583  }
5584  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
5585  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
5586  avctx->skip_frame >= AVDISCARD_ALL) {
5587  goto end;
5588  }
5589 
5590  if (s->next_p_frame_damaged) {
5591  if (s->pict_type == AV_PICTURE_TYPE_B)
5592  goto end;
5593  else
5594  s->next_p_frame_damaged = 0;
5595  }
5596 
5597  if (ff_MPV_frame_start(s, avctx) < 0) {
5598  goto err;
5599  }
5600 
5603 
5606 
5607  if ((CONFIG_VC1_VDPAU_DECODER)
5608  && s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
5609  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
5610  else if (avctx->hwaccel) {
5611  if (v->field_mode && buf_start_second_field) {
5612  // decode first field
5613  s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
5614  if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
5615  goto err;
5616  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
5617  goto err;
5618  if (avctx->hwaccel->end_frame(avctx) < 0)
5619  goto err;
5620 
5621  // decode second field
5622  s->gb = slices[n_slices1 + 1].gb;
5623  s->picture_structure = PICT_TOP_FIELD + v->tff;
5624  v->second_field = 1;
5625  v->pic_header_flag = 0;
5626  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5627  av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed\n");
5628  goto err;
5629  }
5631 
5632  if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
5633  goto err;
5634  if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
5635  goto err;
5636  if (avctx->hwaccel->end_frame(avctx) < 0)
5637  goto err;
5638  } else {
5639  s->picture_structure = PICT_FRAME;
5640  if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5641  goto err;
5642  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
5643  goto err;
5644  if (avctx->hwaccel->end_frame(avctx) < 0)
5645  goto err;
5646  }
5647  } else {
5648  int header_ret = 0;
5649 
5650  if (v->fcm == ILACE_FRAME && s->pict_type == AV_PICTURE_TYPE_B)
5651  goto err; // This codepath is still incomplete thus it is disabled
5652 
5654 
5655  v->bits = buf_size * 8;
5656  v->end_mb_x = s->mb_width;
5657  if (v->field_mode) {
5658  uint8_t *tmp[2];
5659  s->current_picture.f.linesize[0] <<= 1;
5660  s->current_picture.f.linesize[1] <<= 1;
5661  s->current_picture.f.linesize[2] <<= 1;
5662  s->linesize <<= 1;
5663  s->uvlinesize <<= 1;
5664  tmp[0] = v->mv_f_last[0];
5665  tmp[1] = v->mv_f_last[1];
5666  v->mv_f_last[0] = v->mv_f_next[0];
5667  v->mv_f_last[1] = v->mv_f_next[1];
5668  v->mv_f_next[0] = v->mv_f[0];
5669  v->mv_f_next[1] = v->mv_f[1];
5670  v->mv_f[0] = tmp[0];
5671  v->mv_f[1] = tmp[1];
5672  }
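 /* For field pictures the linesizes are doubled so each field can be
  * addressed as its own picture, and the field MV buffers are rotated
  * (mv_f becomes mv_f_next, mv_f_next becomes mv_f_last, the old
  * mv_f_last is recycled as the new mv_f); the linesize doubling is
  * undone after the slice loop below. */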
5673  mb_height = s->mb_height >> v->field_mode;
5674  for (i = 0; i <= n_slices; i++) {
5675  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
5676  if (v->field_mode <= 0) {
5677  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
5678  "picture boundary (%d >= %d)\n", i,
5679  slices[i - 1].mby_start, mb_height);
5680  continue;
5681  }
5682  v->second_field = 1;
5683  v->blocks_off = s->b8_stride * (s->mb_height&~1);
5684  v->mb_off = s->mb_stride * s->mb_height >> 1;
5685  } else {
5686  v->second_field = 0;
5687  v->blocks_off = 0;
5688  v->mb_off = 0;
5689  }
5690  if (i) {
5691  v->pic_header_flag = 0;
5692  if (v->field_mode && i == n_slices1 + 2) {
5693  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
5694  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
5695  continue;
5696  }
5697  } else if (get_bits1(&s->gb)) {
5698  v->pic_header_flag = 1;
5699  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
5700  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
5701  continue;
5702  }
5703  }
5704  }
5705  if (header_ret < 0)
5706  continue;
5707  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
5708  if (!v->field_mode || v->second_field)
5709  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5710  else {
5711  if (i >= n_slices) {
5712  av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
5713  continue;
5714  }
5715  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
5716  }
5717  if (s->end_mb_y <= s->start_mb_y) {
5718  av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
5719  continue;
5720  }
5721  if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
5722  av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
5723  continue;
5724  }
5725  ff_vc1_decode_blocks(v);
5726  if (i != n_slices)
5727  s->gb = slices[i].gb;
5728  }
5729  if (v->field_mode) {
5730  v->second_field = 0;
5731  if (s->pict_type == AV_PICTURE_TYPE_B) {
5732  memcpy(v->mv_f_base, v->mv_f_next_base,
5733  2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5734  }
5735  s->current_picture.f.linesize[0] >>= 1;
5736  s->current_picture.f.linesize[1] >>= 1;
5737  s->current_picture.f.linesize[2] >>= 1;
5738  s->linesize >>= 1;
5739  s->uvlinesize >>= 1;
5740  }
5741  av_dlog(s->avctx, "Consumed %i/%i bits\n",
5742  get_bits_count(&s->gb), s->gb.size_in_bits);
5743 // if (get_bits_count(&s->gb) > buf_size * 8)
5744 // return -1;
5745  if (s->er.error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
5746  goto err;
5747  if(!v->field_mode)
5748  ff_er_frame_end(&s->er);
5749  }
5750 
5751  ff_MPV_frame_end(s);
5752 
5753  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5754 image:
5755  avctx->width = avctx->coded_width = v->output_width;
5756  avctx->height = avctx->coded_height = v->output_height;
5757  if (avctx->skip_frame >= AVDISCARD_NONREF)
5758  goto end;
5759 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5760  if (vc1_decode_sprites(v, &s->gb))
5761  goto err;
5762 #endif
5763  *pict = v->sprite_output_frame;
5764  *got_frame = 1;
5765  } else {
5766  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
5767  *pict = s->current_picture_ptr->f;
5768  } else if (s->last_picture_ptr != NULL) {
5769  *pict = s->last_picture_ptr->f;
5770  }
5771  if (s->last_picture_ptr || s->low_delay) {
5772  *got_frame = 1;
5773  ff_print_debug_info(s, pict);
5774  }
5775  }
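 // B pictures and low-delay streams return the picture just decoded;
 // otherwise the previous reference is returned, giving the usual
 // one-frame output delay for reordering.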
5776 
5777 end:
5778  av_free(buf2);
5779  for (i = 0; i < n_slices; i++)
5780  av_free(slices[i].buf);
5781  av_free(slices);
5782  return buf_size;
5783 
5784 err:
5785  av_free(buf2);
5786  for (i = 0; i < n_slices; i++)
5787  av_free(slices[i].buf);
5788  av_free(slices);
5789  return -1;
5790 }
5791 
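/* Illustrative sketch of how a caller of the 1.2-era libavcodec API reaches
 * vc1_decode_frame() above; the helper name is made up and error handling
 * is kept minimal. */
static int decode_one_vc1_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *frame = avcodec_alloc_frame();
    int got_frame = 0, ret;

    if (!frame)
        return AVERROR(ENOMEM);
    /* dispatches to vc1_decode_frame() for the VC-1/WMV3 decoders */
    ret = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (ret >= 0 && got_frame) {
        /* frame->data[] / frame->linesize[] now describe the decoded
         * picture; with this API it stays valid until the next call */
    }
    avcodec_free_frame(&frame);
    return ret;
}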
5792 
5793 static const AVProfile profiles[] = {
5794  { FF_PROFILE_VC1_SIMPLE, "Simple" },
5795  { FF_PROFILE_VC1_MAIN, "Main" },
5796  { FF_PROFILE_VC1_COMPLEX, "Complex" },
5797  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
5798  { FF_PROFILE_UNKNOWN },
5799 };
5800 
5801 static const enum AVPixelFormat vc1_hwaccel_pixfmt_list_420[] = {
5802 #if CONFIG_DXVA2
5803  AV_PIX_FMT_DXVA2_VLD,
5804 #endif
5805 #if CONFIG_VAAPI
5806  AV_PIX_FMT_VAAPI_VLD,
5807 #endif
5808 #if CONFIG_VDPAU
5809  AV_PIX_FMT_VDPAU,
5810 #endif
5811  AV_PIX_FMT_YUV420P,
5812  AV_PIX_FMT_NONE
5813 };
5814 
5815 AVCodec ff_vc1_decoder = {
5816  .name = "vc1",
5817  .type = AVMEDIA_TYPE_VIDEO,
5818  .id = AV_CODEC_ID_VC1,
5819  .priv_data_size = sizeof(VC1Context),
5820  .init = vc1_decode_init,
5821  .close = ff_vc1_decode_end,
5822  .decode = vc1_decode_frame,
5823  .flush = ff_mpeg_flush,
5824  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5825  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
5826  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
5827  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5828 };
5829 
5830 #if CONFIG_WMV3_DECODER
5831 AVCodec ff_wmv3_decoder = {
5832  .name = "wmv3",
5833  .type = AVMEDIA_TYPE_VIDEO,
5834  .id = AV_CODEC_ID_WMV3,
5835  .priv_data_size = sizeof(VC1Context),
5836  .init = vc1_decode_init,
5837  .close = ff_vc1_decode_end,
5838  .decode = vc1_decode_frame,
5839  .flush = ff_mpeg_flush,
5840  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
5841  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
5842  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
5843  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5844 };
5845 #endif
5846 
5847 #if CONFIG_WMV3_VDPAU_DECODER
5848 AVCodec ff_wmv3_vdpau_decoder = {
5849  .name = "wmv3_vdpau",
5850  .type = AVMEDIA_TYPE_VIDEO,
5851  .id = AV_CODEC_ID_WMV3,
5852  .priv_data_size = sizeof(VC1Context),
5853  .init = vc1_decode_init,
5854  .close = ff_vc1_decode_end,
5855  .decode = vc1_decode_frame,
5856  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5857  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
5858  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
5859  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5860 };
5861 #endif
5862 
5863 #if CONFIG_VC1_VDPAU_DECODER
5864 AVCodec ff_vc1_vdpau_decoder = {
5865  .name = "vc1_vdpau",
5866  .type = AVMEDIA_TYPE_VIDEO,
5867  .id = AV_CODEC_ID_VC1,
5868  .priv_data_size = sizeof(VC1Context),
5869  .init = vc1_decode_init,
5870  .close = ff_vc1_decode_end,
5871  .decode = vc1_decode_frame,
5872  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
5873  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
5874  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
5875  .profiles = NULL_IF_CONFIG_SMALL(profiles)
5876 };
5877 #endif
5878 
5879 #if CONFIG_WMV3IMAGE_DECODER
5880 AVCodec ff_wmv3image_decoder = {
5881  .name = "wmv3image",
5882  .type = AVMEDIA_TYPE_VIDEO,
5883  .id = AV_CODEC_ID_WMV3IMAGE,
5884  .priv_data_size = sizeof(VC1Context),
5885  .init = vc1_decode_init,
5886  .close = ff_vc1_decode_end,
5887  .decode = vc1_decode_frame,
5888  .capabilities = CODEC_CAP_DR1,
5889  .flush = vc1_sprite_flush,
5890  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
5891  .pix_fmts = ff_pixfmt_list_420
5892 };
5893 #endif
5894 
5895 #if CONFIG_VC1IMAGE_DECODER
5896 AVCodec ff_vc1image_decoder = {
5897  .name = "vc1image",
5898  .type = AVMEDIA_TYPE_VIDEO,
5899  .id = AV_CODEC_ID_VC1IMAGE,
5900  .priv_data_size = sizeof(VC1Context),
5901  .init = vc1_decode_init,
5902  .close = ff_vc1_decode_end,
5903  .decode = vc1_decode_frame,
5904  .capabilities = CODEC_CAP_DR1,
5905  .flush = vc1_sprite_flush,
5906  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
5907  .pix_fmts = ff_pixfmt_list_420
5908 };
5909 #endif