FFmpeg  4.3
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47 
48 #include "internal.h"
49 #include "avcodec.h"
50 #include "mpegutils.h"
51 #include "h264dec.h"
52 #include "h264data.h"
53 #include "golomb.h"
54 #include "hpeldsp.h"
55 #include "mathops.h"
56 #include "rectangle.h"
57 #include "tpeldsp.h"
58 
59 #if CONFIG_ZLIB
60 #include <zlib.h>
61 #endif
62 
63 #include "svq1.h"
64 
65 /**
66  * @file
67  * svq3 decoder.
68  */
69 
/* Per-reference-frame data used by the decoder.
 * NOTE(review): several members (the AVFrame pointer and the buffer
 * references backing these arrays) are elided in this doxygen listing —
 * confirm against the original source. */
typedef struct SVQ3Frame {
    int16_t (*motion_val[2])[2]; /* per-4x4-block motion vectors, one array per list */

    uint32_t *mb_type;           /* per-macroblock MB_TYPE_* value */

    int8_t *ref_index[2];        /* presumably per-block reference indices, one per list — confirm */
} SVQ3Frame;
83 
/* Decoder state.
 * NOTE(review): many members (codec context, bit readers, DSP contexts,
 * picture pointers, flags, dimensions, ...) are elided in this doxygen
 * listing — confirm against the original source. */
typedef struct SVQ3Context {
    uint32_t watermark_key;      /* XORed into the slice header when set (see slice parsing) */
    int buf_size;                /* size of a buffered input payload — TODO confirm which buffer */
    int qscale;                  /* current macroblock quantizer (0..31) */
    int cbp;                     /* coded block pattern of the current MB */

    int mb_x, mb_y;              /* current macroblock position */
    int mb_xy;                   /* linear macroblock index */
    int b_stride;                /* stride of the per-4x4-block (motion_val) arrays */

    uint32_t *mb2br_xy;          /* maps mb_xy to the intra4x4_pred_mode storage offset */

    unsigned int top_samples_available;   /* neighbour-availability bitmask (H.264-style) */

    /* H.264-style prediction caches for the current MB */
    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
    DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];        /* coefficient buffer (luma + chroma) */
    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];  /* luma DC coefficients (intra16x16) */
    uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];          /* used for chroma DC dequant in hl_decode_mb() */
    int block_offset[2 * (16 * 3)];                       /* pixel offset of each 4x4 block inside the MB planes */
} SVQ3Context;
151 
/* Motion-compensation precision modes signalled per inter macroblock. */
#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4 /* MVs derived from the co-located next-frame MVs (B/skip path) */
156 
/* dual scan (from some older H.264 draft)
 * o-->o-->o   o
 *         |  /|
 * o   o   o / o
 * | / |   |/  |
 * o   o   o   o
 *   /
 * o-->o-->o-->o
 */
/* SVQ3-specific coefficient scan order for 4x4 blocks;
 * each entry is an x + 4*y position inside the 4x4 block. */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
172 
/* Zigzag scan over the 4x4 array of per-subblock luma DC coefficients;
 * each entry addresses the DC slot of one 4x4 subblock (col * 16 + row * 64). */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
179 
/* Maps the luma intra-prediction VLC (0..24) to a pair of rank codes;
 * each code indexes into svq3_pred_1[][] in svq3_decode_mb(). */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
191 
/* Context-dependent intra-mode table, indexed as
 * svq3_pred_1[top_mode + 1][left_mode + 1][rank]; -1 entries mark
 * combinations that are invalid for that neighbour context
 * (checked as an error in svq3_decode_mb()). */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
206 
/* Run/level lookup for small DCT VLC codes, one table for inter ([0])
 * and one for intra ([1]); consumed in svq3_decode_block().
 * NOTE(review): the member declarations are elided in this listing; from
 * the usage (.run / .level) the entries are { run, level } pairs —
 * confirm against the original source. */
static const struct {
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
216 
/* Per-QP dequantization multipliers (QP 0..31); applied with 20-bit
 * rounding (+0x80000, >> 20) in the IDCT helpers below. */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};
223 
224 static int svq3_decode_end(AVCodecContext *avctx);
225 
226 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
227 {
228  const unsigned qmul = svq3_dequant_coeff[qp];
229 #define stride 16
230  int i;
231  int temp[16];
232  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
233 
234  for (i = 0; i < 4; i++) {
235  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
236  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
237  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
238  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
239 
240  temp[4 * i + 0] = z0 + z3;
241  temp[4 * i + 1] = z1 + z2;
242  temp[4 * i + 2] = z1 - z2;
243  temp[4 * i + 3] = z0 - z3;
244  }
245 
246  for (i = 0; i < 4; i++) {
247  const int offset = x_offset[i];
248  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
249  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
250  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
251  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
252 
253  output[stride * 0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
254  output[stride * 2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
255  output[stride * 8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
256  output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
257  }
258 }
259 #undef stride
260 
261 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
262  int stride, int qp, int dc)
263 {
264  const int qmul = svq3_dequant_coeff[qp];
265  int i;
266 
267  if (dc) {
268  dc = 13 * 13 * (dc == 1 ? 1538U* block[0]
269  : qmul * (block[0] >> 3) / 2);
270  block[0] = 0;
271  }
272 
273  for (i = 0; i < 4; i++) {
274  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
275  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
276  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
277  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
278 
279  block[0 + 4 * i] = z0 + z3;
280  block[1 + 4 * i] = z1 + z2;
281  block[2 + 4 * i] = z1 - z2;
282  block[3 + 4 * i] = z0 - z3;
283  }
284 
285  for (i = 0; i < 4; i++) {
286  const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
287  const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
288  const unsigned z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
289  const unsigned z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
290  const int rr = (dc + 0x80000u);
291 
292  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
293  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
294  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
295  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
296  }
297 
298  memset(block, 0, 16 * sizeof(int16_t));
299 }
300 
/**
 * Decode the run/level coefficient data of one block.
 *
 * @param gb    slice bit reader
 * @param block destination coefficient buffer
 * @param index first coefficient position to fill (e.g. 1 to skip DC)
 * @param type  block class selecting scan pattern and VLC tables
 *              (3 = chroma DC; type 2 iterates in two 8-coefficient passes)
 * @return 0 on success, -1 on an invalid VLC or run overflow
 */
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] = {
        /* NOTE(review): the initializer line is elided in this listing;
         * upstream FFmpeg uses { luma_dc_zigzag_scan, ff_zigzag_scan,
         * svq3_scan, ff_h264_chroma_dc_scan } — confirm against the
         * original source. */
    };

    int run, level, sign, limit;
    unsigned vlc;
    /* 0 for types 0/1, 1 for type 2 (only used as a table index when type != 3) */
    const int intra = 3 * type >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        /* vlc == 0 is the end-of-block marker */
        for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)
                return -1;

            /* even codes are negative, odd positive; magnitude is (vlc+1)/2 */
            sign = (vlc & 1) ? 0 : -1;
            vlc  = vlc + 1 >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                if (vlc < 16U) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    /* escape coding for large intra codes */
                    run   = vlc & 0x7;
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    /* escape coding for large inter codes */
                    run   = vlc & 0xF;
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            /* apply the sign: (level ^ 0) - 0 == level, (level ^ -1) - -1 == -level */
            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}
359 
360 static av_always_inline int
361 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
362  int i, int list, int part_width)
363 {
364  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
365 
366  if (topright_ref != PART_NOT_AVAILABLE) {
367  *C = s->mv_cache[list][i - 8 + part_width];
368  return topright_ref;
369  } else {
370  *C = s->mv_cache[list][i - 8 - 1];
371  return s->ref_cache[list][i - 8 - 1];
372  }
373 }
374 
375 /**
376  * Get the predicted MV.
377  * @param n the block index
378  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
379  * @param mx the x component of the predicted motion vector
380  * @param my the y component of the predicted motion vector
381  */
382 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
383  int part_width, int list,
384  int ref, int *const mx, int *const my)
385 {
386  const int index8 = scan8[n];
387  const int top_ref = s->ref_cache[list][index8 - 8];
388  const int left_ref = s->ref_cache[list][index8 - 1];
389  const int16_t *const A = s->mv_cache[list][index8 - 1];
390  const int16_t *const B = s->mv_cache[list][index8 - 8];
391  const int16_t *C;
392  int diagonal_ref, match_count;
393 
394 /* mv_cache
395  * B . . A T T T T
396  * U . . L . . , .
397  * U . . L . . . .
398  * U . . L . . , .
399  * . . . L . . . .
400  */
401 
402  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
403  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
404  if (match_count > 1) { //most common
405  *mx = mid_pred(A[0], B[0], C[0]);
406  *my = mid_pred(A[1], B[1], C[1]);
407  } else if (match_count == 1) {
408  if (left_ref == ref) {
409  *mx = A[0];
410  *my = A[1];
411  } else if (top_ref == ref) {
412  *mx = B[0];
413  *my = B[1];
414  } else {
415  *mx = C[0];
416  *my = C[1];
417  }
418  } else {
419  if (top_ref == PART_NOT_AVAILABLE &&
420  diagonal_ref == PART_NOT_AVAILABLE &&
421  left_ref != PART_NOT_AVAILABLE) {
422  *mx = A[0];
423  *my = A[1];
424  } else {
425  *mx = mid_pred(A[0], B[0], C[0]);
426  *my = mid_pred(A[1], B[1], C[1]);
427  }
428  }
429 }
430 
/**
 * Motion-compensate one partition: luma always, chroma unless GRAY.
 *
 * @param x,y           partition position in pixels
 * @param width,height  partition size in pixels
 * @param mx,my         motion vector relative to (x, y), in the unit
 *                      selected by thirdpel (third-pel or half/full-pel)
 * @param dxy           sub-pel interpolation selector for the DSP tables
 * @param thirdpel      use the third-pel (tpel) DSP functions
 * @param dir           0 = predict from last_pic, 1 = from next_pic
 * @param avg           average into the destination instead of storing
 */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize  = 2 - (width >> 3); // 16->0, 8->1, 4->2
    int linesize   = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    mx += x;
    my += y;

    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        /* reference block overlaps a frame edge: clip and use edge emulation */
        emu = 1;
        mx  = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my  = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = s->cur_pic->f->data[0] + x + y * linesize;
    src  = pic->f->data[0] + mx + my * linesize;

    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
                                                 width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
                                                       height);

    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* NOTE: '+' binds tighter than '>>', so these compute
         * (mx + (mx < x)) >> 1 — i.e. halving rounded toward the block
         * origin — before switching to chroma resolution. */
        mx     = mx + (mx < (int) x) >> 1;
        my     = my + (my < (int) y) >> 1;
        width  = width  >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src  = pic->f->data[i] + mx + my * uvlinesize;

            if (emu) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                         uvlinesize,
                                                         width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               uvlinesize,
                                                               height);
        }
    }
}
505 
/**
 * Decode (or, in PREDICT_MODE, derive) the motion vectors of every
 * partition of the current macroblock and run motion compensation.
 *
 * @param size SVQ3 partition-size code; dimensions derived below
 * @param mode FULLPEL_MODE, HALFPEL_MODE, THIRDPEL_MODE or PREDICT_MODE
 * @param dir  prediction direction / list (0 or 1)
 * @param avg  average into the destination (second reference of B-frames)
 * @return 0 on success, -1 on an invalid MV differential
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    /* PREDICT_MODE widens the clip window, since scaled co-located MVs
     * may legitimately point further outside the frame */
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;
            int dxy;
            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            /* scan8-style index of the partition's top-left 4x4 block */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* derive from the co-located MV of the next P-frame,
                 * scaled by the temporal distance (note: + 1 >> 1 applies
                 * to the whole quotient — rounding division by 2) */
                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                if (dir == 0) {
                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = get_interleaved_se_golomb(&s->gb_slice);
                dx = get_interleaved_se_golomb(&s->gb_slice);

                if (dx != (int16_t)dx || dy != (int16_t)dy) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                /* split into full-pel part (fx, fy) and third-pel fraction (dxy) */
                fx  = (unsigned)(mx + 0x30000) / 3 - 0x10000;
                fy  = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                /* scale back to sixth-pel units for the MV caches below */
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
                my  = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, s->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
618 
/* Apply the inverse transform of every coded 4x4 luma block of a non-
 * intra4x4 macroblock and add the result to dest_y.
 * NOTE(review): the declaration line of this function is elided from this
 * listing (upstream it is a static hl_decode_mb_idct_luma() taking the
 * SVQ3Context as its first parameter) — confirm against the original
 * source. */
                                   int mb_type, const int *block_offset,
                                   int linesize, uint8_t *dest_y)
{
    int i;
    if (!IS_INTRA4x4(mb_type)) {
        /* a block needs the IDCT if it has coded AC coefficients or a
         * nonzero DC value */
        for (i = 0; i < 16; i++)
            if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                uint8_t *const ptr = dest_y + block_offset[i];
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
                                s->qscale, IS_INTRA(mb_type) ? 1 : 0);
            }
    }
}
633 
/* Run spatial (intra) luma prediction for the current macroblock and
 * reconstruct it: 4x4 prediction + per-block IDCT for intra4x4,
 * 16x16 prediction + luma DC dequant/IDCT otherwise.
 * NOTE(review): the declaration line of this function is elided from this
 * listing (upstream it is a static hl_decode_mb_predict_luma() taking the
 * SVQ3Context as its first parameter) — confirm against the original
 * source. */
                                          int mb_type,
                                          const int *block_offset,
                                          int linesize,
                                          uint8_t *dest_y)
{
    int i;
    int qscale = s->qscale;

    if (IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++) {
            uint8_t *const ptr = dest_y + block_offset[i];
            const int dir      = s->intra4x4_pred_mode_cache[scan8[i]];

            uint8_t *topright;
            int nnz, tr;
            if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                const int topright_avail = (s->topright_samples_available << i) & 0x8000;
                av_assert2(s->mb_y || linesize <= block_offset[i]);
                if (!topright_avail) {
                    /* top-right samples missing: replicate the rightmost
                     * available top sample four times */
                    tr       = ptr[3 - linesize] * 0x01010101u;
                    topright = (uint8_t *)&tr;
                } else
                    topright = ptr + 4 - linesize;
            } else
                topright = NULL;

            s->hpc.pred4x4[dir](ptr, topright, linesize);
            nnz = s->non_zero_count_cache[scan8[i]];
            if (nnz) {
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
            }
        }
    } else {
        s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
        svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
    }
}
672 
/* Reconstruct the current macroblock into the current picture: intra
 * prediction (if any), luma IDCTs, then chroma prediction/IDCTs.
 * NOTE(review): the signature line of this function is elided from this
 * listing (upstream it is a static hl_decode_mb() taking the SVQ3Context)
 * — confirm against the original source. */
{
    const int mb_x    = s->mb_x;
    const int mb_y    = s->mb_y;
    const int mb_xy   = s->mb_xy;
    const int mb_type = s->cur_pic->mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize;
    int i, j;
    const int *block_offset = &s->block_offset[0];
    const int block_h       = 16 >> 1; /* chroma rows per MB */

    linesize   = s->cur_pic->f->linesize[0];
    uvlinesize = s->cur_pic->f->linesize[1];

    dest_y  = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
    dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
    dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;

    s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
    s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);

    if (IS_INTRA(mb_type)) {
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);

        hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
    }

    hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);

    /* chroma coefficients present? (cbp bits 4/5) */
    if (s->cbp & 0x30) {
        uint8_t *dest[2] = { dest_cb, dest_cr };
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
                                               s->dequant4_coeff[4][0]);
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
                                               s->dequant4_coeff[4][0]);
        for (j = 1; j < 3; j++) {
            for (i = j * 16; i < j * 16 + 4; i++)
                if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                    uint8_t *const ptr = dest[j - 1] + block_offset[i];
                    svq3_add_idct_c(ptr, s->mb + i * 16,
                                    uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
                }
        }
    }
}
720 
/**
 * Decode one macroblock: parse its type-specific syntax (skip / inter /
 * intra4x4 / intra16x16), fill the prediction caches, decode MVs and
 * residual coefficients, and record the resulting MB_TYPE_* value.
 *
 * @param mb_type raw SVQ3 macroblock type code
 *                (0 = skip, 1..7 = inter, 8/33 = intra4x4, else intra16x16)
 * @return 0 on success, negative on a bitstream error
 */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = s->mb_xy;
    const int b_xy  = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;

    s->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    s->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    s->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            /* zero-MV copy from the reference frame(s) */
            svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (s->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            /* B-frame direct mode: derive MVs from the co-located MB */
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
            /* NOTE(review): the statement "mode = THIRDPEL_MODE;" is elided
             * from this listing at this point — confirm against the
             * original source. */
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&s->gb_slice))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here:
         * ????????
         * ???11111
         * N??11111
         * N??11111
         * N??11111
         */

        for (m = 0; m < 2; m++) {
            /* left column of the MV cache */
            if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
                              s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            /* top row, top-right and top-left entries */
            if (s->mb_y > 0) {
                memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
                       s->cur_pic->motion_val[m][b_xy - s->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&s->ref_cache[m][scan8[0] - 1 * 8],
                       (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < s->mb_width - 1) {
                    AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
                         s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            /* the second list is only needed for B-frames */
            if (s->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else { /* AV_PICTURE_TYPE_B */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        int8_t *i4x4       = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
        int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;

        memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            /* import the neighbours' stored prediction modes */
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
                if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    s->left_samples_available = 0x5F5F;
            }
            if (s->mb_y > 0) {
                s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
                s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
                s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
                s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];

                if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    s->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = get_interleaved_ue_golomb(&s->gb_slice);

                if (vlc >= 25U) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "luma prediction:%"PRIu32"\n", vlc);
                    return -1;
                }

                left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &s->intra4x4_pred_mode_cache[scan8[i] - 8];

                /* one VLC codes two blocks; modes are context-looked-up */
                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else { /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        /* store the right/bottom edge modes for future neighbours */
        AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
        i4x4[4] = i4x4_cache[7 + 8 * 3];
        i4x4[5] = i4x4_cache[7 + 8 * 2];
        i4x4[6] = i4x4_cache[7 + 8 * 1];

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
                                             s->avctx, s->top_samples_available,
                                             s->left_samples_available);

            s->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            s->top_samples_available  = 0x33FF;
            s->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                                     s->left_samples_available, dir, 0)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return s->intra16x16_pred_mode;
        }

        cbp     = ff_h264_i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    /* clear stale MVs for non-inter MBs in P/B frames */
    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    }

    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
            av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
        /* NOTE(review): the inter arm of this conditional (the ':' branch,
         * an inter-CBP table lookup) is elided from this listing — confirm
         * against the original source. */
    }
    if (IS_INTRA16x16(mb_type) ||
        (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        s->qscale += get_interleaved_se_golomb(&s->gb_slice);

        if (s->qscale > 31u) {
            av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(s->mb_luma_dc[0] + 0);
        AV_ZERO128(s->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    /* intra16x16 uses a different 4x4 block ordering */
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    s->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        if ((cbp & 0x30)) {
            /* chroma DC blocks */
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                /* chroma AC blocks */
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16 * i + j;
                        s->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    s->cbp                     = cbp;
    s->cur_pic->mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                            s->left_samples_available, DC_PRED8x8, 1);

    return 0;
}
1019 
1021 {
1022  SVQ3Context *s = avctx->priv_data;
1023  const int mb_xy = s->mb_xy;
1024  int i, header;
1025  unsigned slice_id;
1026 
1027  header = get_bits(&s->gb, 8);
1028 
1029  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1030  /* TODO: what? */
1031  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1032  return -1;
1033  } else {
1034  int slice_bits, slice_bytes, slice_length;
1035  int length = header >> 5 & 3;
1036 
1037  slice_length = show_bits(&s->gb, 8 * length);
1038  slice_bits = slice_length * 8;
1039  slice_bytes = slice_length + length - 1;
1040 
1041  skip_bits(&s->gb, 8);
1042 
1043  av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
1044  if (!s->slice_buf)
1045  return AVERROR(ENOMEM);
1046 
1047  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1048  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1049  return AVERROR_INVALIDDATA;
1050  }
1051  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1052 
1053  if (s->watermark_key) {
1054  uint32_t header = AV_RL32(&s->slice_buf[1]);
1055  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1056  }
1057  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1058 
1059  if (length > 0) {
1060  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1061  }
1062  skip_bits_long(&s->gb, slice_bytes * 8);
1063  }
1064 
1065  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1066  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1067  return -1;
1068  }
1069 
1070  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1071 
1072  if ((header & 0x9F) == 2) {
1073  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1074  get_bits(&s->gb_slice, i);
1075  } else if (get_bits1(&s->gb_slice)) {
1076  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1077  return AVERROR_PATCHWELCOME;
1078  }
1079 
1080  s->slice_num = get_bits(&s->gb_slice, 8);
1081  s->qscale = get_bits(&s->gb_slice, 5);
1082  s->adaptive_quant = get_bits1(&s->gb_slice);
1083 
1084  /* unknown fields */
1085  skip_bits1(&s->gb_slice);
1086 
1087  if (s->has_watermark)
1088  skip_bits1(&s->gb_slice);
1089 
1090  skip_bits1(&s->gb_slice);
1091  skip_bits(&s->gb_slice, 2);
1092 
1093  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1094  return AVERROR_INVALIDDATA;
1095 
1096  /* reset intra predictors and invalidate motion vector references */
1097  if (s->mb_x > 0) {
1098  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1099  -1, 4 * sizeof(int8_t));
1100  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1101  -1, 8 * sizeof(int8_t) * s->mb_x);
1102  }
1103  if (s->mb_y > 0) {
1104  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1105  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1106 
1107  if (s->mb_x > 0)
1108  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1109  }
1110 
1111  return 0;
1112 }
1113 
1115 {
1116  int q, x;
1117  const int max_qp = 51;
1118 
1119  for (q = 0; q < max_qp + 1; q++) {
1120  int shift = ff_h264_quant_div6[q] + 2;
1121  int idx = ff_h264_quant_rem6[q];
1122  for (x = 0; x < 16; x++)
1123  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1124  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1125  }
1126 }
1127 
1129 {
1130  SVQ3Context *s = avctx->priv_data;
1131  int m, x, y;
1132  unsigned char *extradata;
1133  unsigned char *extradata_end;
1134  unsigned int size;
1135  int marker_found = 0;
1136  int ret;
1137 
1138  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1139  s->last_pic = av_mallocz(sizeof(*s->last_pic));
1140  s->next_pic = av_mallocz(sizeof(*s->next_pic));
1141  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1142  ret = AVERROR(ENOMEM);
1143  goto fail;
1144  }
1145 
1146  s->cur_pic->f = av_frame_alloc();
1147  s->last_pic->f = av_frame_alloc();
1148  s->next_pic->f = av_frame_alloc();
1149  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1150  return AVERROR(ENOMEM);
1151 
1152  ff_h264dsp_init(&s->h264dsp, 8, 1);
1153  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1154  ff_videodsp_init(&s->vdsp, 8);
1155 
1156 
1157  avctx->bits_per_raw_sample = 8;
1158 
1159  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1160  ff_tpeldsp_init(&s->tdsp);
1161 
1162  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1163  avctx->color_range = AVCOL_RANGE_JPEG;
1164 
1165  s->avctx = avctx;
1166  s->halfpel_flag = 1;
1167  s->thirdpel_flag = 1;
1168  s->has_watermark = 0;
1169 
1170  /* prowl for the "SEQH" marker in the extradata */
1171  extradata = (unsigned char *)avctx->extradata;
1172  extradata_end = avctx->extradata + avctx->extradata_size;
1173  if (extradata) {
1174  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1175  if (!memcmp(extradata, "SEQH", 4)) {
1176  marker_found = 1;
1177  break;
1178  }
1179  extradata++;
1180  }
1181  }
1182 
1183  /* if a match was found, parse the extra data */
1184  if (marker_found) {
1185  GetBitContext gb;
1186  int frame_size_code;
1187  int unk0, unk1, unk2, unk3, unk4;
1188  int w,h;
1189 
1190  size = AV_RB32(&extradata[4]);
1191  if (size > extradata_end - extradata - 8) {
1193  goto fail;
1194  }
1195  init_get_bits(&gb, extradata + 8, size * 8);
1196 
1197  /* 'frame size code' and optional 'width, height' */
1198  frame_size_code = get_bits(&gb, 3);
1199  switch (frame_size_code) {
1200  case 0:
1201  w = 160;
1202  h = 120;
1203  break;
1204  case 1:
1205  w = 128;
1206  h = 96;
1207  break;
1208  case 2:
1209  w = 176;
1210  h = 144;
1211  break;
1212  case 3:
1213  w = 352;
1214  h = 288;
1215  break;
1216  case 4:
1217  w = 704;
1218  h = 576;
1219  break;
1220  case 5:
1221  w = 240;
1222  h = 180;
1223  break;
1224  case 6:
1225  w = 320;
1226  h = 240;
1227  break;
1228  case 7:
1229  w = get_bits(&gb, 12);
1230  h = get_bits(&gb, 12);
1231  break;
1232  }
1233  ret = ff_set_dimensions(avctx, w, h);
1234  if (ret < 0)
1235  goto fail;
1236 
1237  s->halfpel_flag = get_bits1(&gb);
1238  s->thirdpel_flag = get_bits1(&gb);
1239 
1240  /* unknown fields */
1241  unk0 = get_bits1(&gb);
1242  unk1 = get_bits1(&gb);
1243  unk2 = get_bits1(&gb);
1244  unk3 = get_bits1(&gb);
1245 
1246  s->low_delay = get_bits1(&gb);
1247 
1248  /* unknown field */
1249  unk4 = get_bits1(&gb);
1250 
1251  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1252  unk0, unk1, unk2, unk3, unk4);
1253 
1254  if (skip_1stop_8data_bits(&gb) < 0) {
1256  goto fail;
1257  }
1258 
1259  s->has_watermark = get_bits1(&gb);
1260  avctx->has_b_frames = !s->low_delay;
1261  if (s->has_watermark) {
1262 #if CONFIG_ZLIB
1263  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1264  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1265  int u1 = get_interleaved_ue_golomb(&gb);
1266  int u2 = get_bits(&gb, 8);
1267  int u3 = get_bits(&gb, 2);
1268  int u4 = get_interleaved_ue_golomb(&gb);
1269  unsigned long buf_len = watermark_width *
1270  watermark_height * 4;
1271  int offset = get_bits_count(&gb) + 7 >> 3;
1272  uint8_t *buf;
1273 
1274  if (watermark_height <= 0 ||
1275  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1276  ret = -1;
1277  goto fail;
1278  }
1279 
1280  buf = av_malloc(buf_len);
1281  if (!buf) {
1282  ret = AVERROR(ENOMEM);
1283  goto fail;
1284  }
1285  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1286  watermark_width, watermark_height);
1287  av_log(avctx, AV_LOG_DEBUG,
1288  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1289  u1, u2, u3, u4, offset);
1290  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1291  size - offset) != Z_OK) {
1292  av_log(avctx, AV_LOG_ERROR,
1293  "could not uncompress watermark logo\n");
1294  av_free(buf);
1295  ret = -1;
1296  goto fail;
1297  }
1298  s->watermark_key = av_bswap16(av_crc(av_crc_get_table(AV_CRC_16_CCITT), 0, buf, buf_len));
1299 
1300  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1301  av_log(avctx, AV_LOG_DEBUG,
1302  "watermark key %#"PRIx32"\n", s->watermark_key);
1303  av_free(buf);
1304 #else
1305  av_log(avctx, AV_LOG_ERROR,
1306  "this svq3 file contains watermark which need zlib support compiled in\n");
1307  ret = -1;
1308  goto fail;
1309 #endif
1310  }
1311  }
1312 
1313  s->mb_width = (avctx->width + 15) / 16;
1314  s->mb_height = (avctx->height + 15) / 16;
1315  s->mb_stride = s->mb_width + 1;
1316  s->mb_num = s->mb_width * s->mb_height;
1317  s->b_stride = 4 * s->mb_width;
1318  s->h_edge_pos = s->mb_width * 16;
1319  s->v_edge_pos = s->mb_height * 16;
1320 
1321  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1322  if (!s->intra4x4_pred_mode)
1323  return AVERROR(ENOMEM);
1324 
1325  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1326  sizeof(*s->mb2br_xy));
1327  if (!s->mb2br_xy)
1328  return AVERROR(ENOMEM);
1329 
1330  for (y = 0; y < s->mb_height; y++)
1331  for (x = 0; x < s->mb_width; x++) {
1332  const int mb_xy = x + y * s->mb_stride;
1333 
1334  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1335  }
1336 
1338 
1339  return 0;
1340 fail:
1341  svq3_decode_end(avctx);
1342  return ret;
1343 }
1344 
1345 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1346 {
1347  int i;
1348  for (i = 0; i < 2; i++) {
1351  }
1353 
1354  av_frame_unref(pic->f);
1355 }
1356 
1357 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1358 {
1359  SVQ3Context *s = avctx->priv_data;
1360  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1361  const int mb_array_size = s->mb_stride * s->mb_height;
1362  const int b4_stride = s->mb_width * 4 + 1;
1363  const int b4_array_size = b4_stride * s->mb_height * 4;
1364  int ret;
1365 
1366  if (!pic->motion_val_buf[0]) {
1367  int i;
1368 
1369  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1370  if (!pic->mb_type_buf)
1371  return AVERROR(ENOMEM);
1372  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1373 
1374  for (i = 0; i < 2; i++) {
1375  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1376  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1377  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1378  ret = AVERROR(ENOMEM);
1379  goto fail;
1380  }
1381 
1382  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1383  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1384  }
1385  }
1386 
1387  ret = ff_get_buffer(avctx, pic->f,
1388  (s->pict_type != AV_PICTURE_TYPE_B) ?
1390  if (ret < 0)
1391  goto fail;
1392 
1393  if (!s->edge_emu_buffer) {
1394  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1395  if (!s->edge_emu_buffer)
1396  return AVERROR(ENOMEM);
1397  }
1398 
1399  return 0;
1400 fail:
1401  free_picture(avctx, pic);
1402  return ret;
1403 }
1404 
/**
 * Decode one SVQ3 packet into a frame.
 *
 * Handles the end-of-stream flush (empty packet), optional watermark
 * de-scrambling of the input buffer, slice-header parsing, reference-frame
 * management (last/next swap for non-B pictures), the macroblock decode
 * loop, and output-frame selection (B/low-delay frames output directly,
 * otherwise the previous reference is output for reordering).
 *
 * @param data      output AVFrame (as void*, per the legacy decode API)
 * @param got_frame set to 1 when a frame was produced
 * @return consumed byte count (buf_size) on success, negative on error
 */
static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    SVQ3Context *s = avctx->priv_data;
    int buf_size = avpkt->size;
    int left;
    uint8_t *buf;
    int ret, m, i;

    /* special case for last picture: flush the delayed reference once */
    if (buf_size == 0) {
        if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
            ret = av_frame_ref(data, s->next_pic->f);
            if (ret < 0)
                return ret;
            s->last_frame_output = 1;
            *got_frame = 1;
        }
        return 0;
    }

    s->mb_x = s->mb_y = s->mb_xy = 0;

    if (s->watermark_key) {
        /* work on a private copy: slice_header XORs the watermark key into it */
        av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
        if (!s->buf)
            return AVERROR(ENOMEM);
        memcpy(s->buf, avpkt->data, buf_size);
        buf = s->buf;
    } else {
        buf = avpkt->data;
    }

    ret = init_get_bits(&s->gb, buf, 8 * buf_size);
    if (ret < 0)
        return ret;

    if (svq3_decode_slice_header(avctx))
        return -1;

    s->pict_type = s->slice_type;

    /* non-B pictures become the new "next" reference; rotate the old one out */
    if (s->pict_type != AV_PICTURE_TYPE_B)
        FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);

    av_frame_unref(s->cur_pic->f);

    /* for skipping the frame */
    s->cur_pic->f->pict_type = s->pict_type;
    s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);

    ret = get_buffer(avctx, s->cur_pic);
    if (ret < 0)
        return ret;

    /* per-block pixel offsets into the luma/chroma planes, in scan8 order;
     * the 48+ entries use doubled vertical stride (frame/field layout) */
    for (i = 0; i < 16; i++) {
        s->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        s->block_offset[16 + i]      =
        s->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        s->block_offset[48 + 16 + i] =
        s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }

    if (s->pict_type != AV_PICTURE_TYPE_I) {
        /* synthesize grey reference frames when references are missing so
         * decoding can proceed after a seek or stream error */
        if (!s->last_pic->f->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->last_pic->f);
            ret = get_buffer(avctx, s->last_pic);
            if (ret < 0)
                return ret;
            memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
            memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[1]);
            memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->last_pic->f->linesize[2]);
        }

        if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            av_frame_unref(s->next_pic->f);
            ret = get_buffer(avctx, s->next_pic);
            if (ret < 0)
                return ret;
            memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
            memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[1]);
            memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
                   s->next_pic->f->linesize[2]);
        }
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_DEBUG,
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(s->pict_type),
               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, s->qscale, s->slice_num);

    if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
        avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (s->pict_type == AV_PICTURE_TYPE_B)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        /* B-frame temporal position is derived from the slice number delta */
        s->frame_num_offset = s->slice_num - s->prev_frame_num;

        if (s->frame_num_offset < 0)
            s->frame_num_offset += 256;
        if (s->frame_num_offset == 0 ||
            s->frame_num_offset >= s->prev_frame_num_offset) {
            av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        s->prev_frame_num        = s->frame_num;
        s->frame_num             = s->slice_num;
        s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;

        if (s->prev_frame_num_offset < 0)
            s->prev_frame_num_offset += 256;
    }

    /* mark everything available except the column right of each row
     * (j == 4 after the inner loop — the right-edge neighbour) */
    for (m = 0; m < 2; m++) {
        int i;
        for (i = 0; i < 4; i++) {
            int j;
            for (j = -1; j < 4; j++)
                s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
            if (i < 3)
                s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
        }
    }

    for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
        for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
            unsigned mb_type;
            s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;

            /* fewer than 8 bits left: either the slice ends on a byte
             * boundary / zero padding (read the next slice header) or the
             * remainder is in-slice padding */
            if ((get_bits_left(&s->gb_slice)) <= 7) {
                if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
                     show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {

                    if (svq3_decode_slice_header(avctx))
                        return -1;
                }
                if (s->slice_type != s->pict_type) {
                    avpriv_request_sample(avctx, "non constant slice type");
                }
                /* TODO: support s->mb_skip_run */
            }

            mb_type = get_interleaved_ue_golomb(&s->gb_slice);

            /* remap the coded mb_type into the shared I/P/B type space */
            if (s->pict_type == AV_PICTURE_TYPE_I)
                mb_type += 8;
            else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
                mb_type += 4;
            if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
                return -1;
            }

            if (mb_type != 0 || s->cbp)
                hl_decode_mb(s);

            if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
                s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
                    (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
        }

        ff_draw_horiz_band(avctx, s->cur_pic->f,
                           s->last_pic->f->data[0] ? s->last_pic->f : NULL,
                           16 * s->mb_y, 16, PICT_FRAME, 0,
                           s->low_delay);
    }

    left = buf_size*8 - get_bits_count(&s->gb_slice);

    if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
        av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
        //av_hex_dump(stderr, buf+buf_size-8, 8);
    }

    if (left < 0) {
        av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
        return -1;
    }

    /* NOTE(review): if neither branch below is taken, the following
     * `ret < 0` check re-tests the (non-negative) value from the earlier
     * get_buffer() call — harmless but worth confirming upstream */
    if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
        ret = av_frame_ref(data, s->cur_pic->f);
    else if (s->last_pic->f->data[0])
        ret = av_frame_ref(data, s->last_pic->f);
    if (ret < 0)
        return ret;

    /* Do not output the last pic after seeking. */
    if (s->last_pic->f->data[0] || s->low_delay)
        *got_frame = 1;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
    } else {
        av_frame_unref(s->cur_pic->f);
    }

    return buf_size;
}
1623 
1625 {
1626  SVQ3Context *s = avctx->priv_data;
1627 
1628  free_picture(avctx, s->cur_pic);
1629  free_picture(avctx, s->next_pic);
1630  free_picture(avctx, s->last_pic);
1631  av_frame_free(&s->cur_pic->f);
1632  av_frame_free(&s->next_pic->f);
1633  av_frame_free(&s->last_pic->f);
1634  av_freep(&s->cur_pic);
1635  av_freep(&s->next_pic);
1636  av_freep(&s->last_pic);
1637  av_freep(&s->slice_buf);
1638  av_freep(&s->intra4x4_pred_mode);
1639  av_freep(&s->edge_emu_buffer);
1640  av_freep(&s->mb2br_xy);
1641 
1642 
1643  av_freep(&s->buf);
1644  s->buf_size = 0;
1645 
1646  return 0;
1647 }
1648 
1650  .name = "svq3",
1651  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1652  .type = AVMEDIA_TYPE_VIDEO,
1653  .id = AV_CODEC_ID_SVQ3,
1654  .priv_data_size = sizeof(SVQ3Context),
1656  .close = svq3_decode_end,
1658  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1661  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1662  AV_PIX_FMT_NONE},
1663 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
SVQ3Context::frame_num
int frame_num
Definition: svq3.c:114
SVQ3Context::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: svq3.c:141
IS_INTRA4x4
#define IS_INTRA4x4(a)
Definition: mpegutils.h:75
svq3_dequant_coeff
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:217
AVCodec
AVCodec.
Definition: codec.h:190
SVQ3Context::next_pic
SVQ3Frame * next_pic
Definition: svq3.c:94
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
SVQ3Context::slice_type
enum AVPictureType slice_type
Definition: svq3.c:120
SVQ3Context::gb_slice
GetBitContext gb_slice
Definition: svq3.c:97
SVQ3Context::vdsp
VideoDSPContext vdsp
Definition: svq3.c:91
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
SVQ3Context::slice_num
int slice_num
Definition: svq3.c:111
level
uint8_t level
Definition: svq3.c:209
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
svq3_decode_slice_header
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1020
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
SVQ3Context::avctx
AVCodecContext * avctx
Definition: svq3.c:85
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
SVQ3Context::mb_num
int mb_num
Definition: svq3.c:126
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
SVQ3Context::v_edge_pos
int v_edge_pos
Definition: svq3.c:109
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
AVPictureType
AVPictureType
Definition: avutil.h:272
ff_h264_chroma_qp
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
mv
static const int8_t mv[256][2]
Definition: 4xm.c:77
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
SVQ3Context::left_samples_available
unsigned int left_samples_available
Definition: svq3.c:139
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
ff_h264_golomb_to_inter_cbp
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
SVQ3Context::h_edge_pos
int h_edge_pos
Definition: svq3.c:108
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
IMbInfo::cbp
uint8_t cbp
Definition: h264data.h:36
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
DC_PRED
@ DC_PRED
Definition: vp9.h:48
MB_TYPE_INTRA4x4
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:51
SVQ3Context::slice_buf
uint8_t * slice_buf
Definition: svq3.c:98
data
const char data[16]
Definition: mxf.c:91
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:54
SVQ3Context::mb
int16_t mb[16 *48 *2]
Definition: svq3.c:145
PREDICT_MODE
#define PREDICT_MODE
Definition: svq3.c:155
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
Definition: mem.c:192
free_picture
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1345
av_buffer_allocz
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_golomb_to_intra4x4_cbp
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
SVQ3Context::frame_num_offset
int frame_num_offset
Definition: svq3.c:115
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:52
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
SVQ3Context::last_frame_output
int last_frame_output
Definition: svq3.c:110
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
get_buffer
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1357
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
A
#define A(x)
Definition: vp56_arith.h:28
crc.h
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
golomb.h
exp golomb vlc stuff
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
SVQ3Context::last_pic
SVQ3Frame * last_pic
Definition: svq3.c:95
SVQ3Context::qscale
int qscale
Definition: svq3.c:112
U
#define U(x)
Definition: vp56_arith.h:37
SVQ3Context::topright_samples_available
unsigned int topright_samples_available
Definition: svq3.c:138
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
fail
#define fail()
Definition: checkasm.h:123
GetBitContext
Definition: get_bits.h:61
x
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
Definition: fate.txt:150
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
SVQ3Context::slice_size
int slice_size
Definition: svq3.c:99
SVQ3Context::tdsp
TpelDSPContext tdsp
Definition: svq3.c:90
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
SVQ3Context::thirdpel_flag
int thirdpel_flag
Definition: svq3.c:101
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
SVQ3Context::intra4x4_pred_mode_cache
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:134
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
SVQ3Context::gb
GetBitContext gb
Definition: svq3.c:96
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
SVQ3Context::cbp
int cbp
Definition: svq3.c:113
FULLPEL_MODE
#define FULLPEL_MODE
Definition: svq3.c:152
SVQ3Context::mb_y
int mb_y
Definition: svq3.c:123
SVQ3Context::mb_x
int mb_x
Definition: svq3.c:123
SVQ3Context::adaptive_quant
int adaptive_quant
Definition: svq3.c:106
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
SVQ3Context::buf_size
int buf_size
Definition: svq3.c:105
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
TpelDSPContext
thirdpel DSP context
Definition: tpeldsp.h:42
SVQ3Context::pict_type
enum AVPictureType pict_type
Definition: svq3.c:119
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
svq3_mc_dir
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:506
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
ff_tpeldsp_init
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
QP_MAX_NUM
#define QP_MAX_NUM
Definition: h264.h:27
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
h264data.h
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
svq3_pred_motion
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:382
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:81
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:239
SVQ3Context::top_samples_available
unsigned int top_samples_available
Definition: svq3.c:137
IS_INTRA
#define IS_INTRA(x, y)
SVQ3Frame::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: svq3.c:73
AV_CODEC_ID_SVQ3
@ AV_CODEC_ID_SVQ3
Definition: codec_id.h:72
SVQ3Context::b_stride
int b_stride
Definition: svq3.c:127
SVQ3Context::prev_frame_num_offset
int prev_frame_num_offset
Definition: svq3.c:116
SVQ3Context::h264dsp
H264DSPContext h264dsp
Definition: svq3.c:87
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
int32_t
int32_t
Definition: audio_convert.c:194
IMbInfo::pred_mode
uint8_t pred_mode
Definition: h264data.h:35
if
if(ret)
Definition: filter_design.txt:179
SVQ3Context::next_p_frame_damaged
int next_p_frame_damaged
Definition: svq3.c:107
SVQ3Frame::motion_val
int16_t(*[2] motion_val)[2]
Definition: svq3.c:74
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:208
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
SVQ3Context::mb_width
int mb_width
Definition: svq3.c:125
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
SVQ3Context::mb2br_xy
uint32_t * mb2br_xy
Definition: svq3.c:129
SVQ3Frame::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: svq3.c:80
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
src
#define src
Definition: vp8dsp.c:254
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
ff_h264_chroma_dc_scan
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
SVQ3Context
Definition: svq3.c:84
svq3_dct_tables
static const struct @135 svq3_dct_tables[2][16]
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
AV_RB32
#define AV_RB32
Definition: intreadwrite.h:130
SVQ3Frame::mb_type_buf
AVBufferRef * mb_type_buf
Definition: svq3.c:76
SVQ3Context::mb_luma_dc
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:146
tpeldsp.h
index
int index
Definition: gxfenc.c:89
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
hl_decode_mb_idct_luma
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:619
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
H264DSPContext
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
SVQ3Context::intra16x16_pred_mode
int intra16x16_pred_mode
Definition: svq3.c:132
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:235
SVQ3Context::hpc
H264PredContext hpc
Definition: svq3.c:88
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1854
init_dequant4_coeff_table
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1114
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
svq3_fetch_diagonal_mv
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:361
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
AVPacket::size
int size
Definition: packet.h:356
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:186
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
rectangle.h
hl_decode_mb
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:673
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
size
int size
Definition: twinvq_data.h:11134
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
avg
#define avg(a, b, c, d)
Definition: colorspacedsp_template.c:28
header
static const uint8_t header[24]
Definition: sdr2.c:67
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
av_bswap16
#define av_bswap16
Definition: bswap.h:31
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
ff_h264_quant_rem6
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:76
hl_decode_mb_predict_luma
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:634
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
av_log2
#define av_log2
Definition: intmath.h:83
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:666
SVQ3Context::prev_frame_num
int prev_frame_num
Definition: svq3.c:117
svq3_add_idct_c
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:261
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:112
h264dec.h
svq3_decode_frame
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1405
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
svq3_luma_dc_dequant_idct_c
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:226
stride
#define stride
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AV_CRC_16_CCITT
@ AV_CRC_16_CCITT
Definition: crc.h:52
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
SVQ3Frame
Definition: svq3.c:70
THIRDPEL_MODE
#define THIRDPEL_MODE
Definition: svq3.c:154
SVQ3Context::mv_cache
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:143
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
av_always_inline
#define av_always_inline
Definition: attributes.h:49
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
SVQ3Context::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:147
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264dec.h:397
AVCodecContext::height
int height
Definition: avcodec.h:699
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
svq3_decode_mb
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:721
svq3_scan
static const uint8_t svq3_scan[16]
Definition: svq3.c:166
avcodec.h
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
SVQ3Context::halfpel_flag
int halfpel_flag
Definition: svq3.c:100
mid_pred
#define mid_pred
Definition: mathops.h:97
svq3_pred_1
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:192
ret
ret
Definition: filter_design.txt:187
SVQ3Frame::mb_type
uint32_t * mb_type
Definition: svq3.c:77
SVQ3Context::mb_height
int mb_height
Definition: svq3.c:125
w
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo ug o o w
Definition: fate.txt:150
SVQ3Context::hdsp
HpelDSPContext hdsp
Definition: svq3.c:89
ff_svq3_decoder
AVCodec ff_svq3_decoder
Definition: svq3.c:1649
SVQ3Context::low_delay
int low_delay
Definition: svq3.c:121
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
svq3_decode_block
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:301
B
#define B
Definition: huffyuvdsp.h:32
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:854
AVCodecContext
main external API structure.
Definition: avcodec.h:526
ff_h264_dequant4_coeff_init
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
SVQ3Frame::f
AVFrame * f
Definition: svq3.c:71
SVQ3Context::block_offset
int block_offset[2 *(16 *3)]
Definition: svq3.c:149
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
AV_RL32
#define AV_RL32
Definition: intreadwrite.h:146
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
SVQ3Frame::ref_index
int8_t * ref_index[2]
Definition: svq3.c:81
mode
mode
Definition: ebur128.h:83
ff_h264_check_intra4x4_pred_mode
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:131
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
ff_h264_i_mb_type_info
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
svq1.h
SVQ3Context::chroma_pred_mode
int chroma_pred_mode
Definition: svq3.c:131
SVQ3Context::watermark_key
uint32_t watermark_key
Definition: svq3.c:103
SVQ3Context::mb_xy
int mb_xy
Definition: svq3.c:124
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:650
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
temp
else temp
Definition: vf_mcdeint.c:256
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
luma_dc_zigzag_scan
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:173
ff_h264_quant_div6
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
VideoDSPContext
Definition: videodsp.h:41
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1611
H264PredContext
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
shift
static int shift(int a, int b)
Definition: sonic.c:82
svq3_mc_dir_part
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:431
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:81
svq3_decode_end
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1624
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
SVQ3Context::dequant4_coeff
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:148
SVQ3Context::ref_cache
int8_t ref_cache[2][5 *8]
Definition: svq3.c:144
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:504
SVQ3Context::mb_stride
int mb_stride
Definition: svq3.c:126
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
hpeldsp.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
svq3_decode_init
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1128
h
h
Definition: vp9dsp_template.c:2038
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:232
int
int
Definition: ffmpeg_filter.c:192
SVQ3Context::buf
uint8_t * buf
Definition: svq3.c:104
SVQ3Context::cur_pic
SVQ3Frame * cur_pic
Definition: svq3.c:93
SVQ3Context::has_watermark
int has_watermark
Definition: svq3.c:102
SVQ3Context::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: svq3.c:135
svq3_pred_0
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:180
HALFPEL_MODE
#define HALFPEL_MODE
Definition: svq3.c:153
ff_h264_check_intra_pred_mode
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:179