FFmpeg  2.8.15
diracdec.c
1 /*
2  * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
3  * Copyright (C) 2009 David Conrad
4  * Copyright (C) 2011 Jordi Ortiz
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * Dirac Decoder
26  * @author Marco Gerards <marco@gnu.org>, David Conrad, Jordi Ortiz <nenjordi@gmail.com>
27  */
28 
29 #include "avcodec.h"
30 #include "get_bits.h"
31 #include "bytestream.h"
32 #include "internal.h"
33 #include "golomb.h"
34 #include "dirac_arith.h"
35 #include "mpeg12data.h"
36 #include "libavcodec/mpegvideo.h"
37 #include "mpegvideoencdsp.h"
38 #include "dirac_dwt.h"
39 #include "dirac.h"
40 #include "diracdsp.h"
41 #include "videodsp.h"
42 
43 /**
44  * The spec limits the number of wavelet decompositions to 4 for both
45  * level 1 (VC-2) and 128 (long-gop default).
46  * 5 decompositions is the maximum before >16-bit buffers are needed.
47  * Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting
48  * the others to 4 decompositions (or 3 for the fidelity filter).
49  *
50  * We use this instead of MAX_DECOMPOSITIONS to save some memory.
51  */
52 #define MAX_DWT_LEVELS 5
53 
54 /**
55  * The spec limits this to 3 for frame coding, but in practice can be as high as 6
56  */
57 #define MAX_REFERENCE_FRAMES 8
58 #define MAX_DELAY 5 /* limit for main profile for frame coding (TODO: field coding) */
59 #define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
60 #define MAX_QUANT 68 /* max quant for VC-2 */
61 #define MAX_BLOCKSIZE 32 /* maximum xblen/yblen we support */
62 
63 /**
64  * DiracBlock->ref flags, if set then the block does MC from the given ref
65  */
66 #define DIRAC_REF_MASK_REF1 1
67 #define DIRAC_REF_MASK_REF2 2
68 #define DIRAC_REF_MASK_GLOBAL 4
69 
70 /**
71  * Value of Picture.reference when Picture is not a reference picture, but
72  * is held for delayed output.
73  */
74 #define DELAYED_PIC_REF 4
75 
76 #define CALC_PADDING(size, depth) \
77  (((size + (1 << depth) - 1) >> depth) << depth)
78 
79 #define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
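/* e.g. with MAX_DWT_LEVELS == 5, CALC_PADDING(1080, 5) rounds up to 1088 (the next
 * multiple of 1<<5), and DIVRNDUP(1920, 4) == 480; these are used below for the
 * padded IDWT plane sizes and for the superblock/slice counts respectively. */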
80 
81 typedef struct {
82  AVFrame *avframe;
83  int interpolated[3]; /* 1 if hpel[] is valid */
84  uint8_t *hpel[3][4];
85  uint8_t *hpel_base[3][4];
86  int reference;
87 } DiracFrame;
88 
89 typedef struct {
90  union {
91  int16_t mv[2][2];
92  int16_t dc[3];
93  } u; /* anonymous unions aren't in C99 :( */
94  uint8_t ref;
95 } DiracBlock;
96 
97 typedef struct SubBand {
98  int level;
99  int orientation;
100  int stride;
101  int width;
102  int height;
103  int quant;
104  IDWTELEM *ibuf;
105  struct SubBand *parent;
106 
107  /* for low delay */
108  unsigned length;
109  const uint8_t *coeff_data;
110 } SubBand;
111 
112 typedef struct Plane {
113  int width;
114  int height;
115  ptrdiff_t stride;
116 
117  int idwt_width;
118  int idwt_height;
119  int idwt_stride;
120  IDWTELEM *idwt_buf;
121  IDWTELEM *idwt_buf_base;
122  IDWTELEM *idwt_tmp;
123 
124  /* block length */
125  uint8_t xblen;
126  uint8_t yblen;
127  /* block separation (block n+1 starts after this many pixels in block n) */
128  uint8_t xbsep;
129  uint8_t ybsep;
130  /* amount of overspill on each edge (half of the overlap between blocks) */
131  uint8_t xoffset;
132  uint8_t yoffset;
133 
134  SubBand band[MAX_DWT_LEVELS][4];
135 } Plane;
136 
137 typedef struct DiracContext {
138  AVCodecContext *avctx;
139  MpegvideoEncDSPContext mpvencdsp;
140  VideoDSPContext vdsp;
141  DiracDSPContext diracdsp;
142  GetBitContext gb;
143  dirac_source_params source;
144  int seen_sequence_header;
145  int frame_number; /* number of the next frame to display */
146  Plane plane[3];
147  int chroma_x_shift;
148  int chroma_y_shift;
149 
150  int zero_res; /* zero residue flag */
151  int is_arith; /* whether coeffs use arith or golomb coding */
152  int low_delay; /* use the low delay syntax */
153  int globalmc_flag; /* use global motion compensation */
154  int num_refs; /* number of reference pictures */
155 
156  /* wavelet decoding */
157  unsigned wavelet_depth; /* depth of the IDWT */
158  unsigned wavelet_idx;
159 
160  /**
161  * schroedinger older than 1.0.8 doesn't store
162  * quant delta if only one codebook exists in a band
163  */
164  unsigned old_delta_quant;
165  unsigned codeblock_mode;
166 
167  struct {
168  unsigned width;
169  unsigned height;
170  } codeblock[MAX_DWT_LEVELS+1];
171 
172  struct {
173  unsigned num_x; /* number of horizontal slices */
174  unsigned num_y; /* number of vertical slices */
175  AVRational bytes; /* average bytes per slice */
176  uint8_t quant[MAX_DWT_LEVELS][4]; /* [DIRAC_STD] E.1 */
177  } lowdelay;
178 
179  struct {
180  int pan_tilt[2]; /* pan/tilt vector */
181  int zrs[2][2]; /* zoom/rotate/shear matrix */
182  int perspective[2]; /* perspective vector */
183  unsigned zrs_exp;
184  unsigned perspective_exp;
185  } globalmc[2];
186 
187  /* motion compensation */
188  uint8_t mv_precision; /* [DIRAC_STD] REFS_WT_PRECISION */
189  int16_t weight[2]; /* [DIRAC_STD] REF1_WT and REF2_WT */
190  unsigned weight_log2denom; /* [DIRAC_STD] REFS_WT_PRECISION */
191 
192  int blwidth; /* number of blocks (horizontally) */
193  int blheight; /* number of blocks (vertically) */
194  int sbwidth; /* number of superblocks (horizontally) */
195  int sbheight; /* number of superblocks (vertically) */
196 
197  uint8_t *sbsplit;
198  DiracBlock *blmotion;
199 
200  uint8_t *edge_emu_buffer[4];
201  uint8_t *edge_emu_buffer_base;
202 
203  uint16_t *mctmp; /* buffer holding the MC data multiplied by OBMC weights */
204  uint8_t *mcscratch;
205  int buffer_stride;
206 
207  DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
208 
209  void (*put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
210  void (*avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
211  void (*add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
212  dirac_weight_func weight_func;
213  dirac_biweight_func biweight_func;
214 
215  DiracFrame *current_picture;
216  DiracFrame *ref_pics[2];
217 
218  DiracFrame *ref_frames[MAX_REFERENCE_FRAMES+1];
219  DiracFrame *delay_frames[MAX_DELAY+1];
220  DiracFrame all_frames[MAX_FRAMES];
221 } DiracContext;
222 
223 /**
224  * Dirac Specification ->
225  * Parse code values. 9.6.1 Table 9.1
226  */
227 enum dirac_parse_code {
228  pc_seq_header = 0x00,
229  pc_eos = 0x10,
230  pc_aux_data = 0x20,
231  pc_padding = 0x30,
232 };
233 
234 enum dirac_subband {
235  subband_ll = 0,
236  subband_hl = 1,
237  subband_lh = 2,
238  subband_hh = 3,
239  subband_nb,
240 };
241 
242 static const uint8_t default_qmat[][4][4] = {
243  { { 5, 3, 3, 0}, { 0, 4, 4, 1}, { 0, 5, 5, 2}, { 0, 6, 6, 3} },
244  { { 4, 2, 2, 0}, { 0, 4, 4, 2}, { 0, 5, 5, 3}, { 0, 7, 7, 5} },
245  { { 5, 3, 3, 0}, { 0, 4, 4, 1}, { 0, 5, 5, 2}, { 0, 6, 6, 3} },
246  { { 8, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0} },
247  { { 8, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0}, { 0, 4, 4, 0} },
248  { { 0, 4, 4, 8}, { 0, 8, 8, 12}, { 0, 13, 13, 17}, { 0, 17, 17, 21} },
249  { { 3, 1, 1, 0}, { 0, 4, 4, 2}, { 0, 6, 6, 5}, { 0, 9, 9, 7} },
250 };
251 
252 static const int qscale_tab[MAX_QUANT+1] = {
253  4, 5, 6, 7, 8, 10, 11, 13,
254  16, 19, 23, 27, 32, 38, 45, 54,
255  64, 76, 91, 108, 128, 152, 181, 215,
256  256, 304, 362, 431, 512, 609, 724, 861,
257  1024, 1218, 1448, 1722, 2048, 2435, 2896, 3444,
258  4096, 4871, 5793, 6889, 8192, 9742, 11585, 13777,
259  16384, 19484, 23170, 27554, 32768, 38968, 46341, 55109,
260  65536, 77936
261 };
262 
263 static const int qoffset_intra_tab[MAX_QUANT+1] = {
264  1, 2, 3, 4, 4, 5, 6, 7,
265  8, 10, 12, 14, 16, 19, 23, 27,
266  32, 38, 46, 54, 64, 76, 91, 108,
267  128, 152, 181, 216, 256, 305, 362, 431,
268  512, 609, 724, 861, 1024, 1218, 1448, 1722,
269  2048, 2436, 2897, 3445, 4096, 4871, 5793, 6889,
270  8192, 9742, 11585, 13777, 16384, 19484, 23171, 27555,
271  32768, 38968
272 };
273 
274 static const int qoffset_inter_tab[MAX_QUANT+1] = {
275  1, 2, 2, 3, 3, 4, 4, 5,
276  6, 7, 9, 10, 12, 14, 17, 20,
277  24, 29, 34, 41, 48, 57, 68, 81,
278  96, 114, 136, 162, 192, 228, 272, 323,
279  384, 457, 543, 646, 768, 913, 1086, 1292,
280  1536, 1827, 2172, 2583, 3072, 3653, 4344, 5166,
281  6144, 7307, 8689, 10333, 12288, 14613, 17378, 20666,
282  24576, 29226
283 };
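/* These tables follow the quantiser model of the spec: qscale_tab[q] is roughly
 * 4 * 2^(q/4) (a quarter-step exponential carrying 2 fractional bits), and the
 * offsets are roughly qscale/2 for intra and 3*qscale/8 for inter coefficients. */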
284 
285 /* magic number division by 3 from schroedinger */
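/* 21845/65536 is just under 1/3 and 10922 adds the rounding term, so this computes
 * a rounded x/3 with a multiply and a shift instead of a division. */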
286 static inline int divide3(int x)
287 {
288  return (int)((x+1U)*21845 + 10922) >> 16;
289 }
290 
291 static DiracFrame *remove_frame(DiracFrame *framelist[], int picnum)
292 {
293  DiracFrame *remove_pic = NULL;
294  int i, remove_idx = -1;
295 
296  for (i = 0; framelist[i]; i++)
297  if (framelist[i]->avframe->display_picture_number == picnum) {
298  remove_pic = framelist[i];
299  remove_idx = i;
300  }
301 
302  if (remove_pic)
303  for (i = remove_idx; framelist[i]; i++)
304  framelist[i] = framelist[i+1];
305 
306  return remove_pic;
307 }
308 
309 static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
310 {
311  int i;
312  for (i = 0; i < maxframes; i++)
313  if (!framelist[i]) {
314  framelist[i] = frame;
315  return 0;
316  }
317  return -1;
318 }
319 
320 static int alloc_sequence_buffers(DiracContext *s)
321 {
322  int sbwidth = DIVRNDUP(s->source.width, 4);
323  int sbheight = DIVRNDUP(s->source.height, 4);
324  int i, w, h, top_padding;
325 
326  /* todo: think more about this / use or set Plane here */
327  for (i = 0; i < 3; i++) {
328  int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0);
329  int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0);
330  w = s->source.width >> (i ? s->chroma_x_shift : 0);
331  h = s->source.height >> (i ? s->chroma_y_shift : 0);
332 
333  /* we allocate the max we support here since num decompositions can
334  * change from frame to frame. Stride is aligned to 16 for SIMD, and
335  * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) in arith decoding
336  * MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that
337  * on each side */
338  top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2);
339  w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); /* FIXME: Should this be 16 for SSE??? */
340  h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2;
341 
342  s->plane[i].idwt_buf_base = av_mallocz_array((w+max_xblen), h * sizeof(IDWTELEM));
343  s->plane[i].idwt_tmp = av_malloc_array((w+16), sizeof(IDWTELEM));
344  s->plane[i].idwt_buf = s->plane[i].idwt_buf_base + top_padding*w;
345  if (!s->plane[i].idwt_buf_base || !s->plane[i].idwt_tmp)
346  return AVERROR(ENOMEM);
347  }
348 
349  /* fixme: allocate using real stride here */
350  s->sbsplit = av_malloc_array(sbwidth, sbheight);
351  s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
352 
353  if (!s->sbsplit || !s->blmotion)
354  return AVERROR(ENOMEM);
355  return 0;
356 }
357 
358 static int alloc_buffers(DiracContext *s, int stride)
359 {
360  int w = s->source.width;
361  int h = s->source.height;
362 
363  av_assert0(stride >= w);
364  stride += 64;
365 
366  if (s->buffer_stride >= stride)
367  return 0;
368  s->buffer_stride = 0;
369 
370  av_freep(&s->edge_emu_buffer_base);
371  memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
372  av_freep(&s->mctmp);
373  av_freep(&s->mcscratch);
374 
375  s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
376 
377  s->mctmp = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
378  s->mcscratch = av_malloc_array(stride, MAX_BLOCKSIZE);
379 
380  if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
381  return AVERROR(ENOMEM);
382 
383  s->buffer_stride = stride;
384  return 0;
385 }
386 
387 static void free_sequence_buffers(DiracContext *s)
388 {
389  int i, j, k;
390 
391  for (i = 0; i < MAX_FRAMES; i++) {
392  if (s->all_frames[i].avframe->data[0]) {
393  av_frame_unref(s->all_frames[i].avframe);
394  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
395  }
396 
397  for (j = 0; j < 3; j++)
398  for (k = 1; k < 4; k++)
399  av_freep(&s->all_frames[i].hpel_base[j][k]);
400  }
401 
402  memset(s->ref_frames, 0, sizeof(s->ref_frames));
403  memset(s->delay_frames, 0, sizeof(s->delay_frames));
404 
405  for (i = 0; i < 3; i++) {
406  av_freep(&s->plane[i].idwt_buf_base);
407  av_freep(&s->plane[i].idwt_tmp);
408  }
409 
410  s->buffer_stride = 0;
411  av_freep(&s->sbsplit);
412  av_freep(&s->blmotion);
413  av_freep(&s->edge_emu_buffer_base);
414 
415  av_freep(&s->mctmp);
416  av_freep(&s->mcscratch);
417 }
418 
419 static av_cold int dirac_decode_init(AVCodecContext *avctx)
420 {
421  DiracContext *s = avctx->priv_data;
422  int i;
423 
424  s->avctx = avctx;
425  s->frame_number = -1;
426 
427  ff_diracdsp_init(&s->diracdsp);
428  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
429  ff_videodsp_init(&s->vdsp, 8);
430 
431  for (i = 0; i < MAX_FRAMES; i++) {
432  s->all_frames[i].avframe = av_frame_alloc();
433  if (!s->all_frames[i].avframe) {
434  while (i > 0)
435  av_frame_free(&s->all_frames[--i].avframe);
436  return AVERROR(ENOMEM);
437  }
438  }
439 
440  return 0;
441 }
442 
443 static void dirac_decode_flush(AVCodecContext *avctx)
444 {
445  DiracContext *s = avctx->priv_data;
446  free_sequence_buffers(s);
447  s->seen_sequence_header = 0;
448  s->frame_number = -1;
449 }
450 
451 static av_cold int dirac_decode_end(AVCodecContext *avctx)
452 {
453  DiracContext *s = avctx->priv_data;
454  int i;
455 
456  dirac_decode_flush(avctx);
457  for (i = 0; i < MAX_FRAMES; i++)
458  av_frame_free(&s->all_frames[i].avframe);
459 
460  return 0;
461 }
462 
463 #define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
464 
465 static inline void coeff_unpack_arith(DiracArith *c, int qfactor, int qoffset,
466  SubBand *b, IDWTELEM *buf, int x, int y)
467 {
468  int coeff, sign;
469  int sign_pred = 0;
470  int pred_ctx = CTX_ZPZN_F1;
471 
472  /* Check if the parent subband has a 0 in the corresponding position */
473  if (b->parent)
474  pred_ctx += !!b->parent->ibuf[b->parent->stride * (y>>1) + (x>>1)] << 1;
475 
476  if (b->orientation == subband_hl)
477  sign_pred = buf[-b->stride];
478 
479  /* Determine if the pixel has only zeros in its neighbourhood */
480  if (x) {
481  pred_ctx += !(buf[-1] | buf[-b->stride] | buf[-1-b->stride]);
482  if (b->orientation == subband_lh)
483  sign_pred = buf[-1];
484  } else {
485  pred_ctx += !buf[-b->stride];
486  }
487 
488  coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA);
489  if (coeff) {
490  coeff = (coeff * qfactor + qoffset + 2) >> 2;
491  sign = dirac_get_arith_bit(c, SIGN_CTX(sign_pred));
492  coeff = (coeff ^ -sign) + sign;
493  }
494  *buf = coeff;
495 }
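/* In coeff_unpack_arith() above and coeff_unpack_golomb() below,
 * (coeff * qfactor + qoffset + 2) >> 2 is the inverse quantisation (qfactor and
 * qoffset carry 2 fractional bits), and (coeff ^ -sign) + sign negates the
 * magnitude without a branch when sign is 1. */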
496 
497 static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
498 {
499  int sign, coeff;
500 
501  coeff = svq3_get_ue_golomb(gb);
502  if (coeff) {
503  coeff = (coeff * qfactor + qoffset + 2) >> 2;
504  sign = get_bits1(gb);
505  coeff = (coeff ^ -sign) + sign;
506  }
507  return coeff;
508 }
509 
510 /**
511  * Decode the coeffs in the rectangle defined by left, right, top, bottom
512  * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
513  */
514 static inline void codeblock(DiracContext *s, SubBand *b,
515  GetBitContext *gb, DiracArith *c,
516  int left, int right, int top, int bottom,
517  int blockcnt_one, int is_arith)
518 {
519  int x, y, zero_block;
520  int qoffset, qfactor;
521  IDWTELEM *buf;
522 
523  /* check for any coded coefficients in this codeblock */
524  if (!blockcnt_one) {
525  if (is_arith)
526  zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
527  else
528  zero_block = get_bits1(gb);
529 
530  if (zero_block)
531  return;
532  }
533 
534  if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
535  int quant;
536  if (is_arith)
537  quant = dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
538  else
539  quant = dirac_get_se_golomb(gb);
540  if (quant > INT_MAX - b->quant || b->quant + quant < 0) {
541  av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
542  return;
543  }
544  b->quant += quant;
545  }
546 
547  b->quant = FFMIN(b->quant, MAX_QUANT);
548 
549  qfactor = qscale_tab[b->quant];
550  /* TODO: context pointer? */
551  if (!s->num_refs)
552  qoffset = qoffset_intra_tab[b->quant];
553  else
554  qoffset = qoffset_inter_tab[b->quant];
555 
556  buf = b->ibuf + top * b->stride;
557  for (y = top; y < bottom; y++) {
558  for (x = left; x < right; x++) {
559  /* [DIRAC_STD] 13.4.4 Subband coefficients. coeff_unpack() */
560  if (is_arith)
561  coeff_unpack_arith(c, qfactor, qoffset, b, buf+x, x, y);
562  else
563  buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
564  }
565  buf += b->stride;
566  }
567 }
568 
569 /**
570  * Dirac Specification ->
571  * 13.3 intra_dc_prediction(band)
572  */
573 static inline void intra_dc_prediction(SubBand *b)
574 {
575  IDWTELEM *buf = b->ibuf;
576  int x, y;
577 
578  for (x = 1; x < b->width; x++)
579  buf[x] += buf[x-1];
580  buf += b->stride;
581 
582  for (y = 1; y < b->height; y++) {
583  buf[0] += buf[-b->stride];
584 
585  for (x = 1; x < b->width; x++) {
586  int pred = buf[x - 1] + buf[x - b->stride] + buf[x - b->stride-1];
587  buf[x] += divide3(pred);
588  }
589  buf += b->stride;
590  }
591 }
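/* The first row and column are predicted from their single left/top neighbour;
 * interior DC coefficients add a rounded mean (via divide3) of the left, top and
 * top-left neighbours, as per [DIRAC_STD] 13.3. */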
592 
593 /**
594  * Dirac Specification ->
595  * 13.4.2 Non-skipped subbands. subband_coeffs()
596  */
597 static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
598 {
599  int cb_x, cb_y, left, right, top, bottom;
600  DiracArith c;
601  GetBitContext gb;
602  int cb_width = s->codeblock[b->level + (b->orientation != subband_ll)].width;
603  int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
604  int blockcnt_one = (cb_width + cb_height) == 2;
605 
606  if (!b->length)
607  return;
608 
609  init_get_bits8(&gb, b->coeff_data, b->length);
610 
611  if (is_arith)
612  ff_dirac_init_arith_decoder(&c, &gb, b->length);
613 
614  top = 0;
615  for (cb_y = 0; cb_y < cb_height; cb_y++) {
616  bottom = (b->height * (cb_y+1LL)) / cb_height;
617  left = 0;
618  for (cb_x = 0; cb_x < cb_width; cb_x++) {
619  right = (b->width * (cb_x+1LL)) / cb_width;
620  codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
621  left = right;
622  }
623  top = bottom;
624  }
625 
626  if (b->orientation == subband_ll && s->num_refs == 0)
627  intra_dc_prediction(b);
628 }
629 
630 static int decode_subband_arith(AVCodecContext *avctx, void *b)
631 {
632  DiracContext *s = avctx->priv_data;
633  decode_subband_internal(s, b, 1);
634  return 0;
635 }
636 
637 static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
638 {
639  DiracContext *s = avctx->priv_data;
640  SubBand **b = arg;
641  decode_subband_internal(s, *b, 0);
642  return 0;
643 }
644 
645 /**
646  * Dirac Specification ->
647  * [DIRAC_STD] 13.4.1 core_transform_data()
648  */
649 static void decode_component(DiracContext *s, int comp)
650 {
651  AVCodecContext *avctx = s->avctx;
652  SubBand *bands[3*MAX_DWT_LEVELS+1];
653  enum dirac_subband orientation;
654  int level, num_bands = 0;
655 
656  /* Unpack all subbands at all levels. */
657  for (level = 0; level < s->wavelet_depth; level++) {
658  for (orientation = !!level; orientation < 4; orientation++) {
659  SubBand *b = &s->plane[comp].band[level][orientation];
660  bands[num_bands++] = b;
661 
662  align_get_bits(&s->gb);
663  /* [DIRAC_STD] 13.4.2 subband() */
664  b->length = svq3_get_ue_golomb(&s->gb);
665  if (b->length) {
666  b->quant = svq3_get_ue_golomb(&s->gb);
667  align_get_bits(&s->gb);
668  b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
669  b->length = FFMIN(b->length, FFMAX(get_bits_left(&s->gb)/8, 0));
670  skip_bits_long(&s->gb, b->length*8);
671  }
672  }
673  /* arithmetic coding has inter-level dependencies, so we can only execute one level at a time */
674  if (s->is_arith)
675  avctx->execute(avctx, decode_subband_arith, &s->plane[comp].band[level][!!level],
676  NULL, 4-!!level, sizeof(SubBand));
677  }
678  /* golomb coding has no inter-level dependencies, so we can execute all subbands in parallel */
679  if (!s->is_arith)
680  avctx->execute(avctx, decode_subband_golomb, bands, NULL, num_bands, sizeof(SubBand*));
681 }
682 
683 /* [DIRAC_STD] 13.5.5.2 Luma slice subband data. luma_slice_band(level,orient,sx,sy) --> if b2 == NULL */
684 /* [DIRAC_STD] 13.5.5.3 Chroma slice subband data. chroma_slice_band(level,orient,sx,sy) --> if b2 != NULL */
685 static void lowdelay_subband(DiracContext *s, GetBitContext *gb, int quant,
686  int slice_x, int slice_y, int bits_end,
687  SubBand *b1, SubBand *b2)
688 {
689  int left = b1->width * slice_x / s->lowdelay.num_x;
690  int right = b1->width *(slice_x+1) / s->lowdelay.num_x;
691  int top = b1->height * slice_y / s->lowdelay.num_y;
692  int bottom = b1->height *(slice_y+1) / s->lowdelay.num_y;
693 
694  int qfactor = qscale_tab[FFMIN(quant, MAX_QUANT)];
695  int qoffset = qoffset_intra_tab[FFMIN(quant, MAX_QUANT)];
696 
697  IDWTELEM *buf1 = b1->ibuf + top * b1->stride;
698  IDWTELEM *buf2 = b2 ? b2->ibuf + top * b2->stride : NULL;
699  int x, y;
700  /* we have to constantly check for overread since the spec explicitly
701  requires this, with the meaning that all remaining coeffs are set to 0 */
702  if (get_bits_count(gb) >= bits_end)
703  return;
704 
705  for (y = top; y < bottom; y++) {
706  for (x = left; x < right; x++) {
707  buf1[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
708  if (get_bits_count(gb) >= bits_end)
709  return;
710  if (buf2) {
711  buf2[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
712  if (get_bits_count(gb) >= bits_end)
713  return;
714  }
715  }
716  buf1 += b1->stride;
717  if (buf2)
718  buf2 += b2->stride;
719  }
720 }
721 
722 struct lowdelay_slice {
723  GetBitContext gb;
724  int slice_x;
725  int slice_y;
726  int bytes;
727 };
728 
729 
730 /**
731  * Dirac Specification ->
732  * 13.5.2 Slices. slice(sx,sy)
733  */
734 static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
735 {
736  DiracContext *s = avctx->priv_data;
737  struct lowdelay_slice *slice = arg;
738  GetBitContext *gb = &slice->gb;
739  enum dirac_subband orientation;
740  int level, quant, chroma_bits, chroma_end;
741 
742  int quant_base = get_bits(gb, 7); /*[DIRAC_STD] qindex */
743  int length_bits = av_log2(8 * slice->bytes)+1;
744  int luma_bits = get_bits_long(gb, length_bits);
745  int luma_end = get_bits_count(gb) + FFMIN(luma_bits, get_bits_left(gb));
746 
747  /* [DIRAC_STD] 13.5.5.2 luma_slice_band */
748  for (level = 0; level < s->wavelet_depth; level++)
749  for (orientation = !!level; orientation < 4; orientation++) {
750  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
751  lowdelay_subband(s, gb, quant, slice->slice_x, slice->slice_y, luma_end,
752  &s->plane[0].band[level][orientation], NULL);
753  }
754 
755  /* consume any unused bits from luma */
756  skip_bits_long(gb, get_bits_count(gb) - luma_end);
757 
758  chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
759  chroma_end = get_bits_count(gb) + FFMIN(chroma_bits, get_bits_left(gb));
760  /* [DIRAC_STD] 13.5.5.3 chroma_slice_band */
761  for (level = 0; level < s->wavelet_depth; level++)
762  for (orientation = !!level; orientation < 4; orientation++) {
763  quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
764  lowdelay_subband(s, gb, quant, slice->slice_x, slice->slice_y, chroma_end,
765  &s->plane[1].band[level][orientation],
766  &s->plane[2].band[level][orientation]);
767  }
768 
769  return 0;
770 }
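/* Slice layout recap: a 7-bit quantiser index, a luma length field of
 * av_log2(8*slice->bytes)+1 bits, the luma subband coefficients, then the
 * remaining bits of the slice hold the interleaved Cb/Cr coefficients. */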
771 
772 /**
773  * Dirac Specification ->
774  * 13.5.1 low_delay_transform_data()
775  */
776 static int decode_lowdelay(DiracContext *s)
777 {
778  AVCodecContext *avctx = s->avctx;
779  int slice_x, slice_y, bytes, bufsize;
780  const uint8_t *buf;
781  struct lowdelay_slice *slices;
782  int slice_num = 0;
783 
784  slices = av_mallocz_array(s->lowdelay.num_x, s->lowdelay.num_y * sizeof(struct lowdelay_slice));
785  if (!slices)
786  return AVERROR(ENOMEM);
787 
788  align_get_bits(&s->gb);
789  /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
790  buf = s->gb.buffer + get_bits_count(&s->gb)/8;
791  bufsize = get_bits_left(&s->gb);
792 
793  for (slice_y = 0; bufsize > 0 && slice_y < s->lowdelay.num_y; slice_y++)
794  for (slice_x = 0; bufsize > 0 && slice_x < s->lowdelay.num_x; slice_x++) {
795  bytes = (slice_num+1) * s->lowdelay.bytes.num / s->lowdelay.bytes.den
796  - slice_num * s->lowdelay.bytes.num / s->lowdelay.bytes.den;
797 
798  slices[slice_num].bytes = bytes;
799  slices[slice_num].slice_x = slice_x;
800  slices[slice_num].slice_y = slice_y;
801  init_get_bits(&slices[slice_num].gb, buf, bufsize);
802  slice_num++;
803 
804  buf += bytes;
805  if (bufsize/8 >= bytes)
806  bufsize -= bytes*8;
807  else
808  bufsize = 0;
809  }
810 
811  avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
812  sizeof(struct lowdelay_slice)); /* [DIRAC_STD] 13.5.2 Slices */
813  intra_dc_prediction(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
814  intra_dc_prediction(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
815  intra_dc_prediction(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
816  av_free(slices);
817  return 0;
818 }
819 
820 static void init_planes(DiracContext *s)
821 {
822  int i, w, h, level, orientation;
823 
824  for (i = 0; i < 3; i++) {
825  Plane *p = &s->plane[i];
826 
827  p->width = s->source.width >> (i ? s->chroma_x_shift : 0);
828  p->height = s->source.height >> (i ? s->chroma_y_shift : 0);
829  p->idwt_width = w = CALC_PADDING(p->width , s->wavelet_depth);
830  p->idwt_height = h = CALC_PADDING(p->height, s->wavelet_depth);
831  p->idwt_stride = FFALIGN(p->idwt_width, 8);
832 
833  for (level = s->wavelet_depth-1; level >= 0; level--) {
834  w = w>>1;
835  h = h>>1;
836  for (orientation = !!level; orientation < 4; orientation++) {
837  SubBand *b = &p->band[level][orientation];
838 
839  b->ibuf = p->idwt_buf;
840  b->level = level;
841  b->stride = p->idwt_stride << (s->wavelet_depth - level);
842  b->width = w;
843  b->height = h;
844  b->orientation = orientation;
845 
846  if (orientation & 1)
847  b->ibuf += w;
848  if (orientation > 1)
849  b->ibuf += b->stride>>1;
850 
851  if (level)
852  b->parent = &p->band[level-1][orientation];
853  }
854  }
855 
856  if (i > 0) {
857  p->xblen = s->plane[0].xblen >> s->chroma_x_shift;
858  p->yblen = s->plane[0].yblen >> s->chroma_y_shift;
859  p->xbsep = s->plane[0].xbsep >> s->chroma_x_shift;
860  p->ybsep = s->plane[0].ybsep >> s->chroma_y_shift;
861  }
862 
863  p->xoffset = (p->xblen - p->xbsep)/2;
864  p->yoffset = (p->yblen - p->ybsep)/2;
865  }
866 }
867 
868 /**
869  * Unpack the motion compensation parameters
870  * Dirac Specification ->
871  * 11.2 Picture prediction data. picture_prediction()
872  */
873 static int dirac_unpack_prediction_parameters(DiracContext *s)
874 {
875  static const uint8_t default_blen[] = { 4, 12, 16, 24 };
876 
877  GetBitContext *gb = &s->gb;
878  unsigned idx, ref;
879 
880  align_get_bits(gb);
881  /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */
882  /* Luma and Chroma are equal. 11.2.3 */
883  idx = svq3_get_ue_golomb(gb); /* [DIRAC_STD] index */
884 
885  if (idx > 4) {
886  av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n");
887  return AVERROR_INVALIDDATA;
888  }
889 
890  if (idx == 0) {
891  s->plane[0].xblen = svq3_get_ue_golomb(gb);
892  s->plane[0].yblen = svq3_get_ue_golomb(gb);
893  s->plane[0].xbsep = svq3_get_ue_golomb(gb);
894  s->plane[0].ybsep = svq3_get_ue_golomb(gb);
895  } else {
896  /*[DIRAC_STD] preset_block_params(index). Table 11.1 */
897  s->plane[0].xblen = default_blen[idx-1];
898  s->plane[0].yblen = default_blen[idx-1];
899  s->plane[0].xbsep = 4 * idx;
900  s->plane[0].ybsep = 4 * idx;
901  }
902  /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
903  Calculated in function dirac_unpack_block_motion_data */
904 
905  if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
906  s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
907  !s->plane[0].xblen || !s->plane[0].yblen) {
908  av_log(s->avctx, AV_LOG_ERROR,
909  "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
910  s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
911  return AVERROR_INVALIDDATA;
912  }
913  if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
914  av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
915  return AVERROR_INVALIDDATA;
916  }
917  if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) {
918  av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n");
919  return AVERROR_INVALIDDATA;
920  }
921  if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) {
922  av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n");
923  return AVERROR_PATCHWELCOME;
924  }
925 
926  /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision()
927  Read motion vector precision */
928  s->mv_precision = svq3_get_ue_golomb(gb);
929  if (s->mv_precision > 3) {
930  av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n");
931  return AVERROR_INVALIDDATA;
932  }
933 
934  /*[DIRAC_STD] 11.2.6 Global motion. global_motion()
935  Read the global motion compensation parameters */
936  s->globalmc_flag = get_bits1(gb);
937  if (s->globalmc_flag) {
938  memset(s->globalmc, 0, sizeof(s->globalmc));
939  /* [DIRAC_STD] pan_tilt(gparams) */
940  for (ref = 0; ref < s->num_refs; ref++) {
941  if (get_bits1(gb)) {
942  s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb);
943  s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb);
944  }
945  /* [DIRAC_STD] zoom_rotate_shear(gparams)
946  zoom/rotation/shear parameters */
947  if (get_bits1(gb)) {
948  s->globalmc[ref].zrs_exp = svq3_get_ue_golomb(gb);
949  s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb);
950  s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb);
951  s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb);
952  s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb);
953  } else {
954  s->globalmc[ref].zrs[0][0] = 1;
955  s->globalmc[ref].zrs[1][1] = 1;
956  }
957  /* [DIRAC_STD] perspective(gparams) */
958  if (get_bits1(gb)) {
959  s->globalmc[ref].perspective_exp = svq3_get_ue_golomb(gb);
960  s->globalmc[ref].perspective[0] = dirac_get_se_golomb(gb);
961  s->globalmc[ref].perspective[1] = dirac_get_se_golomb(gb);
962  }
963  if (s->globalmc[ref].perspective_exp + (uint64_t)s->globalmc[ref].zrs_exp > 30) {
964  return AVERROR_INVALIDDATA;
965  }
966 
967  }
968  }
969 
970  /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode()
971  Picture prediction mode, not currently used. */
972  if (svq3_get_ue_golomb(gb)) {
973  av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n");
974  return AVERROR_INVALIDDATA;
975  }
976 
977  /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights()
978  just data read, weight calculation will be done later on. */
979  s->weight_log2denom = 1;
980  s->weight[0] = 1;
981  s->weight[1] = 1;
982 
983  if (get_bits1(gb)) {
984  s->weight_log2denom = svq3_get_ue_golomb(gb);
985  s->weight[0] = dirac_get_se_golomb(gb);
986  if (s->num_refs == 2)
987  s->weight[1] = dirac_get_se_golomb(gb);
988  }
989  return 0;
990 }
991 
992 /**
993  * Dirac Specification ->
994  * 11.3 Wavelet transform data. wavelet_transform()
995  */
996 static int dirac_unpack_idwt_params(DiracContext *s)
997 {
998  GetBitContext *gb = &s->gb;
999  int i, level;
1000  unsigned tmp;
1001 
1002 #define CHECKEDREAD(dst, cond, errmsg) \
1003  tmp = svq3_get_ue_golomb(gb); \
1004  if (cond) { \
1005  av_log(s->avctx, AV_LOG_ERROR, errmsg); \
1006  return AVERROR_INVALIDDATA; \
1007  }\
1008  dst = tmp;
1009 
1010  align_get_bits(gb);
1011 
1012  s->zero_res = s->num_refs ? get_bits1(gb) : 0;
1013  if (s->zero_res)
1014  return 0;
1015 
1016  /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */
1017  CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n")
1018 
1019  CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n")
1020 
1021  if (!s->low_delay) {
1022  /* Codeblock parameters (core syntax only) */
1023  if (get_bits1(gb)) {
1024  for (i = 0; i <= s->wavelet_depth; i++) {
1025  CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
1026  CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
1027  }
1028 
1029  CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
1030  } else
1031  for (i = 0; i <= s->wavelet_depth; i++)
1032  s->codeblock[i].width = s->codeblock[i].height = 1;
1033  } else {
1034  /* Slice parameters + quantization matrix*/
1035  /*[DIRAC_STD] 11.3.4 Slice coding Parameters (low delay syntax only). slice_parameters() */
1036  s->lowdelay.num_x = svq3_get_ue_golomb(gb);
1037  s->lowdelay.num_y = svq3_get_ue_golomb(gb);
1038  if (s->lowdelay.num_x * s->lowdelay.num_y == 0 ||
1039  s->lowdelay.num_x * (uint64_t)s->lowdelay.num_y > INT_MAX) {
1040  av_log(s->avctx,AV_LOG_ERROR,"Invalid numx/y\n");
1041  s->lowdelay.num_x = s->lowdelay.num_y = 0;
1042  return AVERROR_INVALIDDATA;
1043  }
1044 
1045  s->lowdelay.bytes.num = svq3_get_ue_golomb(gb);
1046  s->lowdelay.bytes.den = svq3_get_ue_golomb(gb);
1047 
1048  if (s->lowdelay.bytes.den <= 0) {
1049  av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n");
1050  return AVERROR_INVALIDDATA;
1051  }
1052 
1053  /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */
1054  if (get_bits1(gb)) {
1055  av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n");
1056  /* custom quantization matrix */
1057  s->lowdelay.quant[0][0] = svq3_get_ue_golomb(gb);
1058  for (level = 0; level < s->wavelet_depth; level++) {
1059  s->lowdelay.quant[level][1] = svq3_get_ue_golomb(gb);
1060  s->lowdelay.quant[level][2] = svq3_get_ue_golomb(gb);
1061  s->lowdelay.quant[level][3] = svq3_get_ue_golomb(gb);
1062  }
1063  } else {
1064  if (s->wavelet_depth > 4) {
1065  av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth);
1066  return AVERROR_INVALIDDATA;
1067  }
1068  /* default quantization matrix */
1069  for (level = 0; level < s->wavelet_depth; level++)
1070  for (i = 0; i < 4; i++) {
1071  s->lowdelay.quant[level][i] = default_qmat[s->wavelet_idx][level][i];
1072  /* haar with no shift differs for different depths */
1073  if (s->wavelet_idx == 3)
1074  s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level);
1075  }
1076  }
1077  }
1078  return 0;
1079 }
1080 
1081 static inline int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
1082 {
1083  static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
1084 
1085  if (!(x|y))
1086  return 0;
1087  else if (!y)
1088  return sbsplit[-1];
1089  else if (!x)
1090  return sbsplit[-stride];
1091 
1092  return avgsplit[sbsplit[-1] + sbsplit[-stride] + sbsplit[-stride-1]];
1093 }
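/* avgsplit[] maps the sum of the three neighbouring split levels (each 0..2)
 * to their rounded mean. */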
1094 
1095 static inline int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
1096 {
1097  int pred;
1098 
1099  if (!(x|y))
1100  return 0;
1101  else if (!y)
1102  return block[-1].ref & refmask;
1103  else if (!x)
1104  return block[-stride].ref & refmask;
1105 
1106  /* return the majority */
1107  pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
1108  return (pred >> 1) & refmask;
1109 }
1110 
1111 static inline void pred_block_dc(DiracBlock *block, int stride, int x, int y)
1112 {
1113  int i, n = 0;
1114 
1115  memset(block->u.dc, 0, sizeof(block->u.dc));
1116 
1117  if (x && !(block[-1].ref & 3)) {
1118  for (i = 0; i < 3; i++)
1119  block->u.dc[i] += block[-1].u.dc[i];
1120  n++;
1121  }
1122 
1123  if (y && !(block[-stride].ref & 3)) {
1124  for (i = 0; i < 3; i++)
1125  block->u.dc[i] += block[-stride].u.dc[i];
1126  n++;
1127  }
1128 
1129  if (x && y && !(block[-1-stride].ref & 3)) {
1130  for (i = 0; i < 3; i++)
1131  block->u.dc[i] += block[-1-stride].u.dc[i];
1132  n++;
1133  }
1134 
1135  if (n == 2) {
1136  for (i = 0; i < 3; i++)
1137  block->u.dc[i] = (block->u.dc[i]+1)>>1;
1138  } else if (n == 3) {
1139  for (i = 0; i < 3; i++)
1140  block->u.dc[i] = divide3(block->u.dc[i]);
1141  }
1142 }
1143 
1144 static inline void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
1145 {
1146  int16_t *pred[3];
1147  int refmask = ref+1;
1148  int mask = refmask | DIRAC_REF_MASK_GLOBAL; /* exclude gmc blocks */
1149  int n = 0;
1150 
1151  if (x && (block[-1].ref & mask) == refmask)
1152  pred[n++] = block[-1].u.mv[ref];
1153 
1154  if (y && (block[-stride].ref & mask) == refmask)
1155  pred[n++] = block[-stride].u.mv[ref];
1156 
1157  if (x && y && (block[-stride-1].ref & mask) == refmask)
1158  pred[n++] = block[-stride-1].u.mv[ref];
1159 
1160  switch (n) {
1161  case 0:
1162  block->u.mv[ref][0] = 0;
1163  block->u.mv[ref][1] = 0;
1164  break;
1165  case 1:
1166  block->u.mv[ref][0] = pred[0][0];
1167  block->u.mv[ref][1] = pred[0][1];
1168  break;
1169  case 2:
1170  block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
1171  block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
1172  break;
1173  case 3:
1174  block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
1175  block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
1176  break;
1177  }
1178 }
1179 
1180 static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
1181 {
1182  int ez = s->globalmc[ref].zrs_exp;
1183  int ep = s->globalmc[ref].perspective_exp;
1184  int (*A)[2] = s->globalmc[ref].zrs;
1185  int *b = s->globalmc[ref].pan_tilt;
1186  int *c = s->globalmc[ref].perspective;
1187 
1188  int m = (1<<ep) - (c[0]*x + c[1]*y);
1189  int64_t mx = m * (int64_t)((A[0][0] * x + A[0][1]*y) + (1<<ez) * b[0]);
1190  int64_t my = m * (int64_t)((A[1][0] * x + A[1][1]*y) + (1<<ez) * b[1]);
1191 
1192  block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
1193  block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
1194 }
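/* Global motion here is the zoom/rotate/shear matrix A plus the pan/tilt vector b
 * (fixed point with zrs_exp fractional bits), modulated by the perspective term m
 * (perspective_exp fractional bits); the final shift removes both exponents with
 * rounding. */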
1195 
1196 static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block,
1197  int stride, int x, int y)
1198 {
1199  int i;
1200 
1201  block->ref = pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF1);
1202  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF1);
1203 
1204  if (s->num_refs == 2) {
1205  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF2);
1206  block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF2) << 1;
1207  }
1208 
1209  if (!block->ref) {
1210  pred_block_dc(block, stride, x, y);
1211  for (i = 0; i < 3; i++)
1212  block->u.dc[i] += (unsigned)dirac_get_arith_int(arith+1+i, CTX_DC_F1, CTX_DC_DATA);
1213  return;
1214  }
1215 
1216  if (s->globalmc_flag) {
1217  block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_GLOBAL);
1218  block->ref ^= dirac_get_arith_bit(arith, CTX_GLOBAL_BLOCK) << 2;
1219  }
1220 
1221  for (i = 0; i < s->num_refs; i++)
1222  if (block->ref & (i+1)) {
1223  if (block->ref & DIRAC_REF_MASK_GLOBAL) {
1224  global_mv(s, block, x, y, i);
1225  } else {
1226  pred_mv(block, stride, x, y, i);
1227  block->u.mv[i][0] += (unsigned)dirac_get_arith_int(arith + 4 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1228  block->u.mv[i][1] += (unsigned)dirac_get_arith_int(arith + 5 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
1229  }
1230  }
1231 }
1232 
1233 /**
1234  * Copies the current block to the other blocks covered by the current superblock split mode
1235  */
1236 static void propagate_block_data(DiracBlock *block, int stride, int size)
1237 {
1238  int x, y;
1239  DiracBlock *dst = block;
1240 
1241  for (x = 1; x < size; x++)
1242  dst[x] = *block;
1243 
1244  for (y = 1; y < size; y++) {
1245  dst += stride;
1246  for (x = 0; x < size; x++)
1247  dst[x] = *block;
1248  }
1249 }
1250 
1251 /**
1252  * Dirac Specification ->
1253  * 12. Block motion data syntax
1254  */
1255 static int dirac_unpack_block_motion_data(DiracContext *s)
1256 {
1257  GetBitContext *gb = &s->gb;
1258  uint8_t *sbsplit = s->sbsplit;
1259  int i, x, y, q, p;
1260  DiracArith arith[8];
1261 
1262  align_get_bits(gb);
1263 
1264  /* [DIRAC_STD] 11.2.4 and 12.2.1 Number of blocks and superblocks */
1265  s->sbwidth = DIVRNDUP(s->source.width, 4*s->plane[0].xbsep);
1266  s->sbheight = DIVRNDUP(s->source.height, 4*s->plane[0].ybsep);
1267  s->blwidth = 4 * s->sbwidth;
1268  s->blheight = 4 * s->sbheight;
1269 
1270  /* [DIRAC_STD] 12.3.1 Superblock splitting modes. superblock_split_modes()
1271  decode superblock split modes */
1272  ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb)); /* svq3_get_ue_golomb(gb) is the length */
1273  for (y = 0; y < s->sbheight; y++) {
1274  for (x = 0; x < s->sbwidth; x++) {
1275  unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
1276  if (split > 2)
1277  return AVERROR_INVALIDDATA;
1278  sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
1279  }
1280  sbsplit += s->sbwidth;
1281  }
1282 
1283  /* setup arith decoding */
1284  ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb));
1285  for (i = 0; i < s->num_refs; i++) {
1286  ff_dirac_init_arith_decoder(arith + 4 + 2 * i, gb, svq3_get_ue_golomb(gb));
1287  ff_dirac_init_arith_decoder(arith + 5 + 2 * i, gb, svq3_get_ue_golomb(gb));
1288  }
1289  for (i = 0; i < 3; i++)
1290  ff_dirac_init_arith_decoder(arith+1+i, gb, svq3_get_ue_golomb(gb));
1291 
1292  for (y = 0; y < s->sbheight; y++)
1293  for (x = 0; x < s->sbwidth; x++) {
1294  int blkcnt = 1 << s->sbsplit[y * s->sbwidth + x];
1295  int step = 4 >> s->sbsplit[y * s->sbwidth + x];
1296 
1297  for (q = 0; q < blkcnt; q++)
1298  for (p = 0; p < blkcnt; p++) {
1299  int bx = 4 * x + p*step;
1300  int by = 4 * y + q*step;
1301  DiracBlock *block = &s->blmotion[by*s->blwidth + bx];
1302  decode_block_params(s, arith, block, s->blwidth, bx, by);
1303  propagate_block_data(block, s->blwidth, step);
1304  }
1305  }
1306 
1307  return 0;
1308 }
1309 
1310 static int weight(int i, int blen, int offset)
1311 {
1312 #define ROLLOFF(i) offset == 1 ? ((i) ? 5 : 3) : \
1313  (1 + (6*(i) + offset - 1) / (2*offset - 1))
1314 
1315  if (i < 2*offset)
1316  return ROLLOFF(i);
1317  else if (i > blen-1 - 2*offset)
1318  return ROLLOFF(blen-1 - i);
1319  return 8;
1320 }
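/* OBMC weights are in eighths: they ramp up across the 2*offset overlap at each
 * block edge and are 8 in the non-overlapped centre. The row weight wy and the
 * column weight multiply, so a fully interior sample gets 8*8 = 64 and overlapping
 * blocks are designed to sum to the same total. */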
1321 
1322 static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride,
1323  int left, int right, int wy)
1324 {
1325  int x;
1326  for (x = 0; left && x < p->xblen >> 1; x++)
1327  obmc_weight[x] = wy*8;
1328  for (; x < p->xblen >> right; x++)
1329  obmc_weight[x] = wy*weight(x, p->xblen, p->xoffset);
1330  for (; x < p->xblen; x++)
1331  obmc_weight[x] = wy*8;
1332  for (; x < stride; x++)
1333  obmc_weight[x] = 0;
1334 }
1335 
1336 static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride,
1337  int left, int right, int top, int bottom)
1338 {
1339  int y;
1340  for (y = 0; top && y < p->yblen >> 1; y++) {
1341  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1342  obmc_weight += stride;
1343  }
1344  for (; y < p->yblen >> bottom; y++) {
1345  int wy = weight(y, p->yblen, p->yoffset);
1346  init_obmc_weight_row(p, obmc_weight, stride, left, right, wy);
1347  obmc_weight += stride;
1348  }
1349  for (; y < p->yblen; y++) {
1350  init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
1351  obmc_weight += stride;
1352  }
1353 }
1354 
1355 static void init_obmc_weights(DiracContext *s, Plane *p, int by)
1356 {
1357  int top = !by;
1358  int bottom = by == s->blheight-1;
1359 
1360  /* don't bother re-initing for rows 2 to blheight-2, the weights don't change */
1361  if (top || bottom || by == 1) {
1362  init_obmc_weight(p, s->obmc_weight[0], MAX_BLOCKSIZE, 1, 0, top, bottom);
1363  init_obmc_weight(p, s->obmc_weight[1], MAX_BLOCKSIZE, 0, 0, top, bottom);
1364  init_obmc_weight(p, s->obmc_weight[2], MAX_BLOCKSIZE, 0, 1, top, bottom);
1365  }
1366 }
1367 
1368 static const uint8_t epel_weights[4][4][4] = {
1369  {{ 16, 0, 0, 0 },
1370  { 12, 4, 0, 0 },
1371  { 8, 8, 0, 0 },
1372  { 4, 12, 0, 0 }},
1373  {{ 12, 0, 4, 0 },
1374  { 9, 3, 3, 1 },
1375  { 6, 6, 2, 2 },
1376  { 3, 9, 1, 3 }},
1377  {{ 8, 0, 8, 0 },
1378  { 6, 2, 6, 2 },
1379  { 4, 4, 4, 4 },
1380  { 2, 6, 2, 6 }},
1381  {{ 4, 0, 12, 0 },
1382  { 3, 1, 9, 3 },
1383  { 2, 2, 6, 6 },
1384  { 1, 3, 3, 9 }}
1385 };
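/* epel_weights[dy][dx] are bilinear weights for the four half-pel planes; each row
 * sums to 16, and dx/dy are the remaining sub-half-pel offsets (mx&3, my&3) used in
 * mc_subpel() below. */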
1386 
1387 /**
1388  * For block x,y, determine which of the hpel planes to do bilinear
1389  * interpolation from and set src[] to the location in each hpel plane
1390  * to MC from.
1391  *
1392  * @return the index of the put_dirac_pixels_tab function to use
1393  * 0 for 1 plane (fpel,hpel), 1 for 2 planes (qpel), 2 for 4 planes (qpel), and 3 for epel
1394  */
1395 static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t **src,
1396  int x, int y, int ref, int plane)
1397 {
1398  Plane *p = &s->plane[plane];
1399  uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane];
1400  int motion_x = block->u.mv[ref][0];
1401  int motion_y = block->u.mv[ref][1];
1402  int mx, my, i, epel, nplanes = 0;
1403 
1404  if (plane) {
1405  motion_x >>= s->chroma_x_shift;
1406  motion_y >>= s->chroma_y_shift;
1407  }
1408 
1409  mx = motion_x & ~(-1U << s->mv_precision);
1410  my = motion_y & ~(-1U << s->mv_precision);
1411  motion_x >>= s->mv_precision;
1412  motion_y >>= s->mv_precision;
1413  /* normalize subpel coordinates to epel */
1414  /* TODO: template this function? */
1415  mx <<= 3 - s->mv_precision;
1416  my <<= 3 - s->mv_precision;
1417 
1418  x += motion_x;
1419  y += motion_y;
1420  epel = (mx|my)&1;
1421 
1422  /* hpel position */
1423  if (!((mx|my)&3)) {
1424  nplanes = 1;
1425  src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
1426  } else {
1427  /* qpel or epel */
1428  nplanes = 4;
1429  for (i = 0; i < 4; i++)
1430  src[i] = ref_hpel[i] + y*p->stride + x;
1431 
1432  /* if we're interpolating in the right/bottom halves, adjust the planes as needed
1433  we increment x/y because the edge changes for half of the pixels */
1434  if (mx > 4) {
1435  src[0] += 1;
1436  src[2] += 1;
1437  x++;
1438  }
1439  if (my > 4) {
1440  src[0] += p->stride;
1441  src[1] += p->stride;
1442  y++;
1443  }
1444 
1445  /* hpel planes are:
1446  [0]: F [1]: H
1447  [2]: V [3]: C */
1448  if (!epel) {
1449  /* check if we really only need 2 planes since either mx or my is
1450  a hpel position. (epel weights of 0 handle this there) */
1451  if (!(mx&3)) {
1452  /* mx == 0: average [0] and [2]
1453  mx == 4: average [1] and [3] */
1454  src[!mx] = src[2 + !!mx];
1455  nplanes = 2;
1456  } else if (!(my&3)) {
1457  src[0] = src[(my>>1) ];
1458  src[1] = src[(my>>1)+1];
1459  nplanes = 2;
1460  }
1461  } else {
1462  /* adjust the ordering if needed so the weights work */
1463  if (mx > 4) {
1464  FFSWAP(const uint8_t *, src[0], src[1]);
1465  FFSWAP(const uint8_t *, src[2], src[3]);
1466  }
1467  if (my > 4) {
1468  FFSWAP(const uint8_t *, src[0], src[2]);
1469  FFSWAP(const uint8_t *, src[1], src[3]);
1470  }
1471  src[4] = epel_weights[my&3][mx&3];
1472  }
1473  }
1474 
1475  /* fixme: v/h _edge_pos */
1476  if (x + p->xblen > p->width +EDGE_WIDTH/2 ||
1477  y + p->yblen > p->height+EDGE_WIDTH/2 ||
1478  x < 0 || y < 0) {
1479  for (i = 0; i < nplanes; i++) {
1480  s->vdsp.emulated_edge_mc(s->edge_emu_buffer[i], src[i],
1481  p->stride, p->stride,
1482  p->xblen, p->yblen, x, y,
1483  p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2);
1484  src[i] = s->edge_emu_buffer[i];
1485  }
1486  }
1487  return (nplanes>>1) + epel;
1488 }
1489 
1490 static void add_dc(uint16_t *dst, int dc, int stride,
1491  uint8_t *obmc_weight, int xblen, int yblen)
1492 {
1493  int x, y;
1494  dc += 128;
1495 
1496  for (y = 0; y < yblen; y++) {
1497  for (x = 0; x < xblen; x += 2) {
1498  dst[x ] += dc * obmc_weight[x ];
1499  dst[x+1] += dc * obmc_weight[x+1];
1500  }
1501  dst += stride;
1502  obmc_weight += MAX_BLOCKSIZE;
1503  }
1504 }
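/* The +128 re-centres the signed DC value into the unsigned 8-bit pixel range before
 * it is scaled by the OBMC weights and accumulated into the 16-bit MC buffer. */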
1505 
1506 static void block_mc(DiracContext *s, DiracBlock *block,
1507  uint16_t *mctmp, uint8_t *obmc_weight,
1508  int plane, int dstx, int dsty)
1509 {
1510  Plane *p = &s->plane[plane];
1511  const uint8_t *src[5];
1512  int idx;
1513 
1514  switch (block->ref&3) {
1515  case 0: /* DC */
1516  add_dc(mctmp, block->u.dc[plane], p->stride, obmc_weight, p->xblen, p->yblen);
1517  return;
1518  case 1:
1519  case 2:
1520  idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
1521  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1522  if (s->weight_func)
1523  s->weight_func(s->mcscratch, p->stride, s->weight_log2denom,
1524  s->weight[0] + s->weight[1], p->yblen);
1525  break;
1526  case 3:
1527  idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
1528  s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1529  idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
1530  if (s->biweight_func) {
1531  /* fixme: +32 is a quick hack */
1532  s->put_pixels_tab[idx](s->mcscratch + 32, src, p->stride, p->yblen);
1533  s->biweight_func(s->mcscratch, s->mcscratch+32, p->stride, s->weight_log2denom,
1534  s->weight[0], s->weight[1], p->yblen);
1535  } else
1536  s->avg_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
1537  break;
1538  }
1539  s->add_obmc(mctmp, s->mcscratch, p->stride, obmc_weight, p->yblen);
1540 }
1541 
1542 static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
1543 {
1544  Plane *p = &s->plane[plane];
1545  int x, dstx = p->xbsep - p->xoffset;
1546 
1547  block_mc(s, block, mctmp, s->obmc_weight[0], plane, -p->xoffset, dsty);
1548  mctmp += p->xbsep;
1549 
1550  for (x = 1; x < s->blwidth-1; x++) {
1551  block_mc(s, block+x, mctmp, s->obmc_weight[1], plane, dstx, dsty);
1552  dstx += p->xbsep;
1553  mctmp += p->xbsep;
1554  }
1555  block_mc(s, block+x, mctmp, s->obmc_weight[2], plane, dstx, dsty);
1556 }
1557 
1558 static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
1559 {
1560  int idx = 0;
1561  if (xblen > 8)
1562  idx = 1;
1563  if (xblen > 16)
1564  idx = 2;
1565 
1566  memcpy(s->put_pixels_tab, s->diracdsp.put_dirac_pixels_tab[idx], sizeof(s->put_pixels_tab));
1567  memcpy(s->avg_pixels_tab, s->diracdsp.avg_dirac_pixels_tab[idx], sizeof(s->avg_pixels_tab));
1568  s->add_obmc = s->diracdsp.add_dirac_obmc[idx];
1569  if (s->weight_log2denom > 1 || s->weight[0] != 1 || s->weight[1] != 1) {
1570  s->weight_func = s->diracdsp.weight_dirac_pixels_tab[idx];
1571  s->biweight_func = s->diracdsp.biweight_dirac_pixels_tab[idx];
1572  } else {
1573  s->weight_func = NULL;
1574  s->biweight_func = NULL;
1575  }
1576 }
1577 
1578 static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
1579 {
1580  /* chroma allocates an edge of 8 when subsampled
1581  which for 4:2:2 means an h edge of 16 and v edge of 8
1582  just use 8 for everything for the moment */
1583  int i, edge = EDGE_WIDTH/2;
1584 
1585  ref->hpel[plane][0] = ref->avframe->data[plane];
1586  s->mpvencdsp.draw_edges(ref->hpel[plane][0], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */
1587 
1588  /* no need for hpel if we only have fpel vectors */
1589  if (!s->mv_precision)
1590  return 0;
1591 
1592  for (i = 1; i < 4; i++) {
1593  if (!ref->hpel_base[plane][i])
1594  ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
1595  if (!ref->hpel_base[plane][i]) {
1596  return AVERROR(ENOMEM);
1597  }
1598  /* we need to be 16-byte aligned even for chroma */
1599  ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
1600  }
1601 
1602  if (!ref->interpolated[plane]) {
1603  s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
1604  ref->hpel[plane][3], ref->hpel[plane][0],
1605  ref->avframe->linesize[plane], width, height);
1606  s->mpvencdsp.draw_edges(ref->hpel[plane][1], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1607  s->mpvencdsp.draw_edges(ref->hpel[plane][2], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1608  s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
1609  }
1610  ref->interpolated[plane] = 1;
1611 
1612  return 0;
1613 }
1614 
1615 /**
1616  * Dirac Specification ->
1617  * 13.0 Transform data syntax. transform_data()
1618  */
1619 static int dirac_decode_frame_internal(DiracContext *s)
1620 {
1621  DWTContext d;
1622  int y, i, comp, dsty;
1623  int ret;
1624 
1625  if (s->low_delay) {
1626  /* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
1627  for (comp = 0; comp < 3; comp++) {
1628  Plane *p = &s->plane[comp];
1629  memset(p->idwt_buf, 0, p->idwt_stride * p->idwt_height * sizeof(IDWTELEM));
1630  }
1631  if (!s->zero_res) {
1632  if ((ret = decode_lowdelay(s)) < 0)
1633  return ret;
1634  }
1635  }
1636 
1637  for (comp = 0; comp < 3; comp++) {
1638  Plane *p = &s->plane[comp];
1639  uint8_t *frame = s->current_picture->avframe->data[comp];
1640 
1641  /* FIXME: small resolutions */
1642  for (i = 0; i < 4; i++)
1643  s->edge_emu_buffer[i] = s->edge_emu_buffer_base + i*FFALIGN(p->width, 16);
1644 
1645  if (!s->zero_res && !s->low_delay)
1646  {
1647  memset(p->idwt_buf, 0, p->idwt_stride * p->idwt_height * sizeof(IDWTELEM));
1648  decode_component(s, comp); /* [DIRAC_STD] 13.4.1 core_transform_data() */
1649  }
1650  ret = ff_spatial_idwt_init2(&d, p->idwt_buf, p->idwt_width, p->idwt_height, p->idwt_stride,
1651  s->wavelet_idx+2, s->wavelet_depth, p->idwt_tmp);
1652  if (ret < 0)
1653  return ret;
1654 
1655  if (!s->num_refs) { /* intra */
1656  for (y = 0; y < p->height; y += 16) {
1657  ff_spatial_idwt_slice2(&d, y+16); /* decode */
1658  s->diracdsp.put_signed_rect_clamped(frame + y*p->stride, p->stride,
1659  p->idwt_buf + y*p->idwt_stride, p->idwt_stride, p->width, 16);
1660  }
1661  } else { /* inter */
1662  int rowheight = p->ybsep*p->stride;
1663 
1664  select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);
1665 
1666  for (i = 0; i < s->num_refs; i++) {
1667  int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
1668  if (ret < 0)
1669  return ret;
1670  }
1671 
1672  memset(s->mctmp, 0, 4*p->yoffset*p->stride);
1673 
1674  dsty = -p->yoffset;
1675  for (y = 0; y < s->blheight; y++) {
1676  int h = 0,
1677  start = FFMAX(dsty, 0);
1678  uint16_t *mctmp = s->mctmp + y*rowheight;
1679  DiracBlock *blocks = s->blmotion + y*s->blwidth;
1680 
1681  init_obmc_weights(s, p, y);
1682 
1683  if (y == s->blheight-1 || start+p->ybsep > p->height)
1684  h = p->height - start;
1685  else
1686  h = p->ybsep - (start - dsty);
1687  if (h < 0)
1688  break;
1689 
1690  memset(mctmp+2*p->yoffset*p->stride, 0, 2*rowheight);
1691  mc_row(s, blocks, mctmp, comp, dsty);
1692 
1693  mctmp += (start - dsty)*p->stride + p->xoffset;
1694  ff_spatial_idwt_slice2(&d, start + h); /* decode */
1695  s->diracdsp.add_rect_clamped(frame + start*p->stride, mctmp, p->stride,
1696  p->idwt_buf + start*p->idwt_stride, p->idwt_stride, p->width, h);
1697 
1698  dsty += p->ybsep;
1699  }
1700  }
1701  }
1702 
1703 
1704  return 0;
1705 }
1706 
1707 static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
1708 {
1709  int ret, i;
1710  int chroma_x_shift, chroma_y_shift;
1711  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift, &chroma_y_shift);
1712 
1713  f->width = avctx->width + 2 * EDGE_WIDTH;
1714  f->height = avctx->height + 2 * EDGE_WIDTH + 2;
1715  ret = ff_get_buffer(avctx, f, flags);
1716  if (ret < 0)
1717  return ret;
1718 
1719  for (i = 0; f->data[i]; i++) {
1720  int offset = (EDGE_WIDTH >> (i && i<3 ? chroma_y_shift : 0)) *
1721  f->linesize[i] + 32;
1722  f->data[i] += offset;
1723  }
1724  f->width = avctx->width;
1725  f->height = avctx->height;
1726 
1727  return 0;
1728 }
1729 
1730 /**
1731  * Dirac Specification ->
1732  * 11.1.1 Picture Header. picture_header()
1733  */
1734 static int dirac_decode_picture_header(DiracContext *s)
1735 {
1736  unsigned retire, picnum;
1737  int i, j, ret;
1738  int64_t refdist, refnum;
1739  GetBitContext *gb = &s->gb;
1740 
1741  /* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
1742  picnum = s->current_picture->avframe->display_picture_number = get_bits_long(gb, 32);
1743 
1744 
1745  av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);
1746 
1747  /* if this is the first keyframe after a sequence header, start our
1748  reordering from here */
1749  if (s->frame_number < 0)
1750  s->frame_number = picnum;
1751 
1752  s->ref_pics[0] = s->ref_pics[1] = NULL;
1753  for (i = 0; i < s->num_refs; i++) {
1754  refnum = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
1755  refdist = INT64_MAX;
1756 
1757  /* find the closest reference to the one we want */
1758  /* Jordi: this is needed if the referenced picture hasn't yet arrived */
1759  for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
1760  if (s->ref_frames[j]
1761  && FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum) < refdist) {
1762  s->ref_pics[i] = s->ref_frames[j];
1763  refdist = FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum);
1764  }
1765 
1766  if (!s->ref_pics[i] || refdist)
1767  av_log(s->avctx, AV_LOG_DEBUG, "Reference not found\n");
1768 
1769  /* if there were no references at all, allocate one */
1770  if (!s->ref_pics[i])
1771  for (j = 0; j < MAX_FRAMES; j++)
1772  if (!s->all_frames[j].avframe->data[0]) {
1773  s->ref_pics[i] = &s->all_frames[j];
1774  ret = get_buffer_with_edge(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
1775  if (ret < 0)
1776  return ret;
1777  break;
1778  }
1779 
1780  if (!s->ref_pics[i]) {
1781  av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
1782  return AVERROR_INVALIDDATA;
1783  }
1784 
1785  }
1786 
1787  /* retire the reference frames that are not used anymore */
1788  if (s->current_picture->reference) {
1789  retire = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
1790  if (retire != picnum) {
1791  DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);
1792 
1793  if (retire_pic)
1794  retire_pic->reference &= DELAYED_PIC_REF;
1795  else
1796  av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
1797  }
1798 
1799  /* if reference array is full, remove the oldest as per the spec */
1800  while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
1801  av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
1802  remove_frame(s->ref_frames, s->ref_frames[0]->avframe->display_picture_number)->reference &= DELAYED_PIC_REF;
1803  }
1804  }
1805 
1806  if (s->num_refs) {
1807  ret = dirac_unpack_prediction_parameters(s); /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
1808  if (ret < 0)
1809  return ret;
1810  ret = dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
1811  if (ret < 0)
1812  return ret;
1813  }
1814  ret = dirac_unpack_idwt_params(s); /* [DIRAC_STD] 11.3 Wavelet transform data */
1815  if (ret < 0)
1816  return ret;
1817 
1818  init_planes(s);
1819  return 0;
1820 }
1821 
1822 static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
1823 {
1824  DiracFrame *out = s->delay_frames[0];
1825  int i, out_idx = 0;
1826  int ret;
1827 
1828  /* find frame with lowest picture number */
1829  for (i = 1; s->delay_frames[i]; i++)
1830  if (s->delay_frames[i]->avframe->display_picture_number < out->avframe->display_picture_number) {
1831  out = s->delay_frames[i];
1832  out_idx = i;
1833  }
1834 
1835  for (i = out_idx; s->delay_frames[i]; i++)
1836  s->delay_frames[i] = s->delay_frames[i+1];
1837 
1838  if (out) {
1839  out->reference ^= DELAYED_PIC_REF;
1840  if((ret = av_frame_ref(picture, out->avframe)) < 0)
1841  return ret;
1842  *got_frame = 1;
1843  }
1844 
1845  return 0;
1846 }
1847 
1848 /**
1849  * Dirac Specification ->
1850  * 9.6 Parse Info Header Syntax. parse_info()
1851  * 4 byte start code + byte parse code + 4 byte size + 4 byte previous size
1852  */
1853 #define DATA_UNIT_HEADER_SIZE 13
1854 
1855 /* [DIRAC_STD] dirac_decode_data_unit corresponds to the body of the while loop
1856  defined in 9.3 inside the function parse_sequence() */
1857 static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
1858 {
1859  DiracContext *s = avctx->priv_data;
1860  DiracFrame *pic = NULL;
1861  int ret, i, parse_code;
1862  unsigned tmp;
1863 
1864  if (size < DATA_UNIT_HEADER_SIZE)
1865  return AVERROR_INVALIDDATA;
1866 
1867  parse_code = buf[4];
1868 
1869  init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE));
1870 
1871  if (parse_code == pc_seq_header) {
1872  if (s->seen_sequence_header)
1873  return 0;
1874 
1875  /* [DIRAC_STD] 10. Sequence header */
1876  ret = avpriv_dirac_parse_sequence_header(avctx, &s->gb, &s->source);
1877  if (ret < 0)
1878  return ret;
1879 
1880  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
1881 
1882  ret = alloc_sequence_buffers(s);
1883  if (ret < 0)
1884  return ret;
1885 
1886  s->seen_sequence_header = 1;
1887  } else if (parse_code == pc_eos) { /* [DIRAC_STD] End of Sequence */
1888  free_sequence_buffers(s);
1889  s->seen_sequence_header = 0;
1890  } else if (parse_code == pc_aux_data) {
1891  if (buf[13] == 1) { /* encoder implementation/version */
1892  int ver[3];
1893  /* versions older than 1.0.8 don't store quant delta for
1894  subbands with only one codeblock */
1895  if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
1896  if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
1897  s->old_delta_quant = 1;
1898  }
1899  } else if (parse_code & 0x8) { /* picture data unit */
1900  if (!s->seen_sequence_header) {
1901  av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n");
1902  return AVERROR_INVALIDDATA;
1903  }
1904 
1905  /* find an unused frame */
1906  for (i = 0; i < MAX_FRAMES; i++)
1907  if (s->all_frames[i].avframe->data[0] == NULL)
1908  pic = &s->all_frames[i];
1909  if (!pic) {
1910  av_log(avctx, AV_LOG_ERROR, "framelist full\n");
1911  return AVERROR_INVALIDDATA;
1912  }
1913 
1914  av_frame_unref(pic->avframe);
1915 
1916  /* [DIRAC_STD] Defined in 9.6.1 ... */
1917  tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
1918  if (tmp > 2) {
1919  av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n");
1920  return AVERROR_INVALIDDATA;
1921  }
1922  s->num_refs = tmp;
1923  s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
1924  s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
1925  pic->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
1926  pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
1927  pic->avframe->pict_type = s->num_refs + 1; /* 0/1/2 refs map to AV_PICTURE_TYPE_I/P/B; see AVPictureType in avutil.h */
1928 
1929  if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
1930  return ret;
1931  s->current_picture = pic;
1932  s->plane[0].stride = pic->avframe->linesize[0];
1933  s->plane[1].stride = pic->avframe->linesize[1];
1934  s->plane[2].stride = pic->avframe->linesize[2];
1935 
1936  if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
1937  return AVERROR(ENOMEM);
1938 
1939  /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
1940  ret = dirac_decode_picture_header(s);
1941  if (ret < 0)
1942  return ret;
1943 
1944  /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */
1945  ret = dirac_decode_frame_internal(s);
1946  if (ret < 0)
1947  return ret;
1948  }
1949  return 0;
1950 }
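/*
 * Editor's sketch (not part of diracdec.c): the picture-related fields that
 * dirac_decode_data_unit() above derives from a picture parse code, isolated
 * into one helper. The masks are taken verbatim from the code above; the
 * struct and function names are illustrative only.
 */
struct parse_code_fields_sketch {
    int is_picture; /* any picture data unit          */
    int num_refs;   /* 0 = intra, 1 or 2 = inter      */
    int is_arith;   /* arithmetic-coded coefficients  */
    int low_delay;  /* low-delay (VC-2 style) syntax  */
    int is_ref;     /* picture is kept as a reference */
};

static struct parse_code_fields_sketch parse_code_fields(int parse_code)
{
    struct parse_code_fields_sketch f;
    f.is_picture = (parse_code & 0x8)  != 0;
    f.num_refs   =  parse_code & 0x03;
    f.is_arith   = (parse_code & 0x48) == 0x08;
    f.low_delay  = (parse_code & 0x88) == 0x88;
    f.is_ref     = (parse_code & 0x0C) == 0x0C;
    return f;
}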
1951 
1952 static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
1953 {
1954  DiracContext *s = avctx->priv_data;
1955  AVFrame *picture = data;
1956  uint8_t *buf = pkt->data;
1957  int buf_size = pkt->size;
1958  int i, buf_idx = 0;
1959  int ret;
1960  unsigned data_unit_size;
1961 
1962  /* release unused frames */
1963  for (i = 0; i < MAX_FRAMES; i++)
1964  if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].reference) {
1965  av_frame_unref(s->all_frames[i].avframe);
1966  memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
1967  }
1968 
1969  s->current_picture = NULL;
1970  *got_frame = 0;
1971 
1972  /* end of stream, so flush delayed pics */
1973  if (buf_size == 0)
1974  return get_delayed_pic(s, (AVFrame *)data, got_frame);
1975 
1976  for (;;) {
1977  /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
1978  [DIRAC_STD] PARSE_INFO_PREFIX = "BBCD" as defined in ISO/IEC 646
1979  BBCD start code search */
1980  for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) {
1981  if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
1982  buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
1983  break;
1984  }
1985  /* BBCD found or end of data */
1986  if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size)
1987  break;
1988 
1989  data_unit_size = AV_RB32(buf+buf_idx+5);
1990  if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
1991  if(data_unit_size > buf_size - buf_idx)
1992  av_log(s->avctx, AV_LOG_ERROR,
1993  "Data unit with size %d is larger than input buffer, discarding\n",
1994  data_unit_size);
1995  buf_idx += 4;
1996  continue;
1997  }
1998  /* [DIRAC_STD] dirac_decode_data_unit corresponds to the body of the while loop defined in 9.3 inside the function parse_sequence() */
1999  ret = dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size);
2000  if (ret < 0)
2001  {
2002  av_log(s->avctx, AV_LOG_ERROR,"Error in dirac_decode_data_unit\n");
2003  return ret;
2004  }
2005  buf_idx += data_unit_size;
2006  }
2007 
2008  if (!s->current_picture)
2009  return buf_size;
2010 
2011  if (s->current_picture->avframe->display_picture_number > s->frame_number) {
2012  DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);
2013 
2014  s->current_picture->reference |= DELAYED_PIC_REF;
2015 
2016  if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
2017  int min_num = s->delay_frames[0]->avframe->display_picture_number;
2018  /* Too many delayed frames, so we display the frame with the lowest pts */
2019  av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");
2020 
2021  for (i = 1; s->delay_frames[i]; i++)
2022  if (s->delay_frames[i]->avframe->display_picture_number < min_num)
2023  min_num = s->delay_frames[i]->avframe->display_picture_number;
2024 
2025  delayed_frame = remove_frame(s->delay_frames, min_num);
2026  delayed_frame->reference |= DELAYED_PIC_REF;
2027  }
2028 
2029  if (delayed_frame) {
2030  delayed_frame->reference ^= DELAYED_PIC_REF;
2031  if((ret=av_frame_ref(data, delayed_frame->avframe)) < 0)
2032  return ret;
2033  *got_frame = 1;
2034  }
2035  } else if (s->current_picture->avframe->display_picture_number == s->frame_number) {
2036  /* The right frame at the right time :-) */
2037  if((ret=av_frame_ref(data, s->current_picture->avframe)) < 0)
2038  return ret;
2039  *got_frame = 1;
2040  }
2041 
2042  if (*got_frame)
2043  s->frame_number = picture->display_picture_number + 1;
2044 
2045  return buf_idx;
2046 }
2047 
2048 AVCodec ff_dirac_decoder = {
2049  .name = "dirac",
2050  .long_name = NULL_IF_CONFIG_SMALL("BBC Dirac VC-2"),
2051  .type = AVMEDIA_TYPE_VIDEO,
2052  .id = AV_CODEC_ID_DIRAC,
2053  .priv_data_size = sizeof(DiracContext),
2054  .init = dirac_decode_init,
2055  .close = dirac_decode_end,
2056  .decode = dirac_decode_frame,
2057  .capabilities = AV_CODEC_CAP_DELAY,
2058  .flush = dirac_decode_flush,
2059 };
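/*
 * Editor's usage sketch (not part of diracdec.c): how a caller would drive
 * ff_dirac_decoder through the libavcodec API of this release, including the
 * empty-packet drain that reaches the buf_size == 0 / get_delayed_pic() path
 * in dirac_decode_frame() above (the codec sets AV_CODEC_CAP_DELAY).
 * read_dirac_packet() and consume_frame() are hypothetical helpers, and most
 * error handling is omitted; this is an illustration, not reference code.
 */
#include <libavcodec/avcodec.h>

int  read_dirac_packet(AVPacket *pkt);    /* hypothetical: fills pkt from a demuxer   */
void consume_frame(const AVFrame *frame); /* hypothetical: consumes a decoded picture */

static void decode_dirac_stream_sketch(void)
{
    AVCodec        *codec;
    AVCodecContext *ctx;
    AVFrame        *frame;
    AVPacket        pkt;
    int             got_frame;

    avcodec_register_all();
    codec = avcodec_find_decoder(AV_CODEC_ID_DIRAC);
    ctx   = avcodec_alloc_context3(codec);
    frame = av_frame_alloc();
    avcodec_open2(ctx, codec, NULL);

    /* feed demuxed packets; each one may contain several BBCD data units */
    while (read_dirac_packet(&pkt)) {
        avcodec_decode_video2(ctx, frame, &got_frame, &pkt);
        if (got_frame)
            consume_frame(frame);
        av_free_packet(&pkt);
    }

    /* drain: empty packets hit the "flush delayed pics" branch above */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    do {
        avcodec_decode_video2(ctx, frame, &got_frame, &pkt);
        if (got_frame)
            consume_frame(frame);
    } while (got_frame);

    av_frame_free(&frame);
    avcodec_close(ctx);
    avcodec_free_context(&ctx);
}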