FFmpeg  3.3.9
utvideodec.c
Go to the documentation of this file.
1 /*
2  * Ut Video decoder
3  * Copyright (c) 2011 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video decoder
25  */
26 
27 #include <inttypes.h>
28 #include <stdlib.h>
29 
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/pixdesc.h"
32 #include "avcodec.h"
33 #include "bswapdsp.h"
34 #include "bytestream.h"
35 #include "get_bits.h"
36 #include "internal.h"
37 #include "thread.h"
38 #include "utvideo.h"
39 
40 static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
41 {
42  int i;
43  HuffEntry he[1024];
44  int last;
45  uint32_t codes[1024];
46  uint8_t bits[1024];
47  uint16_t syms[1024];
48  uint32_t code;
49 
50  *fsym = -1;
51  for (i = 0; i < 1024; i++) {
52  he[i].sym = i;
53  he[i].len = *src++;
54  }
55  qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);
56 
57  if (!he[0].len) {
58  *fsym = he[0].sym;
59  return 0;
60  }
61 
62  last = 1023;
63  while (he[last].len == 255 && last)
64  last--;
65 
66  if (he[last].len > 32) {
67  return -1;
68  }
69 
70  code = 1;
71  for (i = last; i >= 0; i--) {
72  codes[i] = code >> (32 - he[i].len);
73  bits[i] = he[i].len;
74  syms[i] = he[i].sym;
75  code += 0x80000000u >> (he[i].len - 1);
76  }
77 
78  return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
79  bits, sizeof(*bits), sizeof(*bits),
80  codes, sizeof(*codes), sizeof(*codes),
81  syms, sizeof(*syms), sizeof(*syms), 0);
82 }
83 
84 static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
85 {
86  int i;
87  HuffEntry he[256];
88  int last;
89  uint32_t codes[256];
90  uint8_t bits[256];
91  uint8_t syms[256];
92  uint32_t code;
93 
94  *fsym = -1;
95  for (i = 0; i < 256; i++) {
96  he[i].sym = i;
97  he[i].len = *src++;
98  }
99  qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);
100 
101  if (!he[0].len) {
102  *fsym = he[0].sym;
103  return 0;
104  }
105 
106  last = 255;
107  while (he[last].len == 255 && last)
108  last--;
109 
110  if (he[last].len > 32)
111  return -1;
112 
113  code = 1;
114  for (i = last; i >= 0; i--) {
115  codes[i] = code >> (32 - he[i].len);
116  bits[i] = he[i].len;
117  syms[i] = he[i].sym;
118  code += 0x80000000u >> (he[i].len - 1);
119  }
120 
121  return ff_init_vlc_sparse(vlc, FFMIN(he[last].len, 11), last + 1,
122  bits, sizeof(*bits), sizeof(*bits),
123  codes, sizeof(*codes), sizeof(*codes),
124  syms, sizeof(*syms), sizeof(*syms), 0);
125 }
126 
127 static int decode_plane10(UtvideoContext *c, int plane_no,
128  uint16_t *dst, int step, ptrdiff_t stride,
129  int width, int height,
130  const uint8_t *src, const uint8_t *huff,
131  int use_pred)
132 {
133  int i, j, slice, pix, ret;
134  int sstart, send;
135  VLC vlc;
136  GetBitContext gb;
137  int prev, fsym;
138 
139  if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
140  av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
141  return ret;
142  }
143  if (fsym >= 0) { // build_huff reported a symbol to fill slices with
144  send = 0;
145  for (slice = 0; slice < c->slices; slice++) {
146  uint16_t *dest;
147 
148  sstart = send;
149  send = (height * (slice + 1) / c->slices);
150  dest = dst + sstart * stride;
151 
152  prev = 0x200;
153  for (j = sstart; j < send; j++) {
154  for (i = 0; i < width * step; i += step) {
155  pix = fsym;
156  if (use_pred) {
157  prev += pix;
158  prev &= 0x3FF;
159  pix = prev;
160  }
161  dest[i] = pix;
162  }
163  dest += stride;
164  }
165  }
166  return 0;
167  }
168 
169  send = 0;
170  for (slice = 0; slice < c->slices; slice++) {
171  uint16_t *dest;
172  int slice_data_start, slice_data_end, slice_size;
173 
174  sstart = send;
175  send = (height * (slice + 1) / c->slices);
176  dest = dst + sstart * stride;
177 
178  // slice offset and size validation was done earlier
179  slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
180  slice_data_end = AV_RL32(src + slice * 4);
181  slice_size = slice_data_end - slice_data_start;
182 
183  if (!slice_size) {
184  av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
185  "yet a slice has a length of zero.\n");
186  goto fail;
187  }
188 
189  memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
190  slice_size);
191  memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
192  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
193  (uint32_t *) c->slice_bits,
194  (slice_data_end - slice_data_start + 3) >> 2);
195  init_get_bits(&gb, c->slice_bits, slice_size * 8);
196 
197  prev = 0x200;
198  for (j = sstart; j < send; j++) {
199  for (i = 0; i < width * step; i += step) {
200  if (get_bits_left(&gb) <= 0) {
202  "Slice decoding ran out of bits\n");
203  goto fail;
204  }
205  pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
206  if (pix < 0) {
207  av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
208  goto fail;
209  }
210  if (use_pred) {
211  prev += pix;
212  prev &= 0x3FF;
213  pix = prev;
214  }
215  dest[i] = pix;
216  }
217  dest += stride;
218  }
219  if (get_bits_left(&gb) > 32)
221  "%d bits left after decoding slice\n", get_bits_left(&gb));
222  }
223 
224  ff_free_vlc(&vlc);
225 
226  return 0;
227 fail:
228  ff_free_vlc(&vlc);
229  return AVERROR_INVALIDDATA;
230 }
231 
232 static int decode_plane(UtvideoContext *c, int plane_no,
233  uint8_t *dst, int step, ptrdiff_t stride,
234  int width, int height,
235  const uint8_t *src, int use_pred)
236 {
237  int i, j, slice, pix;
238  int sstart, send;
239  VLC vlc;
240  GetBitContext gb;
241  int prev, fsym;
242  const int cmask = ~(!plane_no && c->avctx->pix_fmt == AV_PIX_FMT_YUV420P);
243 
244  if (build_huff(src, &vlc, &fsym)) {
245  av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
246  return AVERROR_INVALIDDATA;
247  }
248  if (fsym >= 0) { // build_huff reported a symbol to fill slices with
249  send = 0;
250  for (slice = 0; slice < c->slices; slice++) {
251  uint8_t *dest;
252 
253  sstart = send;
254  send = (height * (slice + 1) / c->slices) & cmask;
255  dest = dst + sstart * stride;
256 
257  prev = 0x80;
258  for (j = sstart; j < send; j++) {
259  for (i = 0; i < width * step; i += step) {
260  pix = fsym;
261  if (use_pred) {
262  prev += pix;
263  pix = prev;
264  }
265  dest[i] = pix;
266  }
267  dest += stride;
268  }
269  }
270  return 0;
271  }
272 
273  src += 256;
274 
275  send = 0;
276  for (slice = 0; slice < c->slices; slice++) {
277  uint8_t *dest;
278  int slice_data_start, slice_data_end, slice_size;
279 
280  sstart = send;
281  send = (height * (slice + 1) / c->slices) & cmask;
282  dest = dst + sstart * stride;
283 
284  // slice offset and size validation was done earlier
285  slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
286  slice_data_end = AV_RL32(src + slice * 4);
287  slice_size = slice_data_end - slice_data_start;
288 
289  if (!slice_size) {
290  av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
291  "yet a slice has a length of zero.\n");
292  goto fail;
293  }
294 
295  memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
296  slice_size);
297  memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
298  c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
299  (uint32_t *) c->slice_bits,
300  (slice_data_end - slice_data_start + 3) >> 2);
301  init_get_bits(&gb, c->slice_bits, slice_size * 8);
302 
303  prev = 0x80;
304  for (j = sstart; j < send; j++) {
305  for (i = 0; i < width * step; i += step) {
306  if (get_bits_left(&gb) <= 0) {
308  "Slice decoding ran out of bits\n");
309  goto fail;
310  }
311  pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
312  if (pix < 0) {
313  av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
314  goto fail;
315  }
316  if (use_pred) {
317  prev += pix;
318  pix = prev;
319  }
320  dest[i] = pix;
321  }
322  dest += stride;
323  }
324  if (get_bits_left(&gb) > 32)
326  "%d bits left after decoding slice\n", get_bits_left(&gb));
327  }
328 
329  ff_free_vlc(&vlc);
330 
331  return 0;
332 fail:
333  ff_free_vlc(&vlc);
334  return AVERROR_INVALIDDATA;
335 }
336 
/* Undo the R-G / B-G decorrelation applied by the encoder: the red and
 * blue channels were stored as differences against green, biased by 0x80.
 * Operates in place on packed pixels; arithmetic wraps modulo 256. */
static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
                               int width, int height)
{
    int row, col;

    for (row = 0; row < height; row++) {
        uint8_t *px = src + (ptrdiff_t)row * stride;

        for (col = 0; col < width; col++, px += step) {
            const uint8_t green = px[1];

            px[0] = (uint8_t)(px[0] + green - 0x80); /* restore red  */
            px[2] = (uint8_t)(px[2] + green - 0x80); /* restore blue */
        }
    }
}
354 
356 {
357  uint16_t *src_r = (uint16_t *)frame->data[2];
358  uint16_t *src_g = (uint16_t *)frame->data[0];
359  uint16_t *src_b = (uint16_t *)frame->data[1];
360  int r, g, b;
361  int i, j;
362 
363  for (j = 0; j < height; j++) {
364  for (i = 0; i < width; i++) {
365  r = src_r[i];
366  g = src_g[i];
367  b = src_b[i];
368  src_r[i] = (r + g - 0x200) & 0x3FF;
369  src_b[i] = (b + g - 0x200) & 0x3FF;
370  }
371  src_r += frame->linesize[2] / 2;
372  src_g += frame->linesize[0] / 2;
373  src_b += frame->linesize[1] / 2;
374  }
375 }
376 
377 #undef A
378 #undef B
379 #undef C
380 
382  int width, int height, int slices, int rmode)
383 {
384  int i, j, slice;
385  int A, B, C;
386  uint8_t *bsrc;
387  int slice_start, slice_height;
388  const int cmask = ~rmode;
389 
390  for (slice = 0; slice < slices; slice++) {
391  slice_start = ((slice * height) / slices) & cmask;
392  slice_height = ((((slice + 1) * height) / slices) & cmask) -
393  slice_start;
394 
395  if (!slice_height)
396  continue;
397  bsrc = src + slice_start * stride;
398 
399  // first line - left neighbour prediction
400  bsrc[0] += 0x80;
401  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
402  bsrc += stride;
403  if (slice_height <= 1)
404  continue;
405  // second line - first element has top prediction, the rest uses median
406  C = bsrc[-stride];
407  bsrc[0] += C;
408  A = bsrc[0];
409  for (i = 1; i < width; i++) {
410  B = bsrc[i - stride];
411  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
412  C = B;
413  A = bsrc[i];
414  }
415  bsrc += stride;
416  // the rest of lines use continuous median prediction
417  for (j = 2; j < slice_height; j++) {
418  c->llviddsp.add_median_pred(bsrc, bsrc - stride,
419  bsrc, width, &A, &B);
420  bsrc += stride;
421  }
422  }
423 }
424 
425 /* UtVideo interlaced mode treats every two lines as a single one,
426  * so restoring function should take care of possible padding between
427  * two parts of the same "line".
428  */
430  int width, int height, int slices, int rmode)
431 {
432  int i, j, slice;
433  int A, B, C;
434  uint8_t *bsrc;
435  int slice_start, slice_height;
436  const int cmask = ~(rmode ? 3 : 1);
437  const ptrdiff_t stride2 = stride << 1;
438 
439  for (slice = 0; slice < slices; slice++) {
440  slice_start = ((slice * height) / slices) & cmask;
441  slice_height = ((((slice + 1) * height) / slices) & cmask) -
442  slice_start;
443  slice_height >>= 1;
444  if (!slice_height)
445  continue;
446 
447  bsrc = src + slice_start * stride;
448 
449  // first line - left neighbour prediction
450  bsrc[0] += 0x80;
451  A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
452  c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
453  bsrc += stride2;
454  if (slice_height <= 1)
455  continue;
456  // second line - first element has top prediction, the rest uses median
457  C = bsrc[-stride2];
458  bsrc[0] += C;
459  A = bsrc[0];
460  for (i = 1; i < width; i++) {
461  B = bsrc[i - stride2];
462  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
463  C = B;
464  A = bsrc[i];
465  }
466  c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
467  bsrc + stride, width, &A, &B);
468  bsrc += stride2;
469  // the rest of lines use continuous median prediction
470  for (j = 2; j < slice_height; j++) {
471  c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
472  bsrc, width, &A, &B);
473  c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
474  bsrc + stride, width, &A, &B);
475  bsrc += stride2;
476  }
477  }
478 }
479 
/* Undo median prediction on one channel of packed (interleaved) pixels,
 * progressive layout. step is the distance between samples of the same
 * channel. Per slice: first line left-predicted (seeded with 0x80),
 * second line top-then-median, remaining lines full median with A/C
 * carried across line boundaries (matches the encoder's scan order).
 * rmode non-zero rounds slice boundaries down to even lines. */
static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
                                  int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A        = bsrc[i];
        }
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C        = bsrc[-stride];
        bsrc[0] += C;
        A        = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B        = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C        = B;
            A        = bsrc[i];
        }
        bsrc += stride;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B        = bsrc[i - stride];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C        = B;
                A        = bsrc[i];
            }
            bsrc += stride;
        }
    }
}
531 
532 /* UtVideo interlaced mode treats every two lines as a single one,
533  * so restoring function should take care of possible padding between
534  * two parts of the same "line".
535  */
/* UtVideo interlaced mode treats every two lines as a single one,
 * so restoring function should take care of possible padding between
 * two parts of the same "line".
 * Packed-pixel variant of restore_median_planar_il: predictions step by
 * stride2 (two real lines) and each logical line is restored in two
 * halves, with A/C carried across halves and lines. */
static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask   = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; // heights are in logical (paired) lines
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A        = bsrc[0];
        for (i = step; i < width * step; i += step) {
            bsrc[i] += A;
            A        = bsrc[i];
        }
        // second half of the first logical line continues left prediction
        for (i = 0; i < width * step; i += step) {
            bsrc[stride + i] += A;
            A                 = bsrc[stride + i];
        }
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C        = bsrc[-stride2];
        bsrc[0] += C;
        A        = bsrc[0];
        for (i = step; i < width * step; i += step) {
            B        = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C        = B;
            A        = bsrc[i];
        }
        for (i = 0; i < width * step; i += step) {
            B                 = bsrc[i - stride];
            bsrc[stride + i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C                 = B;
            A                 = bsrc[stride + i];
        }
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            for (i = 0; i < width * step; i += step) {
                B        = bsrc[i - stride2];
                bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
                C        = B;
                A        = bsrc[i];
            }
            for (i = 0; i < width * step; i += step) {
                B                 = bsrc[i - stride];
                bsrc[i + stride] += mid_pred(A, B, (uint8_t)(A + B - C));
                C                 = B;
                A                 = bsrc[i + stride];
            }
            bsrc += stride2;
        }
    }
}
605 
606 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
607  AVPacket *avpkt)
608 {
609  const uint8_t *buf = avpkt->data;
610  int buf_size = avpkt->size;
611  UtvideoContext *c = avctx->priv_data;
612  int i, j;
613  const uint8_t *plane_start[5];
614  int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
615  int ret;
616  GetByteContext gb;
617  ThreadFrame frame = { .f = data };
618 
619  if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
620  return ret;
621 
622  /* parse plane structure to get frame flags and validate slice offsets */
623  bytestream2_init(&gb, buf, buf_size);
624  if (c->pro) {
626  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
627  return AVERROR_INVALIDDATA;
628  }
629  c->frame_info = bytestream2_get_le32u(&gb);
630  c->slices = ((c->frame_info >> 16) & 0xff) + 1;
631  for (i = 0; i < c->planes; i++) {
632  plane_start[i] = gb.buffer;
633  if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
634  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
635  return AVERROR_INVALIDDATA;
636  }
637  slice_start = 0;
638  slice_end = 0;
639  for (j = 0; j < c->slices; j++) {
640  slice_end = bytestream2_get_le32u(&gb);
641  if (slice_end < 0 || slice_end < slice_start ||
642  bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
643  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
644  return AVERROR_INVALIDDATA;
645  }
646  slice_size = slice_end - slice_start;
647  slice_start = slice_end;
648  max_slice_size = FFMAX(max_slice_size, slice_size);
649  }
650  plane_size = slice_end;
651  bytestream2_skipu(&gb, plane_size);
652  bytestream2_skipu(&gb, 1024);
653  }
654  plane_start[c->planes] = gb.buffer;
655  } else {
656  for (i = 0; i < c->planes; i++) {
657  plane_start[i] = gb.buffer;
658  if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
659  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
660  return AVERROR_INVALIDDATA;
661  }
662  bytestream2_skipu(&gb, 256);
663  slice_start = 0;
664  slice_end = 0;
665  for (j = 0; j < c->slices; j++) {
666  slice_end = bytestream2_get_le32u(&gb);
667  if (slice_end < 0 || slice_end < slice_start ||
668  bytestream2_get_bytes_left(&gb) < slice_end) {
669  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
670  return AVERROR_INVALIDDATA;
671  }
672  slice_size = slice_end - slice_start;
673  slice_start = slice_end;
674  max_slice_size = FFMAX(max_slice_size, slice_size);
675  }
676  plane_size = slice_end;
677  bytestream2_skipu(&gb, plane_size);
678  }
679  plane_start[c->planes] = gb.buffer;
681  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
682  return AVERROR_INVALIDDATA;
683  }
684  c->frame_info = bytestream2_get_le32u(&gb);
685  }
686  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
687  c->frame_info);
688 
689  c->frame_pred = (c->frame_info >> 8) & 3;
690 
691  if (c->frame_pred == PRED_GRADIENT) {
692  avpriv_request_sample(avctx, "Frame with gradient prediction");
693  return AVERROR_PATCHWELCOME;
694  }
695 
697  max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
698 
699  if (!c->slice_bits) {
700  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
701  return AVERROR(ENOMEM);
702  }
703 
704  switch (c->avctx->pix_fmt) {
705  case AV_PIX_FMT_RGB24:
706  case AV_PIX_FMT_RGBA:
707  for (i = 0; i < c->planes; i++) {
708  ret = decode_plane(c, i, frame.f->data[0] + ff_ut_rgb_order[i],
709  c->planes, frame.f->linesize[0], avctx->width,
710  avctx->height, plane_start[i],
711  c->frame_pred == PRED_LEFT);
712  if (ret)
713  return ret;
714  if (c->frame_pred == PRED_MEDIAN) {
715  if (!c->interlaced) {
717  c->planes, frame.f->linesize[0], avctx->width,
718  avctx->height, c->slices, 0);
719  } else {
721  c->planes, frame.f->linesize[0],
722  avctx->width, avctx->height, c->slices,
723  0);
724  }
725  }
726  }
727  restore_rgb_planes(frame.f->data[0], c->planes, frame.f->linesize[0],
728  avctx->width, avctx->height);
729  break;
730  case AV_PIX_FMT_GBRAP10:
731  case AV_PIX_FMT_GBRP10:
732  for (i = 0; i < c->planes; i++) {
733  ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1,
734  frame.f->linesize[i] / 2, avctx->width,
735  avctx->height, plane_start[i],
736  plane_start[i + 1] - 1024,
737  c->frame_pred == PRED_LEFT);
738  if (ret)
739  return ret;
740  }
741  restore_rgb_planes10(frame.f, avctx->width, avctx->height);
742  break;
743  case AV_PIX_FMT_YUV420P:
744  for (i = 0; i < 3; i++) {
745  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
746  avctx->width >> !!i, avctx->height >> !!i,
747  plane_start[i], c->frame_pred == PRED_LEFT);
748  if (ret)
749  return ret;
750  if (c->frame_pred == PRED_MEDIAN) {
751  if (!c->interlaced) {
752  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
753  avctx->width >> !!i, avctx->height >> !!i,
754  c->slices, !i);
755  } else {
756  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
757  avctx->width >> !!i,
758  avctx->height >> !!i,
759  c->slices, !i);
760  }
761  }
762  }
763  break;
764  case AV_PIX_FMT_YUV422P:
765  for (i = 0; i < 3; i++) {
766  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
767  avctx->width >> !!i, avctx->height,
768  plane_start[i], c->frame_pred == PRED_LEFT);
769  if (ret)
770  return ret;
771  if (c->frame_pred == PRED_MEDIAN) {
772  if (!c->interlaced) {
773  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
774  avctx->width >> !!i, avctx->height,
775  c->slices, 0);
776  } else {
777  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
778  avctx->width >> !!i, avctx->height,
779  c->slices, 0);
780  }
781  }
782  }
783  break;
784  case AV_PIX_FMT_YUV444P:
785  for (i = 0; i < 3; i++) {
786  ret = decode_plane(c, i, frame.f->data[i], 1, frame.f->linesize[i],
787  avctx->width, avctx->height,
788  plane_start[i], c->frame_pred == PRED_LEFT);
789  if (ret)
790  return ret;
791  if (c->frame_pred == PRED_MEDIAN) {
792  if (!c->interlaced) {
793  restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
794  avctx->width, avctx->height,
795  c->slices, 0);
796  } else {
797  restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
798  avctx->width, avctx->height,
799  c->slices, 0);
800  }
801  }
802  }
803  break;
805  for (i = 0; i < 3; i++) {
806  ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], 1, frame.f->linesize[i] / 2,
807  avctx->width >> !!i, avctx->height,
808  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
809  if (ret)
810  return ret;
811  }
812  break;
813  }
814 
815  frame.f->key_frame = 1;
816  frame.f->pict_type = AV_PICTURE_TYPE_I;
817  frame.f->interlaced_frame = !!c->interlaced;
818 
819  *got_frame = 1;
820 
821  /* always report that the buffer was completely consumed */
822  return buf_size;
823 }
824 
826 {
827  UtvideoContext * const c = avctx->priv_data;
828  int h_shift, v_shift;
829 
830  c->avctx = avctx;
831 
832  ff_bswapdsp_init(&c->bdsp);
834 
835  c->slice_bits_size = 0;
836 
837  switch (avctx->codec_tag) {
838  case MKTAG('U', 'L', 'R', 'G'):
839  c->planes = 3;
840  avctx->pix_fmt = AV_PIX_FMT_RGB24;
841  break;
842  case MKTAG('U', 'L', 'R', 'A'):
843  c->planes = 4;
844  avctx->pix_fmt = AV_PIX_FMT_RGBA;
845  break;
846  case MKTAG('U', 'L', 'Y', '0'):
847  c->planes = 3;
848  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
849  avctx->colorspace = AVCOL_SPC_BT470BG;
850  break;
851  case MKTAG('U', 'L', 'Y', '2'):
852  c->planes = 3;
853  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
854  avctx->colorspace = AVCOL_SPC_BT470BG;
855  break;
856  case MKTAG('U', 'L', 'Y', '4'):
857  c->planes = 3;
858  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
859  avctx->colorspace = AVCOL_SPC_BT470BG;
860  break;
861  case MKTAG('U', 'Q', 'Y', '2'):
862  c->planes = 3;
863  c->pro = 1;
864  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
865  break;
866  case MKTAG('U', 'Q', 'R', 'G'):
867  c->planes = 3;
868  c->pro = 1;
869  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
870  break;
871  case MKTAG('U', 'Q', 'R', 'A'):
872  c->planes = 4;
873  c->pro = 1;
874  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
875  break;
876  case MKTAG('U', 'L', 'H', '0'):
877  c->planes = 3;
878  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
879  avctx->colorspace = AVCOL_SPC_BT709;
880  break;
881  case MKTAG('U', 'L', 'H', '2'):
882  c->planes = 3;
883  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
884  avctx->colorspace = AVCOL_SPC_BT709;
885  break;
886  case MKTAG('U', 'L', 'H', '4'):
887  c->planes = 3;
888  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
889  avctx->colorspace = AVCOL_SPC_BT709;
890  break;
891  default:
892  av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
893  avctx->codec_tag);
894  return AVERROR_INVALIDDATA;
895  }
896 
897  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
898  if ((avctx->width & ((1<<h_shift)-1)) ||
899  (avctx->height & ((1<<v_shift)-1))) {
900  avpriv_request_sample(avctx, "Odd dimensions");
901  return AVERROR_PATCHWELCOME;
902  }
903 
904  if (!c->pro && avctx->extradata_size >= 16) {
905  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
906  avctx->extradata[3], avctx->extradata[2],
907  avctx->extradata[1], avctx->extradata[0]);
908  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
909  AV_RB32(avctx->extradata + 4));
910  c->frame_info_size = AV_RL32(avctx->extradata + 8);
911  c->flags = AV_RL32(avctx->extradata + 12);
912 
913  if (c->frame_info_size != 4)
914  avpriv_request_sample(avctx, "Frame info not 4 bytes");
915  av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
916  c->slices = (c->flags >> 24) + 1;
917  c->compression = c->flags & 1;
918  c->interlaced = c->flags & 0x800;
919  } else if (c->pro && avctx->extradata_size == 8) {
920  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
921  avctx->extradata[3], avctx->extradata[2],
922  avctx->extradata[1], avctx->extradata[0]);
923  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
924  AV_RB32(avctx->extradata + 4));
925  c->interlaced = 0;
926  c->frame_info_size = 4;
927  } else {
928  av_log(avctx, AV_LOG_ERROR,
929  "Insufficient extradata size %d, should be at least 16\n",
930  avctx->extradata_size);
931  return AVERROR_INVALIDDATA;
932  }
933 
934  return 0;
935 }
936 
938 {
939  UtvideoContext * const c = avctx->priv_data;
940 
941  av_freep(&c->slice_bits);
942 
943  return 0;
944 }
945 
947  .name = "utvideo",
948  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
949  .type = AVMEDIA_TYPE_VIDEO,
950  .id = AV_CODEC_ID_UTVIDEO,
951  .priv_data_size = sizeof(UtvideoContext),
952  .init = decode_init,
953  .close = decode_end,
954  .decode = decode_frame,
956  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
957 };
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:457
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:429
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int(* add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int left)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:187
int ff_ut10_huff_cmp_len(const void *a, const void *b)
Definition: utvideo.c:43
#define C
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:375
uint32_t flags
Definition: utvideo.h:76
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVFrame * f
Definition: thread.h:36
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
const char * g
Definition: vf_curves.c:112
Definition: vf_geq.c:46
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:461
int slice_bits_size
Definition: utvideo.h:86
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:268
int size
Definition: avcodec.h:1658
const char * b
Definition: vf_curves.c:113
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:371
static av_cold int decode_end(AVCodecContext *avctx)
Definition: utvideodec.c:937
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1960
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
#define src
Definition: vp8dsp.c:254
int stride
Definition: mace.c:144
AVCodec.
Definition: avcodec.h:3681
static int decode_plane(UtvideoContext *c, int plane_no, uint8_t *dst, int step, ptrdiff_t stride, int width, int height, const uint8_t *src, int use_pred)
Definition: utvideodec.c:232
int interlaced
Definition: utvideo.h:80
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
uint8_t bits
Definition: crc.c:296
uint8_t
#define av_cold
Definition: attributes.h:82
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: bswapdsp.h:25
#define AV_RB32
Definition: intreadwrite.h:130
Multithreading support functions.
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1847
uint32_t frame_info
Definition: utvideo.h:76
static AVFrame * frame
const char data[16]
Definition: mxf.c:90
#define height
uint8_t * data
Definition: avcodec.h:1657
const uint8_t * buffer
Definition: bytestream.h:34
void(* add_median_pred)(uint8_t *dst, const uint8_t *top, const uint8_t *diff, ptrdiff_t w, int *left, int *left_top)
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:170
bitstream reader API header.
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:325
#define A(x)
Definition: vp56_arith.h:28
const int ff_ut_rgb_order[4]
Definition: utvideo.c:35
#define av_log(a,...)
static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
Definition: utvideodec.c:84
BswapDSPContext bdsp
Definition: utvideo.h:72
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:589
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static av_cold int decode_init(AVCodecContext *avctx)
Definition: utvideodec.c:825
#define AVERROR(e)
Definition: error.h:43
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2361
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:179
AVCodecContext * avctx
Definition: utvideo.h:71
const char * r
Definition: vf_curves.c:111
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
const char * name
Name of the codec implementation.
Definition: avcodec.h:3688
uint32_t frame_info_size
Definition: utvideo.h:76
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:89
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1057
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:98
Definition: vlc.h:26
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:469
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:381
int compression
Definition: utvideo.h:79
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:261
#define FFMIN(a, b)
Definition: common.h:96
#define width
int width
picture width / height.
Definition: avcodec.h:1919
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:556
#define AV_RL32
Definition: intreadwrite.h:146
static void restore_rgb_planes10(AVFrame *frame, int width, int height)
Definition: utvideodec.c:355
int bits
Definition: vlc.h:27
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: utvideodec.c:606
Common Ut Video header.
int frame_pred
Definition: utvideo.h:81
uint8_t len
Definition: magicyuv.c:49
Libavcodec external API header.
static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride, int width, int height)
Definition: utvideodec.c:337
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:218
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
Definition: avcodec.h:1732
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:1764
void * buf
Definition: avisynth_c.h:690
int extradata_size
Definition: avcodec.h:1848
void ff_llviddsp_init(LLVidDSPContext *c)
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2462
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:426
#define mid_pred
Definition: mathops.h:97
static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
Definition: utvideodec.c:40
#define u(width,...)
uint8_t * slice_bits
Definition: utvideo.h:85
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:356
int ff_ut_huff_cmp_len(const void *a, const void *b)
Definition: utvideo.c:37
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:201
LLVidDSPContext llviddsp
Definition: utvideo.h:73
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
common internal api header.
static double c[64]
uint16_t sym
Definition: magicyuv.c:48
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:769
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2053
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
void * priv_data
Definition: avcodec.h:1774
int len
VLC_TYPE(* table)[2]
code, bits
Definition: vlc.h:28
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:256
static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:536
#define av_freep(p)
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2261
static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:480
#define MKTAG(a, b, c, d)
Definition: common.h:342
AVCodec ff_utvideo_decoder
Definition: utvideodec.c:946
This structure stores compressed data.
Definition: avcodec.h:1634
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:354
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:994
for(j=16;j >0;--j)
static int decode_plane10(UtvideoContext *c, int plane_no, uint16_t *dst, int step, ptrdiff_t stride, int width, int height, const uint8_t *src, const uint8_t *huff, int use_pred)
Definition: utvideodec.c:127