/* Source: FFmpeg 4.3, libavcodec/mjpegdec.c */
1 /*
2  * MJPEG decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2003 Alex Beregszaszi
5  * Copyright (c) 2003-2004 Michael Niedermayer
6  *
7  * Support for external huffman table, various fixes (AVID workaround),
8  * aspecting, new decode_frame mechanism and apple mjpeg-b support
9  * by Alex Beregszaszi
10  *
11  * This file is part of FFmpeg.
12  *
13  * FFmpeg is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU Lesser General Public
15  * License as published by the Free Software Foundation; either
16  * version 2.1 of the License, or (at your option) any later version.
17  *
18  * FFmpeg is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21  * Lesser General Public License for more details.
22  *
23  * You should have received a copy of the GNU Lesser General Public
24  * License along with FFmpeg; if not, write to the Free Software
25  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26  */
27 
28 /**
29  * @file
30  * MJPEG decoder.
31  */
32 
33 #include "libavutil/imgutils.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/opt.h"
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "copy_block.h"
39 #include "hwconfig.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "jpegtables.h"
43 #include "mjpeg.h"
44 #include "mjpegdec.h"
45 #include "jpeglsdec.h"
46 #include "profiles.h"
47 #include "put_bits.h"
48 #include "tiff.h"
49 #include "exif.h"
50 #include "bytestream.h"
51 
52 
53 static int build_vlc(VLC *vlc, const uint8_t *bits_table,
54  const uint8_t *val_table, int nb_codes,
55  int use_static, int is_ac)
56 {
57  uint8_t huff_size[256] = { 0 };
58  uint16_t huff_code[256];
59  uint16_t huff_sym[256];
60  int i;
61 
62  av_assert0(nb_codes <= 256);
63 
64  ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
65 
66  for (i = 0; i < 256; i++)
67  huff_sym[i] = i + 16 * is_ac;
68 
69  if (is_ac)
70  huff_sym[0] = 16 * 256;
71 
72  return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
73  huff_code, 2, 2, huff_sym, 2, 2, use_static);
74 }
75 
77 {
78  static const struct {
79  int class;
80  int index;
81  const uint8_t *bits;
82  const uint8_t *values;
83  int codes;
84  int length;
85  } ht[] = {
87  avpriv_mjpeg_val_dc, 12, 12 },
89  avpriv_mjpeg_val_dc, 12, 12 },
98  };
99  int i, ret;
100 
101  for (i = 0; i < FF_ARRAY_ELEMS(ht); i++) {
102  ret = build_vlc(&s->vlcs[ht[i].class][ht[i].index],
103  ht[i].bits, ht[i].values, ht[i].codes,
104  0, ht[i].class == 1);
105  if (ret < 0)
106  return ret;
107 
108  if (ht[i].class < 2) {
109  memcpy(s->raw_huffman_lengths[ht[i].class][ht[i].index],
110  ht[i].bits + 1, 16);
111  memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
112  ht[i].values, ht[i].length);
113  }
114  }
115 
116  return 0;
117 }
118 
119 static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
120 {
121  s->buggy_avid = 1;
122  if (len > 14 && buf[12] == 1) /* 1 - NTSC */
123  s->interlace_polarity = 1;
124  if (len > 14 && buf[12] == 2) /* 2 - PAL */
125  s->interlace_polarity = 0;
126  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
127  av_log(s->avctx, AV_LOG_INFO, "AVID: len:%d %d\n", len, len > 14 ? buf[12] : -1);
128 }
129 
static void init_idct(AVCodecContext *avctx)
{
    MJpegDecodeContext *s = avctx->priv_data;

    /* (Re)initialize the IDCT and matching scan-table permutation; also
     * invoked from SOF parsing when bits_per_raw_sample changes. */
    ff_idctdsp_init(&s->idsp, avctx);
    /* NOTE(review): the scan-table argument on the next call was lost in
     * extraction (upstream FFmpeg passes ff_zigzag_direct here) — confirm
     * against the original source before relying on this copy. */
    ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
}
138 
140 {
141  MJpegDecodeContext *s = avctx->priv_data;
142  int ret;
143 
144  if (!s->picture_ptr) {
145  s->picture = av_frame_alloc();
146  if (!s->picture)
147  return AVERROR(ENOMEM);
148  s->picture_ptr = s->picture;
149  }
150 
151  s->avctx = avctx;
152  ff_blockdsp_init(&s->bdsp, avctx);
153  ff_hpeldsp_init(&s->hdsp, avctx->flags);
154  init_idct(avctx);
155  s->buffer_size = 0;
156  s->buffer = NULL;
157  s->start_code = -1;
158  s->first_picture = 1;
159  s->got_picture = 0;
160  s->org_height = avctx->coded_height;
162  avctx->colorspace = AVCOL_SPC_BT470BG;
163  s->hwaccel_pix_fmt = s->hwaccel_sw_pix_fmt = AV_PIX_FMT_NONE;
164 
165  if ((ret = init_default_huffman_tables(s)) < 0)
166  return ret;
167 
168  if (s->extern_huff) {
169  av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
170  if ((ret = init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8)) < 0)
171  return ret;
172  if (ff_mjpeg_decode_dht(s)) {
173  av_log(avctx, AV_LOG_ERROR,
174  "error using external huffman table, switching back to internal\n");
176  }
177  }
178  if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
179  s->interlace_polarity = 1; /* bottom field first */
180  av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
181  } else if (avctx->field_order == AV_FIELD_UNKNOWN) {
182  if (avctx->codec_tag == AV_RL32("MJPG"))
183  s->interlace_polarity = 1;
184  }
185 
186  if ( avctx->extradata_size > 8
187  && AV_RL32(avctx->extradata) == 0x2C
188  && AV_RL32(avctx->extradata+4) == 0x18) {
189  parse_avid(s, avctx->extradata, avctx->extradata_size);
190  }
191 
192  if (avctx->codec->id == AV_CODEC_ID_AMV)
193  s->flipped = 1;
194 
195  return 0;
196 }
197 
198 
199 /* quantize tables */
201 {
202  int len, index, i;
203 
204  len = get_bits(&s->gb, 16) - 2;
205 
206  if (8*len > get_bits_left(&s->gb)) {
207  av_log(s->avctx, AV_LOG_ERROR, "dqt: len %d is too large\n", len);
208  return AVERROR_INVALIDDATA;
209  }
210 
211  while (len >= 65) {
212  int pr = get_bits(&s->gb, 4);
213  if (pr > 1) {
214  av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
215  return AVERROR_INVALIDDATA;
216  }
217  index = get_bits(&s->gb, 4);
218  if (index >= 4)
219  return -1;
220  av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
221  /* read quant table */
222  for (i = 0; i < 64; i++) {
223  s->quant_matrixes[index][i] = get_bits(&s->gb, pr ? 16 : 8);
224  if (s->quant_matrixes[index][i] == 0) {
225  av_log(s->avctx, AV_LOG_ERROR, "dqt: 0 quant value\n");
226  return AVERROR_INVALIDDATA;
227  }
228  }
229 
230  // XXX FIXME fine-tune, and perhaps add dc too
231  s->qscale[index] = FFMAX(s->quant_matrixes[index][1],
232  s->quant_matrixes[index][8]) >> 1;
233  av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
234  index, s->qscale[index]);
235  len -= 1 + 64 * (1+pr);
236  }
237  return 0;
238 }
239 
240 /* decode huffman tables and build VLC decoders */
242 {
243  int len, index, i, class, n, v, code_max;
244  uint8_t bits_table[17];
245  uint8_t val_table[256];
246  int ret = 0;
247 
248  len = get_bits(&s->gb, 16) - 2;
249 
250  if (8*len > get_bits_left(&s->gb)) {
251  av_log(s->avctx, AV_LOG_ERROR, "dht: len %d is too large\n", len);
252  return AVERROR_INVALIDDATA;
253  }
254 
255  while (len > 0) {
256  if (len < 17)
257  return AVERROR_INVALIDDATA;
258  class = get_bits(&s->gb, 4);
259  if (class >= 2)
260  return AVERROR_INVALIDDATA;
261  index = get_bits(&s->gb, 4);
262  if (index >= 4)
263  return AVERROR_INVALIDDATA;
264  n = 0;
265  for (i = 1; i <= 16; i++) {
266  bits_table[i] = get_bits(&s->gb, 8);
267  n += bits_table[i];
268  }
269  len -= 17;
270  if (len < n || n > 256)
271  return AVERROR_INVALIDDATA;
272 
273  code_max = 0;
274  for (i = 0; i < n; i++) {
275  v = get_bits(&s->gb, 8);
276  if (v > code_max)
277  code_max = v;
278  val_table[i] = v;
279  }
280  len -= n;
281 
282  /* build VLC and flush previous vlc if present */
283  ff_free_vlc(&s->vlcs[class][index]);
284  av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
285  class, index, code_max + 1);
286  if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
287  code_max + 1, 0, class > 0)) < 0)
288  return ret;
289 
290  if (class > 0) {
291  ff_free_vlc(&s->vlcs[2][index]);
292  if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
293  code_max + 1, 0, 0)) < 0)
294  return ret;
295  }
296 
297  for (i = 0; i < 16; i++)
298  s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
299  for (i = 0; i < 256; i++)
300  s->raw_huffman_values[class][index][i] = val_table[i];
301  }
302  return 0;
303 }
304 
306 {
307  int len, nb_components, i, width, height, bits, ret, size_change;
308  unsigned pix_fmt_id;
309  int h_count[MAX_COMPONENTS] = { 0 };
310  int v_count[MAX_COMPONENTS] = { 0 };
311 
312  s->cur_scan = 0;
313  memset(s->upscale_h, 0, sizeof(s->upscale_h));
314  memset(s->upscale_v, 0, sizeof(s->upscale_v));
315 
316  len = get_bits(&s->gb, 16);
317  bits = get_bits(&s->gb, 8);
318 
319  if (bits > 16 || bits < 1) {
320  av_log(s->avctx, AV_LOG_ERROR, "bits %d is invalid\n", bits);
321  return AVERROR_INVALIDDATA;
322  }
323 
324  if (s->avctx->bits_per_raw_sample != bits) {
325  av_log(s->avctx, s->avctx->bits_per_raw_sample > 0 ? AV_LOG_INFO : AV_LOG_DEBUG, "Changing bps from %d to %d\n", s->avctx->bits_per_raw_sample, bits);
326  s->avctx->bits_per_raw_sample = bits;
327  init_idct(s->avctx);
328  }
329  if (s->pegasus_rct)
330  bits = 9;
331  if (bits == 9 && !s->pegasus_rct)
332  s->rct = 1; // FIXME ugly
333 
334  if(s->lossless && s->avctx->lowres){
335  av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
336  return -1;
337  }
338 
339  height = get_bits(&s->gb, 16);
340  width = get_bits(&s->gb, 16);
341 
342  // HACK for odd_height.mov
343  if (s->interlaced && s->width == width && s->height == height + 1)
344  height= s->height;
345 
346  av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
347  if (av_image_check_size(width, height, 0, s->avctx) < 0)
348  return AVERROR_INVALIDDATA;
349  if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
350  return AVERROR_INVALIDDATA;
351 
352  nb_components = get_bits(&s->gb, 8);
353  if (nb_components <= 0 ||
354  nb_components > MAX_COMPONENTS)
355  return -1;
356  if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
357  if (nb_components != s->nb_components) {
358  av_log(s->avctx, AV_LOG_ERROR,
359  "nb_components changing in interlaced picture\n");
360  return AVERROR_INVALIDDATA;
361  }
362  }
363  if (s->ls && !(bits <= 8 || nb_components == 1)) {
365  "JPEG-LS that is not <= 8 "
366  "bits/component or 16-bit gray");
367  return AVERROR_PATCHWELCOME;
368  }
369  if (len != 8 + 3 * nb_components) {
370  av_log(s->avctx, AV_LOG_ERROR, "decode_sof0: error, len(%d) mismatch %d components\n", len, nb_components);
371  return AVERROR_INVALIDDATA;
372  }
373 
374  s->nb_components = nb_components;
375  s->h_max = 1;
376  s->v_max = 1;
377  for (i = 0; i < nb_components; i++) {
378  /* component id */
379  s->component_id[i] = get_bits(&s->gb, 8) - 1;
380  h_count[i] = get_bits(&s->gb, 4);
381  v_count[i] = get_bits(&s->gb, 4);
382  /* compute hmax and vmax (only used in interleaved case) */
383  if (h_count[i] > s->h_max)
384  s->h_max = h_count[i];
385  if (v_count[i] > s->v_max)
386  s->v_max = v_count[i];
387  s->quant_index[i] = get_bits(&s->gb, 8);
388  if (s->quant_index[i] >= 4) {
389  av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
390  return AVERROR_INVALIDDATA;
391  }
392  if (!h_count[i] || !v_count[i]) {
393  av_log(s->avctx, AV_LOG_ERROR,
394  "Invalid sampling factor in component %d %d:%d\n",
395  i, h_count[i], v_count[i]);
396  return AVERROR_INVALIDDATA;
397  }
398 
399  av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
400  i, h_count[i], v_count[i],
401  s->component_id[i], s->quant_index[i]);
402  }
403  if ( nb_components == 4
404  && s->component_id[0] == 'C' - 1
405  && s->component_id[1] == 'M' - 1
406  && s->component_id[2] == 'Y' - 1
407  && s->component_id[3] == 'K' - 1)
408  s->adobe_transform = 0;
409 
410  if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
411  avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
412  return AVERROR_PATCHWELCOME;
413  }
414 
415  if (s->bayer) {
416  if (nb_components == 2) {
417  /* Bayer images embedded in DNGs can contain 2 interleaved components and the
418  width stored in their SOF3 markers is the width of each one. We only output
419  a single component, therefore we need to adjust the output image width. We
420  handle the deinterleaving (but not the debayering) in this file. */
421  width *= 2;
422  }
423  /* They can also contain 1 component, which is double the width and half the height
424  of the final image (rows are interleaved). We don't handle the decoding in this
425  file, but leave that to the TIFF/DNG decoder. */
426  }
427 
428  /* if different size, realloc/alloc picture */
429  if (width != s->width || height != s->height || bits != s->bits ||
430  memcmp(s->h_count, h_count, sizeof(h_count)) ||
431  memcmp(s->v_count, v_count, sizeof(v_count))) {
432  size_change = 1;
433 
434  s->width = width;
435  s->height = height;
436  s->bits = bits;
437  memcpy(s->h_count, h_count, sizeof(h_count));
438  memcpy(s->v_count, v_count, sizeof(v_count));
439  s->interlaced = 0;
440  s->got_picture = 0;
441 
442  /* test interlaced mode */
443  if (s->first_picture &&
444  (s->multiscope != 2 || s->avctx->time_base.den >= 25 * s->avctx->time_base.num) &&
445  s->org_height != 0 &&
446  s->height < ((s->org_height * 3) / 4)) {
447  s->interlaced = 1;
448  s->bottom_field = s->interlace_polarity;
449  s->picture_ptr->interlaced_frame = 1;
450  s->picture_ptr->top_field_first = !s->interlace_polarity;
451  height *= 2;
452  }
453 
454  ret = ff_set_dimensions(s->avctx, width, height);
455  if (ret < 0)
456  return ret;
457 
458  s->first_picture = 0;
459  } else {
460  size_change = 0;
461  }
462 
463  if (s->got_picture && s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
464  if (s->progressive) {
465  avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
466  return AVERROR_INVALIDDATA;
467  }
468  } else {
469  if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
470  s->rgb = 1;
471  else if (!s->lossless)
472  s->rgb = 0;
473  /* XXX: not complete test ! */
474  pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
475  (s->h_count[1] << 20) | (s->v_count[1] << 16) |
476  (s->h_count[2] << 12) | (s->v_count[2] << 8) |
477  (s->h_count[3] << 4) | s->v_count[3];
478  av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
479  /* NOTE we do not allocate pictures large enough for the possible
480  * padding of h/v_count being 4 */
481  if (!(pix_fmt_id & 0xD0D0D0D0))
482  pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
483  if (!(pix_fmt_id & 0x0D0D0D0D))
484  pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
485 
486  for (i = 0; i < 8; i++) {
487  int j = 6 + (i&1) - (i&6);
488  int is = (pix_fmt_id >> (4*i)) & 0xF;
489  int js = (pix_fmt_id >> (4*j)) & 0xF;
490 
491  if (is == 1 && js != 2 && (i < 2 || i > 5))
492  js = (pix_fmt_id >> ( 8 + 4*(i&1))) & 0xF;
493  if (is == 1 && js != 2 && (i < 2 || i > 5))
494  js = (pix_fmt_id >> (16 + 4*(i&1))) & 0xF;
495 
496  if (is == 1 && js == 2) {
497  if (i & 1) s->upscale_h[j/2] = 1;
498  else s->upscale_v[j/2] = 1;
499  }
500  }
501 
502  switch (pix_fmt_id) {
503  case 0x11110000: /* for bayer-encoded huffman lossless JPEGs embedded in DNGs */
504  if (!s->bayer)
505  goto unk_pixfmt;
506  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
507  break;
508  case 0x11111100:
509  if (s->rgb)
510  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGR48;
511  else {
512  if ( s->adobe_transform == 0
513  || s->component_id[0] == 'R' - 1 && s->component_id[1] == 'G' - 1 && s->component_id[2] == 'B' - 1) {
514  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_GBRP : AV_PIX_FMT_GBRP16;
515  } else {
516  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
517  else s->avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
518  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
519  }
520  }
521  av_assert0(s->nb_components == 3);
522  break;
523  case 0x11111111:
524  if (s->rgb)
525  s->avctx->pix_fmt = s->bits <= 9 ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA64;
526  else {
527  if (s->adobe_transform == 0 && s->bits <= 8) {
528  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
529  } else {
530  s->avctx->pix_fmt = s->bits <= 8 ? AV_PIX_FMT_YUVA444P : AV_PIX_FMT_YUVA444P16;
531  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
532  }
533  }
534  av_assert0(s->nb_components == 4);
535  break;
536  case 0x22111122:
537  case 0x22111111:
538  if (s->adobe_transform == 0 && s->bits <= 8) {
539  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
540  s->upscale_v[1] = s->upscale_v[2] = 1;
541  s->upscale_h[1] = s->upscale_h[2] = 1;
542  } else if (s->adobe_transform == 2 && s->bits <= 8) {
543  s->avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
544  s->upscale_v[1] = s->upscale_v[2] = 1;
545  s->upscale_h[1] = s->upscale_h[2] = 1;
546  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
547  } else {
548  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
549  else s->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
550  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
551  }
552  av_assert0(s->nb_components == 4);
553  break;
554  case 0x12121100:
555  case 0x22122100:
556  case 0x21211100:
557  case 0x22211200:
558  case 0x22221100:
559  case 0x22112200:
560  case 0x11222200:
561  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
562  else
563  goto unk_pixfmt;
564  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
565  break;
566  case 0x11000000:
567  case 0x13000000:
568  case 0x14000000:
569  case 0x31000000:
570  case 0x33000000:
571  case 0x34000000:
572  case 0x41000000:
573  case 0x43000000:
574  case 0x44000000:
575  if(s->bits <= 8)
576  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
577  else
578  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
579  break;
580  case 0x12111100:
581  case 0x14121200:
582  case 0x14111100:
583  case 0x22211100:
584  case 0x22112100:
585  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
586  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
587  else
588  goto unk_pixfmt;
589  s->upscale_v[0] = s->upscale_v[1] = 1;
590  } else {
591  if (pix_fmt_id == 0x14111100)
592  s->upscale_v[1] = s->upscale_v[2] = 1;
593  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
594  else
595  goto unk_pixfmt;
596  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
597  }
598  break;
599  case 0x21111100:
600  if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
601  if (s->bits <= 8) s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
602  else
603  goto unk_pixfmt;
604  s->upscale_h[0] = s->upscale_h[1] = 1;
605  } else {
606  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
607  else s->avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
608  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
609  }
610  break;
611  case 0x31111100:
612  if (s->bits > 8)
613  goto unk_pixfmt;
614  s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
615  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
616  s->upscale_h[1] = s->upscale_h[2] = 2;
617  break;
618  case 0x22121100:
619  case 0x22111200:
620  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
621  else
622  goto unk_pixfmt;
623  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
624  break;
625  case 0x22111100:
626  case 0x23111100:
627  case 0x42111100:
628  case 0x24111100:
629  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
630  else s->avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
631  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
632  if (pix_fmt_id == 0x42111100) {
633  if (s->bits > 8)
634  goto unk_pixfmt;
635  s->upscale_h[1] = s->upscale_h[2] = 1;
636  } else if (pix_fmt_id == 0x24111100) {
637  if (s->bits > 8)
638  goto unk_pixfmt;
639  s->upscale_v[1] = s->upscale_v[2] = 1;
640  } else if (pix_fmt_id == 0x23111100) {
641  if (s->bits > 8)
642  goto unk_pixfmt;
643  s->upscale_v[1] = s->upscale_v[2] = 2;
644  }
645  break;
646  case 0x41111100:
647  if (s->bits <= 8) s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
648  else
649  goto unk_pixfmt;
650  s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
651  break;
652  default:
653  unk_pixfmt:
654  avpriv_report_missing_feature(s->avctx, "Pixel format 0x%x bits:%d", pix_fmt_id, s->bits);
655  memset(s->upscale_h, 0, sizeof(s->upscale_h));
656  memset(s->upscale_v, 0, sizeof(s->upscale_v));
657  return AVERROR_PATCHWELCOME;
658  }
659  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->avctx->lowres) {
660  avpriv_report_missing_feature(s->avctx, "Lowres for weird subsampling");
661  return AVERROR_PATCHWELCOME;
662  }
663  if ((AV_RB32(s->upscale_h) || AV_RB32(s->upscale_v)) && s->progressive && s->avctx->pix_fmt == AV_PIX_FMT_GBRP) {
664  avpriv_report_missing_feature(s->avctx, "progressive for weird subsampling");
665  return AVERROR_PATCHWELCOME;
666  }
667  if (s->ls) {
668  memset(s->upscale_h, 0, sizeof(s->upscale_h));
669  memset(s->upscale_v, 0, sizeof(s->upscale_v));
670  if (s->nb_components == 3) {
671  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
672  } else if (s->nb_components != 1) {
673  av_log(s->avctx, AV_LOG_ERROR, "Unsupported number of components %d\n", s->nb_components);
674  return AVERROR_PATCHWELCOME;
675  } else if (s->palette_index && s->bits <= 8)
676  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
677  else if (s->bits <= 8)
678  s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
679  else
680  s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
681  }
682 
683  s->pix_desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
684  if (!s->pix_desc) {
685  av_log(s->avctx, AV_LOG_ERROR, "Could not get a pixel format descriptor.\n");
686  return AVERROR_BUG;
687  }
688 
689  if (s->avctx->pix_fmt == s->hwaccel_sw_pix_fmt && !size_change) {
690  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
691  } else {
692  enum AVPixelFormat pix_fmts[] = {
693 #if CONFIG_MJPEG_NVDEC_HWACCEL
695 #endif
696 #if CONFIG_MJPEG_VAAPI_HWACCEL
698 #endif
699  s->avctx->pix_fmt,
701  };
702  s->hwaccel_pix_fmt = ff_get_format(s->avctx, pix_fmts);
703  if (s->hwaccel_pix_fmt < 0)
704  return AVERROR(EINVAL);
705 
706  s->hwaccel_sw_pix_fmt = s->avctx->pix_fmt;
707  s->avctx->pix_fmt = s->hwaccel_pix_fmt;
708  }
709 
710  if (s->avctx->skip_frame == AVDISCARD_ALL) {
711  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
712  s->picture_ptr->key_frame = 1;
713  s->got_picture = 1;
714  return 0;
715  }
716 
717  av_frame_unref(s->picture_ptr);
718  if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
719  return -1;
720  s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
721  s->picture_ptr->key_frame = 1;
722  s->got_picture = 1;
723 
724  for (i = 0; i < 4; i++)
725  s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
726 
727  ff_dlog(s->avctx, "%d %d %d %d %d %d\n",
728  s->width, s->height, s->linesize[0], s->linesize[1],
729  s->interlaced, s->avctx->height);
730 
731  }
732 
733  if ((s->rgb && !s->lossless && !s->ls) ||
734  (!s->rgb && s->ls && s->nb_components > 1) ||
735  (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 && !s->ls)
736  ) {
737  av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
738  return AVERROR_PATCHWELCOME;
739  }
740 
741  /* totally blank picture as progressive JPEG will only add details to it */
742  if (s->progressive) {
743  int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
744  int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
745  for (i = 0; i < s->nb_components; i++) {
746  int size = bw * bh * s->h_count[i] * s->v_count[i];
747  av_freep(&s->blocks[i]);
748  av_freep(&s->last_nnz[i]);
749  s->blocks[i] = av_mallocz_array(size, sizeof(**s->blocks));
750  s->last_nnz[i] = av_mallocz_array(size, sizeof(**s->last_nnz));
751  if (!s->blocks[i] || !s->last_nnz[i])
752  return AVERROR(ENOMEM);
753  s->block_stride[i] = bw * s->h_count[i];
754  }
755  memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
756  }
757 
758  if (s->avctx->hwaccel) {
759  s->hwaccel_picture_private =
760  av_mallocz(s->avctx->hwaccel->frame_priv_data_size);
761  if (!s->hwaccel_picture_private)
762  return AVERROR(ENOMEM);
763 
764  ret = s->avctx->hwaccel->start_frame(s->avctx, s->raw_image_buffer,
765  s->raw_image_buffer_size);
766  if (ret < 0)
767  return ret;
768  }
769 
770  return 0;
771 }
772 
773 static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
774 {
775  int code;
776  code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
777  if (code < 0 || code > 16) {
778  av_log(s->avctx, AV_LOG_WARNING,
779  "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
780  0, dc_index, &s->vlcs[0][dc_index]);
781  return 0xfffff;
782  }
783 
784  if (code)
785  return get_xbits(&s->gb, code);
786  else
787  return 0;
788 }
789 
/* decode block and dequantize */
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
                        int dc_index, int ac_index, uint16_t *quant_matrix)
{
    int code, i, j, level, val;

    /* DC coef: decoded as a delta against the previous block's DC of the
     * same component, dequantized and clipped to int16 range. */
    val = mjpeg_decode_dc(s, dc_index);
    if (val == 0xfffff) { /* sentinel returned on a bad DC code */
        av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
        return AVERROR_INVALIDDATA;
    }
    val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
    val = av_clip_int16(val);
    s->last_dc[component] = val;
    block[0] = val;
    /* AC coefs: each code packs a zero-run (high nibble) and a magnitude
     * size (low nibble).  The AC end-of-block symbol was remapped to
     * 16 * 256 in build_vlc(), so on EOB 'i' jumps past 63 and the loop
     * terminates. */
    i = 0;
    {OPEN_READER(re, &s->gb);
    do {
        UPDATE_CACHE(re, &s->gb);
        GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);

        i += ((unsigned)code) >> 4; /* skip the run of zero coefficients */
        code &= 0xf;
        if (code) {
            /* refill the cache if the magnitude bits may not fit in it */
            if (code > MIN_CACHE_BITS - 16)
                UPDATE_CACHE(re, &s->gb);

            /* branchless sign-extension of the 'code'-bit magnitude */
            {
                int cache = GET_CACHE(re, &s->gb);
                int sign = (~cache) >> 31;
                level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
            }

            LAST_SKIP_BITS(re, &s->gb, code);

            if (i > 63) {
                av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
                return AVERROR_INVALIDDATA;
            }
            /* store dequantized value at the natural-order position */
            j = s->scantable.permutated[i];
            block[j] = level * quant_matrix[i];
        }
    } while (i < 63);
    CLOSE_READER(re, &s->gb);}

    return 0;
}
839 
841  int component, int dc_index,
842  uint16_t *quant_matrix, int Al)
843 {
844  unsigned val;
845  s->bdsp.clear_block(block);
846  val = mjpeg_decode_dc(s, dc_index);
847  if (val == 0xfffff) {
848  av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
849  return AVERROR_INVALIDDATA;
850  }
851  val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
852  s->last_dc[component] = val;
853  block[0] = val;
854  return 0;
855 }
856 
857 /* decode block and dequantize - progressive JPEG version */
859  uint8_t *last_nnz, int ac_index,
860  uint16_t *quant_matrix,
861  int ss, int se, int Al, int *EOBRUN)
862 {
863  int code, i, j, val, run;
864  unsigned level;
865 
866  if (*EOBRUN) {
867  (*EOBRUN)--;
868  return 0;
869  }
870 
871  {
872  OPEN_READER(re, &s->gb);
873  for (i = ss; ; i++) {
874  UPDATE_CACHE(re, &s->gb);
875  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
876 
877  run = ((unsigned) code) >> 4;
878  code &= 0xF;
879  if (code) {
880  i += run;
881  if (code > MIN_CACHE_BITS - 16)
882  UPDATE_CACHE(re, &s->gb);
883 
884  {
885  int cache = GET_CACHE(re, &s->gb);
886  int sign = (~cache) >> 31;
887  level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
888  }
889 
890  LAST_SKIP_BITS(re, &s->gb, code);
891 
892  if (i >= se) {
893  if (i == se) {
894  j = s->scantable.permutated[se];
895  block[j] = level * (quant_matrix[se] << Al);
896  break;
897  }
898  av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
899  return AVERROR_INVALIDDATA;
900  }
901  j = s->scantable.permutated[i];
902  block[j] = level * (quant_matrix[i] << Al);
903  } else {
904  if (run == 0xF) {// ZRL - skip 15 coefficients
905  i += 15;
906  if (i >= se) {
907  av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
908  return AVERROR_INVALIDDATA;
909  }
910  } else {
911  val = (1 << run);
912  if (run) {
913  UPDATE_CACHE(re, &s->gb);
914  val += NEG_USR32(GET_CACHE(re, &s->gb), run);
915  LAST_SKIP_BITS(re, &s->gb, run);
916  }
917  *EOBRUN = val - 1;
918  break;
919  }
920  }
921  }
922  CLOSE_READER(re, &s->gb);
923  }
924 
925  if (i > *last_nnz)
926  *last_nnz = i;
927 
928  return 0;
929 }
930 
/* Progressive-refinement helper: reads one correction bit; when set, moves
 * the already-nonzero coefficient block[j] one (quant << Al) step further
 * from zero.  The ((quant_matrix[i] ^ sign) - sign) form applies the
 * coefficient's sign to the step.  Uses re, s, i, Al from the caller. */
#define REFINE_BIT(j) { \
    UPDATE_CACHE(re, &s->gb); \
    sign = block[j] >> 15; \
    block[j] += SHOW_UBITS(re, &s->gb, 1) * \
                ((quant_matrix[i] ^ sign) - sign) << Al; \
    LAST_SKIP_BITS(re, &s->gb, 1); \
}

/* Advance i over 'run' zero-valued coefficients, refining every nonzero
 * coefficient passed on the way; past 'last' the remaining run is pure
 * zeros, so i can jump directly (bounds-checked against se).
 * Uses/updates i, j, run, last, se from the caller; returns on error. */
#define ZERO_RUN \
for (; ; i++) { \
    if (i > last) { \
        i += run; \
        if (i > se) { \
            av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
            return -1; \
        } \
        break; \
    } \
    j = s->scantable.permutated[i]; \
    if (block[j]) \
        REFINE_BIT(j) \
    else if (run-- == 0) \
        break; \
}
955 
956 /* decode block and dequantize - progressive JPEG refinement pass */
958  uint8_t *last_nnz,
959  int ac_index, uint16_t *quant_matrix,
960  int ss, int se, int Al, int *EOBRUN)
961 {
962  int code, i = ss, j, sign, val, run;
963  int last = FFMIN(se, *last_nnz);
964 
965  OPEN_READER(re, &s->gb);
966  if (*EOBRUN) {
967  (*EOBRUN)--;
968  } else {
969  for (; ; i++) {
970  UPDATE_CACHE(re, &s->gb);
971  GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
972 
973  if (code & 0xF) {
974  run = ((unsigned) code) >> 4;
975  UPDATE_CACHE(re, &s->gb);
976  val = SHOW_UBITS(re, &s->gb, 1);
977  LAST_SKIP_BITS(re, &s->gb, 1);
978  ZERO_RUN;
979  j = s->scantable.permutated[i];
980  val--;
981  block[j] = ((quant_matrix[i] << Al) ^ val) - val;
982  if (i == se) {
983  if (i > *last_nnz)
984  *last_nnz = i;
985  CLOSE_READER(re, &s->gb);
986  return 0;
987  }
988  } else {
989  run = ((unsigned) code) >> 4;
990  if (run == 0xF) {
991  ZERO_RUN;
992  } else {
993  val = run;
994  run = (1 << run);
995  if (val) {
996  UPDATE_CACHE(re, &s->gb);
997  run += SHOW_UBITS(re, &s->gb, val);
998  LAST_SKIP_BITS(re, &s->gb, val);
999  }
1000  *EOBRUN = run - 1;
1001  break;
1002  }
1003  }
1004  }
1005 
1006  if (i > *last_nnz)
1007  *last_nnz = i;
1008  }
1009 
1010  for (; i <= last; i++) {
1011  j = s->scantable.permutated[i];
1012  if (block[j])
1013  REFINE_BIT(j)
1014  }
1015  CLOSE_READER(re, &s->gb);
1016 
1017  return 0;
1018 }
1019 #undef REFINE_BIT
1020 #undef ZERO_RUN
1021 
/* Check for and consume an RSTn restart marker at a restart boundary.
 * Returns 1 when a marker was found and the DC predictors were reset,
 * 0 otherwise. */
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
{
    int i;
    int reset = 0;

    if (s->restart_interval) {
        s->restart_count--;
        /* THP streams reset the DC predictors at every restart boundary,
         * without an explicit marker in the bitstream. */
        if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
            align_get_bits(&s->gb);
            for (i = 0; i < nb_components; i++) /* reset dc */
                s->last_dc[i] = (4 << s->bits);
        }

        /* bits remaining to the next byte boundary, plus one marker byte */
        i = 8 + ((-get_bits_count(&s->gb)) & 7);
        /* skip RSTn */
        if (s->restart_count == 0) {
            /* a run of 1-bits (byte padding) or an 0xFF suggests a marker */
            if( show_bits(&s->gb, i) == (1 << i) - 1
             || show_bits(&s->gb, i) == 0xFF) {
                int pos = get_bits_count(&s->gb);
                align_get_bits(&s->gb);
                /* skip 0xFF fill bytes preceding the marker code */
                while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
                    skip_bits(&s->gb, 8);
                /* 0xD0-0xD7 is an RSTn marker: reset the DC predictors */
                if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
                    for (i = 0; i < nb_components; i++) /* reset dc */
                        s->last_dc[i] = (4 << s->bits);
                    reset = 1;
                } else
                    /* false alarm: rewind to where we started probing */
                    skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
            }
        }
    }
    return reset;
}
1055 
/* Handles 1 to 4 components */
/**
 * Decode one lossless-JPEG scan for RGB(A)/bayer data into
 * s->picture_ptr->data[0], one packed row per MB row.
 *
 * Samples are first predicted into a row buffer (s->ljpeg_buffer), then the
 * whole row is converted/stored according to the active colour transform
 * (rct / pegasus_rct / bayer / plain component order).
 *
 * @param s               decoder context
 * @param nb_components   components in this scan (1..4)
 * @param predictor       lossless JPEG predictor id (Ss field of SOS)
 * @param point_transform Al field of SOS; samples are scaled by 1<<Al
 * @return 0 on success, negative AVERROR on invalid data / OOM
 */
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
{
    int i, mb_x, mb_y;
    unsigned width;
    uint16_t (*buffer)[4];
    int left[4], top[4], topleft[4];
    const int linesize = s->linesize[0];
    /* Keep only s->bits significant bits, aligned above the point transform. */
    const int mask = ((1 << s->bits) - 1) << point_transform;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    int vpred[6];

    /* Only component counts that match the supported packed layouts. */
    if (!s->bayer && s->nb_components < 3)
        return AVERROR_INVALIDDATA;
    if (s->bayer && s->nb_components > 2)
        return AVERROR_INVALIDDATA;
    if (s->nb_components <= 0 || s->nb_components > 4)
        return AVERROR_INVALIDDATA;
    /* This path handles only non-subsampled, lossless scans. */
    if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
        return AVERROR_INVALIDDATA;


    s->restart_count = s->restart_interval;

    /* Treat "no restart interval" as "never restart" so the modulo and
     * countdown logic below needs no special cases. */
    if (s->restart_interval == 0)
        s->restart_interval = INT_MAX;

    if (s->bayer)
        width = s->mb_width / nb_components; /* Interleaved, width stored is the total so need to divide */
    else
        width = s->mb_width;

    /* One row of up-to-4-component 16-bit samples. */
    av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size, width * 4 * sizeof(s->ljpeg_buffer[0][0]));
    if (!s->ljpeg_buffer)
        return AVERROR(ENOMEM);

    buffer = s->ljpeg_buffer;

    /* Seed the first prediction with the mid-scale value. */
    for (i = 0; i < 4; i++)
        buffer[0][i] = 1 << (s->bits - 1);

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        uint8_t *ptr = s->picture_ptr->data[0] + (linesize * mb_y);

        if (s->interlaced && s->bottom_field)
            ptr += linesize >> 1;

        /* Row predictors start from the previous row's first sample. */
        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        if ((mb_y * s->width) % s->restart_interval == 0) {
            for (i = 0; i < 6; i++)
                vpred[i] = 1 << (s->bits-1);
        }

        for (mb_x = 0; mb_x < width; mb_x++) {
            int modified_predictor = predictor;

            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in rgb_scan\n");
                return AVERROR_INVALIDDATA;
            }

            /* At a restart boundary all predictors reset to mid-scale. */
            if (s->restart_interval && !s->restart_count){
                s->restart_count = s->restart_interval;
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
                for(i=0; i<4; i++)
                    top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
            }
            /* Rows adjacent to a resync point (and the leftmost column) have
             * no usable neighbours, so force predictor 1 (left). */
            if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
                modified_predictor = 1;

            for (i=0;i<nb_components;i++) {
                int pred, dc;

                /* Shift the neighbourhood window one sample to the right. */
                topleft[i] = top[i];
                top[i]     = buffer[mb_x][i];

                dc = mjpeg_decode_dc(s, s->dc_index[i]);
                if(dc == 0xFFFFF)
                    return -1;

                if (!s->bayer || mb_x) {
                    pred = left[i];
                } else { /* This path runs only for the first line in bayer images */
                    vpred[i] += dc;
                    pred = vpred[i] - dc;
                }

                PREDICT(pred, topleft[i], top[i], pred, modified_predictor);

                left[i] = buffer[mb_x][i] =
                    mask & (pred + (unsigned)(dc * (1 << point_transform)));
            }

            if (s->restart_interval && !--s->restart_count) {
                align_get_bits(&s->gb);
                skip_bits(&s->gb, 16); /* skip RSTn */
            }
        }
        /* Store the decoded row, applying the colour transform in use. */
        if (s->rct && s->nb_components == 4) {
            /* Reversible colour transform, 4 components (with alpha). */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[4*mb_x + 2] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 0] = buffer[mb_x][3];
            }
        } else if (s->nb_components == 4) {
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[4*mb_x+3-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[4*mb_x+c] = buffer[mb_x][i];
                    }
                }
            }
        } else if (s->rct) {
            /* Reversible colour transform, 3 components. */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->pegasus_rct) {
            /* Pegasus variant: no 0x200 bias in the green reconstruction. */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->bayer) {
            if (nb_components == 1) {
                /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */
                for (mb_x = 0; mb_x < width; mb_x++)
                    ((uint16_t*)ptr)[mb_x] = buffer[mb_x][0];
            } else if (nb_components == 2) {
                for (mb_x = 0; mb_x < width; mb_x++) {
                    ((uint16_t*)ptr)[2*mb_x + 0] = buffer[mb_x][0];
                    ((uint16_t*)ptr)[2*mb_x + 1] = buffer[mb_x][1];
                }
            }
        } else {
            /* Plain packed RGB, component order given by comp_index. */
            for(i=0; i<nb_components; i++) {
                int c= s->comp_index[i];
                if (s->bits <= 8) {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ptr[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                } else if(s->bits == 9) {
                    return AVERROR_PATCHWELCOME;
                } else {
                    for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t*)ptr)[3*mb_x+2-c] = buffer[mb_x][i];
                    }
                }
            }
        }
    }
    return 0;
}
1222 
/**
 * Decode one lossless-JPEG scan with planar (YUV-style) component layout,
 * writing predicted samples directly into the per-component picture planes.
 *
 * The MB loop has two paths: a slow one for samples whose top/left
 * neighbours may be missing (image border, resync rows, interlaced), and a
 * fast one that can always use the full 3-neighbour PREDICT().
 *
 * @param s               decoder context
 * @param predictor       lossless JPEG predictor id (Ss field of SOS)
 * @param point_transform Al field of SOS, adjusted below for storage depth
 * @param nb_components   components in this scan (1..4)
 * @return 0 on success, negative on bitstream error
 */
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
                                 int point_transform, int nb_components)
{
    int i, mb_x, mb_y, mask;
    /* Storage depth rounded up to a whole number of bytes. */
    int bits= (s->bits+7)&~7;
    int resync_mb_y = 0;
    int resync_mb_x = 0;

    /* Scale values up so they occupy the most significant bits of the
     * (8- or 16-bit) storage. */
    point_transform += bits - s->bits;
    mask = ((1 << s->bits) - 1) << point_transform;

    av_assert0(nb_components>=1 && nb_components<=4);

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            if (get_bits_left(&s->gb) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "bitstream end in yuv_scan\n");
                return AVERROR_INVALIDDATA;
            }
            if (s->restart_interval && !s->restart_count){
                s->restart_count = s->restart_interval;
                resync_mb_x = mb_x;
                resync_mb_y = mb_y;
            }

            /* Slow path: some neighbour of this MB may be unavailable. */
            if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
                int toprow  = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
                int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
                for (i = 0; i < nb_components; i++) {
                    uint8_t *ptr;
                    uint16_t *ptr16;
                    int n, h, v, x, y, c, j, linesize;
                    n = s->nb_blocks[i];
                    c = s->comp_index[i];
                    h = s->h_scount[i];
                    v = s->v_scount[i];
                    x = 0;
                    y = 0;
                    linesize= s->linesize[c];

                    /* linesize is in bytes; convert to 16-bit elements. */
                    if(bits>8) linesize /= 2;

                    for(j=0; j<n; j++) {
                        int pred, dc;

                        dc = mjpeg_decode_dc(s, s->dc_index[i]);
                        if(dc == 0xFFFFF)
                            return -1;
                        if (    h * mb_x + x >= s->width
                             || v * mb_y + y >= s->height) {
                            // Nothing to do
                        } else if (bits<=8) {
                            ptr = s->picture_ptr->data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                            /* Choose prediction from whichever neighbours exist. */
                            if(y==0 && toprow){
                                if(x==0 && leftcol){
                                    pred= 1 << (bits - 1);
                                }else{
                                    pred= ptr[-1];
                                }
                            }else{
                                if(x==0 && leftcol){
                                    pred= ptr[-linesize];
                                }else{
                                    PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
                                }
                            }

                            if (s->interlaced && s->bottom_field)
                                ptr += linesize >> 1;
                            pred &= mask;
                            *ptr= pred + ((unsigned)dc << point_transform);
                        }else{
                            /* 16-bit storage variant of the same logic. */
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
                            if(y==0 && toprow){
                                if(x==0 && leftcol){
                                    pred= 1 << (bits - 1);
                                }else{
                                    pred= ptr16[-1];
                                }
                            }else{
                                if(x==0 && leftcol){
                                    pred= ptr16[-linesize];
                                }else{
                                    PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
                                }
                            }

                            if (s->interlaced && s->bottom_field)
                                ptr16 += linesize >> 1;
                            pred &= mask;
                            *ptr16= pred + ((unsigned)dc << point_transform);
                        }
                        /* Advance within the h x v block grid of this MB. */
                        if (++x == h) {
                            x = 0;
                            y++;
                        }
                    }
                }
            } else {
                /* Fast path: all three neighbours are always valid. */
                for (i = 0; i < nb_components; i++) {
                    uint8_t *ptr;
                    uint16_t *ptr16;
                    int n, h, v, x, y, c, j, linesize, dc;
                    n = s->nb_blocks[i];
                    c = s->comp_index[i];
                    h = s->h_scount[i];
                    v = s->v_scount[i];
                    x = 0;
                    y = 0;
                    linesize = s->linesize[c];

                    if(bits>8) linesize /= 2;

                    for (j = 0; j < n; j++) {
                        int pred;

                        dc = mjpeg_decode_dc(s, s->dc_index[i]);
                        if(dc == 0xFFFFF)
                            return -1;
                        if (    h * mb_x + x >= s->width
                             || v * mb_y + y >= s->height) {
                            // Nothing to do
                        } else if (bits<=8) {
                            ptr = s->picture_ptr->data[c] +
                                  (linesize * (v * mb_y + y)) +
                                  (h * mb_x + x); //FIXME optimize this crap
                            PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);

                            pred &= mask;
                            *ptr = pred + ((unsigned)dc << point_transform);
                        }else{
                            ptr16 = (uint16_t*)(s->picture_ptr->data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
                            PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);

                            pred &= mask;
                            *ptr16= pred + ((unsigned)dc << point_transform);
                        }

                        if (++x == h) {
                            x = 0;
                            y++;
                        }
                    }
                }
            }
            if (s->restart_interval && !--s->restart_count) {
                align_get_bits(&s->gb);
                skip_bits(&s->gb, 16); /* skip RSTn */
            }
        }
    }
    return 0;
}
1376 
1378  uint8_t *dst, const uint8_t *src,
1379  int linesize, int lowres)
1380 {
1381  switch (lowres) {
1382  case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
1383  break;
1384  case 1: copy_block4(dst, src, linesize, linesize, 4);
1385  break;
1386  case 2: copy_block2(dst, src, linesize, linesize, 2);
1387  break;
1388  case 3: *dst = *src;
1389  break;
1390  }
1391 }
1392 
1393 static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
1394 {
1395  int block_x, block_y;
1396  int size = 8 >> s->avctx->lowres;
1397  if (s->bits > 8) {
1398  for (block_y=0; block_y<size; block_y++)
1399  for (block_x=0; block_x<size; block_x++)
1400  *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
1401  } else {
1402  for (block_y=0; block_y<size; block_y++)
1403  for (block_x=0; block_x<size; block_x++)
1404  *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
1405  }
1406 }
1407 
1408 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
1409  int Al, const uint8_t *mb_bitmask,
1410  int mb_bitmask_size,
1411  const AVFrame *reference)
1412 {
1413  int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1415  const uint8_t *reference_data[MAX_COMPONENTS];
1416  int linesize[MAX_COMPONENTS];
1417  GetBitContext mb_bitmask_gb = {0}; // initialize to silence gcc warning
1418  int bytes_per_pixel = 1 + (s->bits > 8);
1419 
1420  if (mb_bitmask) {
1421  if (mb_bitmask_size != (s->mb_width * s->mb_height + 7)>>3) {
1422  av_log(s->avctx, AV_LOG_ERROR, "mb_bitmask_size mismatches\n");
1423  return AVERROR_INVALIDDATA;
1424  }
1425  init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
1426  }
1427 
1428  s->restart_count = 0;
1429 
1430  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &chroma_h_shift,
1431  &chroma_v_shift);
1432  chroma_width = AV_CEIL_RSHIFT(s->width, chroma_h_shift);
1433  chroma_height = AV_CEIL_RSHIFT(s->height, chroma_v_shift);
1434 
1435  for (i = 0; i < nb_components; i++) {
1436  int c = s->comp_index[i];
1437  data[c] = s->picture_ptr->data[c];
1438  reference_data[c] = reference ? reference->data[c] : NULL;
1439  linesize[c] = s->linesize[c];
1440  s->coefs_finished[c] |= 1;
1441  }
1442 
1443  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1444  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1445  const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
1446 
1447  if (s->restart_interval && !s->restart_count)
1448  s->restart_count = s->restart_interval;
1449 
1450  if (get_bits_left(&s->gb) < 0) {
1451  av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
1452  -get_bits_left(&s->gb));
1453  return AVERROR_INVALIDDATA;
1454  }
1455  for (i = 0; i < nb_components; i++) {
1456  uint8_t *ptr;
1457  int n, h, v, x, y, c, j;
1458  int block_offset;
1459  n = s->nb_blocks[i];
1460  c = s->comp_index[i];
1461  h = s->h_scount[i];
1462  v = s->v_scount[i];
1463  x = 0;
1464  y = 0;
1465  for (j = 0; j < n; j++) {
1466  block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
1467  (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
1468 
1469  if (s->interlaced && s->bottom_field)
1470  block_offset += linesize[c] >> 1;
1471  if ( 8*(h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width : s->width)
1472  && 8*(v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
1473  ptr = data[c] + block_offset;
1474  } else
1475  ptr = NULL;
1476  if (!s->progressive) {
1477  if (copy_mb) {
1478  if (ptr)
1479  mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
1480  linesize[c], s->avctx->lowres);
1481 
1482  } else {
1483  s->bdsp.clear_block(s->block);
1484  if (decode_block(s, s->block, i,
1485  s->dc_index[i], s->ac_index[i],
1486  s->quant_matrixes[s->quant_sindex[i]]) < 0) {
1487  av_log(s->avctx, AV_LOG_ERROR,
1488  "error y=%d x=%d\n", mb_y, mb_x);
1489  return AVERROR_INVALIDDATA;
1490  }
1491  if (ptr) {
1492  s->idsp.idct_put(ptr, linesize[c], s->block);
1493  if (s->bits & 7)
1494  shift_output(s, ptr, linesize[c]);
1495  }
1496  }
1497  } else {
1498  int block_idx = s->block_stride[c] * (v * mb_y + y) +
1499  (h * mb_x + x);
1500  int16_t *block = s->blocks[c][block_idx];
1501  if (Ah)
1502  block[0] += get_bits1(&s->gb) *
1503  s->quant_matrixes[s->quant_sindex[i]][0] << Al;
1504  else if (decode_dc_progressive(s, block, i, s->dc_index[i],
1505  s->quant_matrixes[s->quant_sindex[i]],
1506  Al) < 0) {
1507  av_log(s->avctx, AV_LOG_ERROR,
1508  "error y=%d x=%d\n", mb_y, mb_x);
1509  return AVERROR_INVALIDDATA;
1510  }
1511  }
1512  ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
1513  ff_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
1514  mb_x, mb_y, x, y, c, s->bottom_field,
1515  (v * mb_y + y) * 8, (h * mb_x + x) * 8);
1516  if (++x == h) {
1517  x = 0;
1518  y++;
1519  }
1520  }
1521  }
1522 
1523  handle_rstn(s, nb_components);
1524  }
1525  }
1526  return 0;
1527 }
1528 
1530  int se, int Ah, int Al)
1531 {
1532  int mb_x, mb_y;
1533  int EOBRUN = 0;
1534  int c = s->comp_index[0];
1535  uint16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
1536 
1537  av_assert0(ss>=0 && Ah>=0 && Al>=0);
1538  if (se < ss || se > 63) {
1539  av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
1540  return AVERROR_INVALIDDATA;
1541  }
1542 
1543  // s->coefs_finished is a bitmask for coefficients coded
1544  // ss and se are parameters telling start and end coefficients
1545  s->coefs_finished[c] |= (2ULL << se) - (1ULL << ss);
1546 
1547  s->restart_count = 0;
1548 
1549  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1550  int block_idx = mb_y * s->block_stride[c];
1551  int16_t (*block)[64] = &s->blocks[c][block_idx];
1552  uint8_t *last_nnz = &s->last_nnz[c][block_idx];
1553  if (get_bits_left(&s->gb) <= 0) {
1554  av_log(s->avctx, AV_LOG_ERROR, "bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1555  return AVERROR_INVALIDDATA;
1556  }
1557  for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
1558  int ret;
1559  if (s->restart_interval && !s->restart_count)
1560  s->restart_count = s->restart_interval;
1561 
1562  if (Ah)
1563  ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
1564  quant_matrix, ss, se, Al, &EOBRUN);
1565  else
1566  ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
1567  quant_matrix, ss, se, Al, &EOBRUN);
1568  if (ret < 0) {
1569  av_log(s->avctx, AV_LOG_ERROR,
1570  "error y=%d x=%d\n", mb_y, mb_x);
1571  return AVERROR_INVALIDDATA;
1572  }
1573 
1574  if (handle_rstn(s, 0))
1575  EOBRUN = 0;
1576  }
1577  }
1578  return 0;
1579 }
1580 
1582 {
1583  int mb_x, mb_y;
1584  int c;
1585  const int bytes_per_pixel = 1 + (s->bits > 8);
1586  const int block_size = s->lossless ? 1 : 8;
1587 
1588  for (c = 0; c < s->nb_components; c++) {
1589  uint8_t *data = s->picture_ptr->data[c];
1590  int linesize = s->linesize[c];
1591  int h = s->h_max / s->h_count[c];
1592  int v = s->v_max / s->v_count[c];
1593  int mb_width = (s->width + h * block_size - 1) / (h * block_size);
1594  int mb_height = (s->height + v * block_size - 1) / (v * block_size);
1595 
1596  if (~s->coefs_finished[c])
1597  av_log(s->avctx, AV_LOG_WARNING, "component %d is incomplete\n", c);
1598 
1599  if (s->interlaced && s->bottom_field)
1600  data += linesize >> 1;
1601 
1602  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1603  uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
1604  int block_idx = mb_y * s->block_stride[c];
1605  int16_t (*block)[64] = &s->blocks[c][block_idx];
1606  for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
1607  s->idsp.idct_put(ptr, linesize, *block);
1608  if (s->bits & 7)
1609  shift_output(s, ptr, linesize);
1610  ptr += bytes_per_pixel*8 >> s->avctx->lowres;
1611  }
1612  }
1613  }
1614 }
1615 
1617  int mb_bitmask_size, const AVFrame *reference)
1618 {
1619  int len, nb_components, i, h, v, predictor, point_transform;
1620  int index, id, ret;
1621  const int block_size = s->lossless ? 1 : 8;
1622  int ilv, prev_shift;
1623 
1624  if (!s->got_picture) {
1625  av_log(s->avctx, AV_LOG_WARNING,
1626  "Can not process SOS before SOF, skipping\n");
1627  return -1;
1628  }
1629 
1630  if (reference) {
1631  if (reference->width != s->picture_ptr->width ||
1632  reference->height != s->picture_ptr->height ||
1633  reference->format != s->picture_ptr->format) {
1634  av_log(s->avctx, AV_LOG_ERROR, "Reference mismatching\n");
1635  return AVERROR_INVALIDDATA;
1636  }
1637  }
1638 
1639  /* XXX: verify len field validity */
1640  len = get_bits(&s->gb, 16);
1641  nb_components = get_bits(&s->gb, 8);
1642  if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
1644  "decode_sos: nb_components (%d)",
1645  nb_components);
1646  return AVERROR_PATCHWELCOME;
1647  }
1648  if (len != 6 + 2 * nb_components) {
1649  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
1650  return AVERROR_INVALIDDATA;
1651  }
1652  for (i = 0; i < nb_components; i++) {
1653  id = get_bits(&s->gb, 8) - 1;
1654  av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
1655  /* find component index */
1656  for (index = 0; index < s->nb_components; index++)
1657  if (id == s->component_id[index])
1658  break;
1659  if (index == s->nb_components) {
1660  av_log(s->avctx, AV_LOG_ERROR,
1661  "decode_sos: index(%d) out of components\n", index);
1662  return AVERROR_INVALIDDATA;
1663  }
1664  /* Metasoft MJPEG codec has Cb and Cr swapped */
1665  if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
1666  && nb_components == 3 && s->nb_components == 3 && i)
1667  index = 3 - i;
1668 
1669  s->quant_sindex[i] = s->quant_index[index];
1670  s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
1671  s->h_scount[i] = s->h_count[index];
1672  s->v_scount[i] = s->v_count[index];
1673 
1674  if((nb_components == 1 || nb_components == 3) && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
1675  index = (index+2)%3;
1676 
1677  s->comp_index[i] = index;
1678 
1679  s->dc_index[i] = get_bits(&s->gb, 4);
1680  s->ac_index[i] = get_bits(&s->gb, 4);
1681 
1682  if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
1683  s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
1684  goto out_of_range;
1685  if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
1686  goto out_of_range;
1687  }
1688 
1689  predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
1690  ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
1691  if(s->avctx->codec_tag != AV_RL32("CJPG")){
1692  prev_shift = get_bits(&s->gb, 4); /* Ah */
1693  point_transform = get_bits(&s->gb, 4); /* Al */
1694  }else
1695  prev_shift = point_transform = 0;
1696 
1697  if (nb_components > 1) {
1698  /* interleaved stream */
1699  s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
1700  s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
1701  } else if (!s->ls) { /* skip this for JPEG-LS */
1702  h = s->h_max / s->h_scount[0];
1703  v = s->v_max / s->v_scount[0];
1704  s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
1705  s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
1706  s->nb_blocks[0] = 1;
1707  s->h_scount[0] = 1;
1708  s->v_scount[0] = 1;
1709  }
1710 
1711  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1712  av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
1713  s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
1714  predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
1715  s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
1716 
1717 
1718  /* mjpeg-b can have padding bytes between sos and image data, skip them */
1719  for (i = s->mjpb_skiptosod; i > 0; i--)
1720  skip_bits(&s->gb, 8);
1721 
1722 next_field:
1723  for (i = 0; i < nb_components; i++)
1724  s->last_dc[i] = (4 << s->bits);
1725 
1726  if (s->avctx->hwaccel) {
1727  int bytes_to_start = get_bits_count(&s->gb) / 8;
1728  av_assert0(bytes_to_start >= 0 &&
1729  s->raw_scan_buffer_size >= bytes_to_start);
1730 
1731  ret = s->avctx->hwaccel->decode_slice(s->avctx,
1732  s->raw_scan_buffer + bytes_to_start,
1733  s->raw_scan_buffer_size - bytes_to_start);
1734  if (ret < 0)
1735  return ret;
1736 
1737  } else if (s->lossless) {
1738  av_assert0(s->picture_ptr == s->picture);
1739  if (CONFIG_JPEGLS_DECODER && s->ls) {
1740 // for () {
1741 // reset_ls_coding_parameters(s, 0);
1742 
1743  if ((ret = ff_jpegls_decode_picture(s, predictor,
1744  point_transform, ilv)) < 0)
1745  return ret;
1746  } else {
1747  if (s->rgb || s->bayer) {
1748  if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
1749  return ret;
1750  } else {
1751  if ((ret = ljpeg_decode_yuv_scan(s, predictor,
1752  point_transform,
1753  nb_components)) < 0)
1754  return ret;
1755  }
1756  }
1757  } else {
1758  if (s->progressive && predictor) {
1759  av_assert0(s->picture_ptr == s->picture);
1760  if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
1761  ilv, prev_shift,
1762  point_transform)) < 0)
1763  return ret;
1764  } else {
1765  if ((ret = mjpeg_decode_scan(s, nb_components,
1766  prev_shift, point_transform,
1767  mb_bitmask, mb_bitmask_size, reference)) < 0)
1768  return ret;
1769  }
1770  }
1771 
1772  if (s->interlaced &&
1773  get_bits_left(&s->gb) > 32 &&
1774  show_bits(&s->gb, 8) == 0xFF) {
1775  GetBitContext bak = s->gb;
1776  align_get_bits(&bak);
1777  if (show_bits(&bak, 16) == 0xFFD1) {
1778  av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
1779  s->gb = bak;
1780  skip_bits(&s->gb, 16);
1781  s->bottom_field ^= 1;
1782 
1783  goto next_field;
1784  }
1785  }
1786 
1787  emms_c();
1788  return 0;
1789  out_of_range:
1790  av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
1791  return AVERROR_INVALIDDATA;
1792 }
1793 
1795 {
1796  if (get_bits(&s->gb, 16) != 4)
1797  return AVERROR_INVALIDDATA;
1798  s->restart_interval = get_bits(&s->gb, 16);
1799  s->restart_count = 0;
1800  av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
1801  s->restart_interval);
1802 
1803  return 0;
1804 }
1805 
1807 {
1808  int len, id, i;
1809 
1810  len = get_bits(&s->gb, 16);
1811  if (len < 6) {
1812  if (s->bayer) {
1813  // Pentax K-1 (digital camera) JPEG images embedded in DNG images contain unknown APP0 markers
1814  av_log(s->avctx, AV_LOG_WARNING, "skipping APPx (len=%"PRId32") for bayer-encoded image\n", len);
1815  skip_bits(&s->gb, len);
1816  return 0;
1817  } else
1818  return AVERROR_INVALIDDATA;
1819  }
1820  if (8 * len > get_bits_left(&s->gb))
1821  return AVERROR_INVALIDDATA;
1822 
1823  id = get_bits_long(&s->gb, 32);
1824  len -= 6;
1825 
1826  if (s->avctx->debug & FF_DEBUG_STARTCODE)
1827  av_log(s->avctx, AV_LOG_DEBUG, "APPx (%s / %8X) len=%d\n",
1828  av_fourcc2str(av_bswap32(id)), id, len);
1829 
1830  /* Buggy AVID, it puts EOI only at every 10th frame. */
1831  /* Also, this fourcc is used by non-avid files too, it holds some
1832  information, but it's always present in AVID-created files. */
1833  if (id == AV_RB32("AVI1")) {
1834  /* structure:
1835  4bytes AVI1
1836  1bytes polarity
1837  1bytes always zero
1838  4bytes field_size
1839  4bytes field_size_less_padding
1840  */
1841  s->buggy_avid = 1;
1842  i = get_bits(&s->gb, 8); len--;
1843  av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
1844  goto out;
1845  }
1846 
1847  if (id == AV_RB32("JFIF")) {
1848  int t_w, t_h, v1, v2;
1849  if (len < 8)
1850  goto out;
1851  skip_bits(&s->gb, 8); /* the trailing zero-byte */
1852  v1 = get_bits(&s->gb, 8);
1853  v2 = get_bits(&s->gb, 8);
1854  skip_bits(&s->gb, 8);
1855 
1856  s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
1857  s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
1858  if ( s->avctx->sample_aspect_ratio.num <= 0
1859  || s->avctx->sample_aspect_ratio.den <= 0) {
1860  s->avctx->sample_aspect_ratio.num = 0;
1861  s->avctx->sample_aspect_ratio.den = 1;
1862  }
1863 
1864  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1865  av_log(s->avctx, AV_LOG_INFO,
1866  "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1867  v1, v2,
1868  s->avctx->sample_aspect_ratio.num,
1869  s->avctx->sample_aspect_ratio.den);
1870 
1871  len -= 8;
1872  if (len >= 2) {
1873  t_w = get_bits(&s->gb, 8);
1874  t_h = get_bits(&s->gb, 8);
1875  if (t_w && t_h) {
1876  /* skip thumbnail */
1877  if (len -10 - (t_w * t_h * 3) > 0)
1878  len -= t_w * t_h * 3;
1879  }
1880  len -= 2;
1881  }
1882  goto out;
1883  }
1884 
1885  if ( id == AV_RB32("Adob")
1886  && len >= 7
1887  && show_bits(&s->gb, 8) == 'e'
1888  && show_bits_long(&s->gb, 32) != AV_RB32("e_CM")) {
1889  skip_bits(&s->gb, 8); /* 'e' */
1890  skip_bits(&s->gb, 16); /* version */
1891  skip_bits(&s->gb, 16); /* flags0 */
1892  skip_bits(&s->gb, 16); /* flags1 */
1893  s->adobe_transform = get_bits(&s->gb, 8);
1894  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1895  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found, transform=%d\n", s->adobe_transform);
1896  len -= 7;
1897  goto out;
1898  }
1899 
1900  if (id == AV_RB32("LJIF")) {
1901  int rgb = s->rgb;
1902  int pegasus_rct = s->pegasus_rct;
1903  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1904  av_log(s->avctx, AV_LOG_INFO,
1905  "Pegasus lossless jpeg header found\n");
1906  skip_bits(&s->gb, 16); /* version ? */
1907  skip_bits(&s->gb, 16); /* unknown always 0? */
1908  skip_bits(&s->gb, 16); /* unknown always 0? */
1909  skip_bits(&s->gb, 16); /* unknown always 0? */
1910  switch (i=get_bits(&s->gb, 8)) {
1911  case 1:
1912  rgb = 1;
1913  pegasus_rct = 0;
1914  break;
1915  case 2:
1916  rgb = 1;
1917  pegasus_rct = 1;
1918  break;
1919  default:
1920  av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
1921  }
1922 
1923  len -= 9;
1924  if (s->got_picture)
1925  if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) {
1926  av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n");
1927  goto out;
1928  }
1929 
1930  s->rgb = rgb;
1931  s->pegasus_rct = pegasus_rct;
1932 
1933  goto out;
1934  }
1935  if (id == AV_RL32("colr") && len > 0) {
1936  s->colr = get_bits(&s->gb, 8);
1937  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1938  av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
1939  len --;
1940  goto out;
1941  }
1942  if (id == AV_RL32("xfrm") && len > 0) {
1943  s->xfrm = get_bits(&s->gb, 8);
1944  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1945  av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
1946  len --;
1947  goto out;
1948  }
1949 
1950  /* JPS extension by VRex */
1951  if (s->start_code == APP3 && id == AV_RB32("_JPS") && len >= 10) {
1952  int flags, layout, type;
1953  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1954  av_log(s->avctx, AV_LOG_INFO, "_JPSJPS_\n");
1955 
1956  skip_bits(&s->gb, 32); len -= 4; /* JPS_ */
1957  skip_bits(&s->gb, 16); len -= 2; /* block length */
1958  skip_bits(&s->gb, 8); /* reserved */
1959  flags = get_bits(&s->gb, 8);
1960  layout = get_bits(&s->gb, 8);
1961  type = get_bits(&s->gb, 8);
1962  len -= 4;
1963 
1964  av_freep(&s->stereo3d);
1965  s->stereo3d = av_stereo3d_alloc();
1966  if (!s->stereo3d) {
1967  goto out;
1968  }
1969  if (type == 0) {
1970  s->stereo3d->type = AV_STEREO3D_2D;
1971  } else if (type == 1) {
1972  switch (layout) {
1973  case 0x01:
1974  s->stereo3d->type = AV_STEREO3D_LINES;
1975  break;
1976  case 0x02:
1977  s->stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1978  break;
1979  case 0x03:
1980  s->stereo3d->type = AV_STEREO3D_TOPBOTTOM;
1981  break;
1982  }
1983  if (!(flags & 0x04)) {
1984  s->stereo3d->flags = AV_STEREO3D_FLAG_INVERT;
1985  }
1986  }
1987  goto out;
1988  }
1989 
1990  /* EXIF metadata */
1991  if (s->start_code == APP1 && id == AV_RB32("Exif") && len >= 2) {
1992  GetByteContext gbytes;
1993  int ret, le, ifd_offset, bytes_read;
1994  const uint8_t *aligned;
1995 
1996  skip_bits(&s->gb, 16); // skip padding
1997  len -= 2;
1998 
1999  // init byte wise reading
2000  aligned = align_get_bits(&s->gb);
2001  bytestream2_init(&gbytes, aligned, len);
2002 
2003  // read TIFF header
2004  ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
2005  if (ret) {
2006  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
2007  } else {
2008  bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
2009 
2010  // read 0th IFD and store the metadata
2011  // (return values > 0 indicate the presence of subimage metadata)
2012  ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
2013  if (ret < 0) {
2014  av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
2015  }
2016  }
2017 
2018  bytes_read = bytestream2_tell(&gbytes);
2019  skip_bits(&s->gb, bytes_read << 3);
2020  len -= bytes_read;
2021 
2022  goto out;
2023  }
2024 
2025  /* Apple MJPEG-A */
2026  if ((s->start_code == APP1) && (len > (0x28 - 8))) {
2027  id = get_bits_long(&s->gb, 32);
2028  len -= 4;
2029  /* Apple MJPEG-A */
2030  if (id == AV_RB32("mjpg")) {
2031  /* structure:
2032  4bytes field size
2033  4bytes pad field size
2034  4bytes next off
2035  4bytes quant off
2036  4bytes huff off
2037  4bytes image off
2038  4bytes scan off
2039  4bytes data off
2040  */
2041  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2042  av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
2043  }
2044  }
2045 
2046  if (s->start_code == APP2 && id == AV_RB32("ICC_") && len >= 10) {
2047  int id2;
2048  unsigned seqno;
2049  unsigned nummarkers;
2050 
2051  id = get_bits_long(&s->gb, 32);
2052  id2 = get_bits(&s->gb, 24);
2053  len -= 7;
2054  if (id != AV_RB32("PROF") || id2 != AV_RB24("ILE")) {
2055  av_log(s->avctx, AV_LOG_WARNING, "Invalid ICC_PROFILE header in APP2\n");
2056  goto out;
2057  }
2058 
2059  skip_bits(&s->gb, 8);
2060  seqno = get_bits(&s->gb, 8);
2061  len -= 2;
2062  if (seqno == 0) {
2063  av_log(s->avctx, AV_LOG_WARNING, "Invalid sequence number in APP2\n");
2064  goto out;
2065  }
2066 
2067  nummarkers = get_bits(&s->gb, 8);
2068  len -= 1;
2069  if (nummarkers == 0) {
2070  av_log(s->avctx, AV_LOG_WARNING, "Invalid number of markers coded in APP2\n");
2071  goto out;
2072  } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
2073  av_log(s->avctx, AV_LOG_WARNING, "Mistmatch in coded number of ICC markers between markers\n");
2074  goto out;
2075  } else if (seqno > nummarkers) {
2076  av_log(s->avctx, AV_LOG_WARNING, "Mismatching sequence number and coded number of ICC markers\n");
2077  goto out;
2078  }
2079 
2080  /* Allocate if this is the first APP2 we've seen. */
2081  if (s->iccnum == 0) {
2082  s->iccdata = av_mallocz(nummarkers * sizeof(*(s->iccdata)));
2083  s->iccdatalens = av_mallocz(nummarkers * sizeof(*(s->iccdatalens)));
2084  if (!s->iccdata || !s->iccdatalens) {
2085  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data arrays\n");
2086  return AVERROR(ENOMEM);
2087  }
2088  s->iccnum = nummarkers;
2089  }
2090 
2091  if (s->iccdata[seqno - 1]) {
2092  av_log(s->avctx, AV_LOG_WARNING, "Duplicate ICC sequence number\n");
2093  goto out;
2094  }
2095 
2096  s->iccdatalens[seqno - 1] = len;
2097  s->iccdata[seqno - 1] = av_malloc(len);
2098  if (!s->iccdata[seqno - 1]) {
2099  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate ICC data buffer\n");
2100  return AVERROR(ENOMEM);
2101  }
2102 
2103  memcpy(s->iccdata[seqno - 1], align_get_bits(&s->gb), len);
2104  skip_bits(&s->gb, len << 3);
2105  len = 0;
2106  s->iccread++;
2107 
2108  if (s->iccread > s->iccnum)
2109  av_log(s->avctx, AV_LOG_WARNING, "Read more ICC markers than are supposed to be coded\n");
2110  }
2111 
2112 out:
2113  /* slow but needed for extreme adobe jpegs */
2114  if (len < 0)
2115  av_log(s->avctx, AV_LOG_ERROR,
2116  "mjpeg: error, decode_app parser read over the end\n");
2117  while (--len > 0)
2118  skip_bits(&s->gb, 8);
2119 
2120  return 0;
2121 }
2122 
/* NOTE(review): doxygen extraction dropped original line 2123, the function
 * signature — presumably `static int mjpeg_decode_com(MJpegDecodeContext *s)`;
 * confirm against upstream mjpegdec.c.
 *
 * Parses a COM (comment) marker segment: reads the 16-bit segment length,
 * copies the comment text into a temporary NUL-terminated buffer, and probes
 * it for known encoder signatures that require decoder workarounds
 * (AVID, ITU601 colorspace tag, flipped-image encoders, MULTISCOPE II).
 * Returns 0 on success or AVERROR(ENOMEM). */
2124 {
2125  int len = get_bits(&s->gb, 16);
 /* len includes its own 2 length bytes; also verify the payload really
  * fits in the remaining bitstream before reading it. */
2126  if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
2127  int i;
 /* len - 1 bytes: up to len - 2 comment chars plus a terminating NUL. */
2128  char *cbuf = av_malloc(len - 1);
2129  if (!cbuf)
2130  return AVERROR(ENOMEM);
2131 
2132  for (i = 0; i < len - 2; i++)
2133  cbuf[i] = get_bits(&s->gb, 8);
 /* Strip a single trailing newline, otherwise NUL-terminate in place. */
2134  if (i > 0 && cbuf[i - 1] == '\n')
2135  cbuf[i - 1] = 0;
2136  else
2137  cbuf[i] = 0;
2138 
2139  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2140  av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
2141 
2142  /* buggy avid, it puts EOI only at every 10th frame */
2143  if (!strncmp(cbuf, "AVID", 4)) {
2144  parse_avid(s, cbuf, len);
2145  } else if (!strcmp(cbuf, "CS=ITU601"))
2146  s->cs_itu601 = 1;
 /* These encoders store the image bottom-up; remember to flip on output. */
2147  else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
2148  (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
2149  s->flipped = 1;
2150  else if (!strcmp(cbuf, "MULTISCOPE II")) {
2151  s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
2152  s->multiscope = 2;
2153  }
2154 
2155  av_free(cbuf);
2156  }
2157 
2158  return 0;
2159 }
2160 
2161 /* return the 8 bit start code value and update the search
2162  state. Return -1 if no start code found */
2163 static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
2164 {
2165  const uint8_t *buf_ptr;
2166  unsigned int v, v2;
2167  int val;
2168  int skipped = 0;
2169 
2170  buf_ptr = *pbuf_ptr;
2171  while (buf_end - buf_ptr > 1) {
2172  v = *buf_ptr++;
2173  v2 = *buf_ptr;
2174  if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
2175  val = *buf_ptr++;
2176  goto found;
2177  }
2178  skipped++;
2179  }
2180  buf_ptr = buf_end;
2181  val = -1;
2182 found:
2183  ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
2184  *pbuf_ptr = buf_ptr;
2185  return val;
2186 }
2187 
/* NOTE(review): doxygen extraction dropped original line 2188, the first line
 * of the signature — presumably
 * `int ff_mjpeg_find_marker(MJpegDecodeContext *s,` — confirm upstream.
 *
 * Finds the next marker and, for SOS payloads, unescapes the entropy-coded
 * data into s->buffer:
 *  - baseline/progressive JPEG: collapse 0xFF 0x00 stuffing and stop at the
 *    first real (non-RST) marker;
 *  - JPEG-LS: bit-level unescaping via a PutBitContext (0xFF is followed by
 *    only 7 payload bits);
 *  - any other marker: pass the input through untouched.
 * Returns the start code found, or a negative value on EOF/alloc failure. */
2189  const uint8_t **buf_ptr, const uint8_t *buf_end,
2190  const uint8_t **unescaped_buf_ptr,
2191  int *unescaped_buf_size)
2192 {
2193  int start_code;
2194  start_code = find_marker(buf_ptr, buf_end);
2195 
 /* Worst case the unescaped data is as large as the input span. */
2196  av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
2197  if (!s->buffer)
2198  return AVERROR(ENOMEM);
2199 
2200  /* unescape buffer of SOS, use special treatment for JPEG-LS */
2201  if (start_code == SOS && !s->ls) {
2202  const uint8_t *src = *buf_ptr;
2203  const uint8_t *ptr = src;
2204  uint8_t *dst = s->buffer;
2205 
 /* Flush the pending [src, ptr) span to dst, minus `skip` trailing bytes. */
2206  #define copy_data_segment(skip) do { \
2207  ptrdiff_t length = (ptr - src) - (skip); \
2208  if (length > 0) { \
2209  memcpy(dst, src, length); \
2210  dst += length; \
2211  src = ptr; \
2212  } \
2213  } while (0)
2214 
 /* THP streams contain no marker escaping; copy verbatim. */
2215  if (s->avctx->codec_id == AV_CODEC_ID_THP) {
2216  ptr = buf_end;
2217  copy_data_segment(0);
2218  } else {
2219  while (ptr < buf_end) {
2220  uint8_t x = *(ptr++);
2221 
2222  if (x == 0xff) {
2223  ptrdiff_t skip = 0;
 /* Swallow runs of fill 0xFF bytes; x ends up as the byte after them. */
2224  while (ptr < buf_end && x == 0xff) {
2225  x = *(ptr++);
2226  skip++;
2227  }
2228 
2229  /* 0xFF, 0xFF, ... */
2230  if (skip > 1) {
2231  copy_data_segment(skip);
2232 
2233  /* decrement src as it is equal to ptr after the
2234  * copy_data_segment macro and we might want to
2235  * copy the current value of x later on */
2236  src--;
2237  }
2238 
 /* Anything outside RST0..RST7 ends the scan: 0x00 is byte stuffing
  * (dropped), any other code is a real marker (stop). */
2239  if (x < RST0 || x > RST7) {
2240  copy_data_segment(1);
2241  if (x)
2242  break;
2243  }
2244  }
2245  }
2246  if (src < ptr)
2247  copy_data_segment(0);
2248  }
2249  #undef copy_data_segment
2250 
2251  *unescaped_buf_ptr = s->buffer;
2252  *unescaped_buf_size = dst - s->buffer;
2253  memset(s->buffer + *unescaped_buf_size, 0,
 /* NOTE(review): extraction dropped line 2254, presumably the memset size
  * argument (AV_INPUT_BUFFER_PADDING_SIZE) — confirm upstream. */
2255 
2256  av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %"PTRDIFF_SPECIFIER" bytes\n",
2257  (buf_end - *buf_ptr) - (dst - s->buffer));
2258  } else if (start_code == SOS && s->ls) {
2259  const uint8_t *src = *buf_ptr;
2260  uint8_t *dst = s->buffer;
2261  int bit_count = 0;
2262  int t = 0, b = 0;
2263  PutBitContext pb;
2264 
2265  /* find marker */
 /* First pass: find where the scan data ends (a 0xFF followed by a byte
  * with the high bit set is a real marker). */
2266  while (src + t < buf_end) {
2267  uint8_t x = src[t++];
2268  if (x == 0xff) {
2269  while ((src + t < buf_end) && x == 0xff)
2270  x = src[t++];
2271  if (x & 0x80) {
2272  t -= FFMIN(2, t);
2273  break;
2274  }
2275  }
2276  }
2277  bit_count = t * 8;
2278  init_put_bits(&pb, dst, t);
2279 
2280  /* unescape bitstream */
 /* Second pass: after each 0xFF only 7 bits of payload follow, so the
  * output shrinks by one bit per escape. */
2281  while (b < t) {
2282  uint8_t x = src[b++];
2283  put_bits(&pb, 8, x);
2284  if (x == 0xFF && b < t) {
2285  x = src[b++];
2286  if (x & 0x80) {
2287  av_log(s->avctx, AV_LOG_WARNING, "Invalid escape sequence\n");
2288  x &= 0x7f;
2289  }
2290  put_bits(&pb, 7, x);
2291  bit_count--;
2292  }
2293  }
2294  flush_put_bits(&pb);
2295 
2296  *unescaped_buf_ptr = dst;
2297  *unescaped_buf_size = (bit_count + 7) >> 3;
2298  memset(s->buffer + *unescaped_buf_size, 0,
 /* NOTE(review): extraction dropped line 2299, presumably the memset size
  * argument (AV_INPUT_BUFFER_PADDING_SIZE) — confirm upstream. */
2300  } else {
 /* Non-SOS segments need no unescaping; hand back the raw input span. */
2301  *unescaped_buf_ptr = *buf_ptr;
2302  *unescaped_buf_size = buf_end - *buf_ptr;
2303  }
2304 
2305  return start_code;
2306 }
2307 
/* NOTE(review): doxygen extraction dropped original line 2308, the signature —
 * presumably `static void reset_icc_profile(MJpegDecodeContext *s)`; confirm
 * upstream. Frees every per-marker ICC chunk collected from APP2 segments and
 * resets the ICC bookkeeping counters so a new frame can start fresh. */
2309 {
2310  int i;
2311 
2312  if (s->iccdata)
2313  for (i = 0; i < s->iccnum; i++)
2314  av_freep(&s->iccdata[i]);
 /* av_freep(NULL-safe) also handles the case where the arrays were never
  * allocated. */
2315  av_freep(&s->iccdata);
2316  av_freep(&s->iccdatalens);
2317 
2318  s->iccread = 0;
2319  s->iccnum = 0;
2320 }
2321 
/**
 * Top-level (M)JPEG packet decoder: walks every marker segment in the packet,
 * dispatches to the per-marker parsers (DHT/DQT/SOF*/SOS/...), and on EOI
 * emits the decoded frame via *got_frame, then applies post-processing
 * (chroma upsampling, vertical flip, Adobe inverse transforms, stereo and
 * ICC side data, EXIF metadata).
 *
 * Returns the number of consumed input bytes, or a negative AVERROR.
 *
 * NOTE(review): doxygen extraction dropped several original source lines in
 * this function (2345, 2392, 2440, 2482, 2582, 2643, 2760, 2778); each gap is
 * flagged inline below with its presumed content — confirm against upstream.
 */
2322 int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2323  AVPacket *avpkt)
2324 {
2325  AVFrame *frame = data;
2326  const uint8_t *buf = avpkt->data;
2327  int buf_size = avpkt->size;
2328  MJpegDecodeContext *s = avctx->priv_data;
2329  const uint8_t *buf_end, *buf_ptr;
2330  const uint8_t *unescaped_buf_ptr;
2331  int hshift, vshift;
2332  int unescaped_buf_size;
2333  int start_code;
2334  int i, index;
2335  int ret = 0;
2336  int is16bit;
2337 
2338  s->buf_size = buf_size;
2339 
 /* Reset per-frame state left over from the previous packet. */
2340  av_dict_free(&s->exif_metadata);
2341  av_freep(&s->stereo3d);
2342  s->adobe_transform = -1;
2343 
2344  if (s->iccnum != 0)
 /* NOTE(review): extraction dropped line 2345 — presumably
  * `reset_icc_profile(s);` — confirm upstream. */
2346 
2347  buf_ptr = buf;
2348  buf_end = buf + buf_size;
2349  while (buf_ptr < buf_end) {
2350  /* find start next marker */
2351  start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
2352  &unescaped_buf_ptr,
2353  &unescaped_buf_size);
2354  /* EOF */
2355  if (start_code < 0) {
2356  break;
2357  } else if (unescaped_buf_size > INT_MAX / 8) {
2358  av_log(avctx, AV_LOG_ERROR,
2359  "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2360  start_code, unescaped_buf_size, buf_size);
2361  return AVERROR_INVALIDDATA;
2362  }
2363  av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%"PTRDIFF_SPECIFIER"\n",
2364  start_code, buf_end - buf_ptr);
2365 
2366  ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
2367 
2368  if (ret < 0) {
2369  av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
2370  goto fail;
2371  }
2372 
2373  s->start_code = start_code;
2374  if (s->avctx->debug & FF_DEBUG_STARTCODE)
2375  av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);
2376 
2377  /* process markers */
2378  if (start_code >= RST0 && start_code <= RST7) {
2379  av_log(avctx, AV_LOG_DEBUG,
2380  "restart marker: %d\n", start_code & 0x0f);
2381  /* APP fields */
2382  } else if (start_code >= APP0 && start_code <= APP15) {
 /* APP parse errors are logged but deliberately not fatal. */
2383  if ((ret = mjpeg_decode_app(s)) < 0)
2384  av_log(avctx, AV_LOG_ERROR, "unable to decode APP fields: %s\n",
2385  av_err2str(ret));
2386  /* Comment */
2387  } else if (start_code == COM) {
2388  ret = mjpeg_decode_com(s);
2389  if (ret < 0)
2390  return ret;
2391  } else if (start_code == DQT) {
 /* NOTE(review): extraction dropped line 2392 — presumably
  * `ret = ff_mjpeg_decode_dqt(s);` — confirm upstream. */
2393  if (ret < 0)
2394  return ret;
2395  }
2396 
2397  ret = -1;
2398 
2399  if (!CONFIG_JPEGLS_DECODER &&
2400  (start_code == SOF48 || start_code == LSE)) {
2401  av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
2402  return AVERROR(ENOSYS);
2403  }
2404 
 /* When decoding is skipped entirely, only structural markers are parsed. */
2405  if (avctx->skip_frame == AVDISCARD_ALL) {
2406  switch(start_code) {
2407  case SOF0:
2408  case SOF1:
2409  case SOF2:
2410  case SOF3:
2411  case SOF48:
2412  case SOI:
2413  case SOS:
2414  case EOI:
2415  break;
2416  default:
2417  goto skip;
2418  }
2419  }
2420 
2421  switch (start_code) {
2422  case SOI:
2423  s->restart_interval = 0;
2424  s->restart_count = 0;
2425  s->raw_image_buffer = buf_ptr;
2426  s->raw_image_buffer_size = buf_end - buf_ptr;
2427  /* nothing to do on SOI */
2428  break;
2429  case DHT:
2430  if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
2431  av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
2432  goto fail;
2433  }
2434  break;
2435  case SOF0:
2436  case SOF1:
2437  if (start_code == SOF0)
2438  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT;
2439  else
 /* NOTE(review): extraction dropped line 2440 — presumably the
  * extended-sequential profile assignment — confirm upstream. */
2441  s->lossless = 0;
2442  s->ls = 0;
2443  s->progressive = 0;
2444  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2445  goto fail;
2446  break;
2447  case SOF2:
2448  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT;
2449  s->lossless = 0;
2450  s->ls = 0;
2451  s->progressive = 1;
2452  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2453  goto fail;
2454  break;
2455  case SOF3:
2456  s->avctx->profile = FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS;
2457  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2458  s->lossless = 1;
2459  s->ls = 0;
2460  s->progressive = 0;
2461  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2462  goto fail;
2463  break;
2464  case SOF48:
2465  s->avctx->profile = FF_PROFILE_MJPEG_JPEG_LS;
2466  s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
2467  s->lossless = 1;
2468  s->ls = 1;
2469  s->progressive = 0;
2470  if ((ret = ff_mjpeg_decode_sof(s)) < 0)
2471  goto fail;
2472  break;
2473  case LSE:
2474  if (!CONFIG_JPEGLS_DECODER ||
2475  (ret = ff_jpegls_decode_lse(s)) < 0)
2476  goto fail;
2477  break;
2478  case EOI:
2479 eoi_parser:
2480  if (!avctx->hwaccel && avctx->skip_frame != AVDISCARD_ALL &&
2481  s->progressive && s->cur_scan && s->got_picture)
 /* NOTE(review): extraction dropped line 2482 — presumably the final
  * progressive AC IDCT pass call — confirm upstream. */
2483  s->cur_scan = 0;
2484  if (!s->got_picture) {
2485  av_log(avctx, AV_LOG_WARNING,
2486  "Found EOI before any SOF, ignoring\n");
2487  break;
2488  }
2489  if (s->interlaced) {
2490  s->bottom_field ^= 1;
2491  /* if not bottom field, do not output image yet */
2492  if (s->bottom_field == !s->interlace_polarity)
2493  break;
2494  }
2495  if (avctx->skip_frame == AVDISCARD_ALL) {
2496  s->got_picture = 0;
2497  goto the_end_no_picture;
2498  }
2499  if (s->avctx->hwaccel) {
2500  ret = s->avctx->hwaccel->end_frame(s->avctx);
2501  if (ret < 0)
2502  return ret;
2503 
2504  av_freep(&s->hwaccel_picture_private);
2505  }
2506  if ((ret = av_frame_ref(frame, s->picture_ptr)) < 0)
2507  return ret;
2508  *got_frame = 1;
2509  s->got_picture = 0;
2510 
 /* Export a flat QP table (one value per 16-pixel column) for consumers
  * of the deprecated qp_table API; allocation failure is non-fatal. */
2511  if (!s->lossless) {
2512  int qp = FFMAX3(s->qscale[0],
2513  s->qscale[1],
2514  s->qscale[2]);
2515  int qpw = (s->width + 15) / 16;
2516  AVBufferRef *qp_table_buf = av_buffer_alloc(qpw);
2517  if (qp_table_buf) {
2518  memset(qp_table_buf->data, qp, qpw);
2519  av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1);
2520  }
2521 
2522  if(avctx->debug & FF_DEBUG_QP)
2523  av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
2524  }
2525 
2526  goto the_end;
2527  case SOS:
2528  s->raw_scan_buffer = buf_ptr;
2529  s->raw_scan_buffer_size = buf_end - buf_ptr;
2530 
2531  s->cur_scan++;
2532  if (avctx->skip_frame == AVDISCARD_ALL) {
2533  skip_bits(&s->gb, get_bits_left(&s->gb));
2534  break;
2535  }
2536 
 /* Scan decode errors are only fatal with AV_EF_EXPLODE. */
2537  if ((ret = ff_mjpeg_decode_sos(s, NULL, 0, NULL)) < 0 &&
2538  (avctx->err_recognition & AV_EF_EXPLODE))
2539  goto fail;
2540  break;
2541  case DRI:
2542  if ((ret = mjpeg_decode_dri(s)) < 0)
2543  return ret;
2544  break;
2545  case SOF5:
2546  case SOF6:
2547  case SOF7:
2548  case SOF9:
2549  case SOF10:
2550  case SOF11:
2551  case SOF13:
2552  case SOF14:
2553  case SOF15:
2554  case JPG:
2555  av_log(avctx, AV_LOG_ERROR,
2556  "mjpeg: unsupported coding type (%x)\n", start_code);
2557  break;
2558  }
2559 
2560 skip:
2561  /* eof process start code */
2562  buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
2563  av_log(avctx, AV_LOG_DEBUG,
2564  "marker parser used %d bytes (%d bits)\n",
2565  (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
2566  }
 /* Ran out of input without an EOI; synthesize one if a picture exists. */
2567  if (s->got_picture && s->cur_scan) {
2568  av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
2569  goto eoi_parser;
2570  }
2571  av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
2572  return AVERROR_INVALIDDATA;
2573 fail:
2574  s->got_picture = 0;
2575  return ret;
2576 the_end:
2577 
2578  is16bit = av_pix_fmt_desc_get(s->avctx->pix_fmt)->comp[0].step > 1;
2579 
 /* Horizontal chroma upsampling for subsampled sources promoted to a
  * less-subsampled pix_fmt (done in place, right to left per line). */
2580  if (AV_RB32(s->upscale_h)) {
2581  int p;
 /* NOTE(review): extraction dropped line 2582 — presumably the start of
  * an av_assert0 over the permitted pix_fmts — confirm upstream. */
2583  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2584  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2585  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2586  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2587  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2588  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2589  avctx->pix_fmt == AV_PIX_FMT_YUV420P16||
2590  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2591  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2592  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2593  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2594  );
2595  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2596  if (ret)
2597  return ret;
2598 
2599  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2600  for (p = 0; p<s->nb_components; p++) {
2601  uint8_t *line = s->picture_ptr->data[p];
2602  int w = s->width;
2603  int h = s->height;
2604  if (!s->upscale_h[p])
2605  continue;
2606  if (p==1 || p==2) {
2607  w = AV_CEIL_RSHIFT(w, hshift);
2608  h = AV_CEIL_RSHIFT(h, vshift);
2609  }
2610  if (s->upscale_v[p] == 1)
2611  h = (h+1)>>1;
2612  av_assert0(w > 0);
2613  for (i = 0; i < h; i++) {
 /* 2x upscale: linear interpolation between neighbouring samples. */
2614  if (s->upscale_h[p] == 1) {
2615  if (is16bit) ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 2];
2616  else line[w - 1] = line[(w - 1) / 2];
2617  for (index = w - 2; index > 0; index--) {
2618  if (is16bit)
2619  ((uint16_t*)line)[index] = (((uint16_t*)line)[index / 2] + ((uint16_t*)line)[(index + 1) / 2]) >> 1;
2620  else
2621  line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
2622  }
 /* 3x upscale: three-tap average of the source neighbourhood. */
2623  } else if (s->upscale_h[p] == 2) {
2624  if (is16bit) {
2625  ((uint16_t*)line)[w - 1] = ((uint16_t*)line)[(w - 1) / 3];
2626  if (w > 1)
2627  ((uint16_t*)line)[w - 2] = ((uint16_t*)line)[w - 1];
2628  } else {
2629  line[w - 1] = line[(w - 1) / 3];
2630  if (w > 1)
2631  line[w - 2] = line[w - 1];
2632  }
2633  for (index = w - 3; index > 0; index--) {
2634  line[index] = (line[index / 3] + line[(index + 1) / 3] + line[(index + 2) / 3] + 1) / 3;
2635  }
2636  }
2637  line += s->linesize[p];
2638  }
2639  }
2640  }
 /* Vertical chroma upsampling, bottom-up so sources are read before they
  * are overwritten. */
2641  if (AV_RB32(s->upscale_v)) {
2642  int p;
 /* NOTE(review): extraction dropped line 2643 — presumably the start of
  * an av_assert0 over the permitted pix_fmts — confirm upstream. */
2644  avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
2645  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
2646  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
2647  avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
2648  avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
2649  avctx->pix_fmt == AV_PIX_FMT_YUV440P ||
2650  avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
2651  avctx->pix_fmt == AV_PIX_FMT_YUVA444P ||
2652  avctx->pix_fmt == AV_PIX_FMT_YUVA420P ||
2653  avctx->pix_fmt == AV_PIX_FMT_YUVA420P16||
2654  avctx->pix_fmt == AV_PIX_FMT_GBRP ||
2655  avctx->pix_fmt == AV_PIX_FMT_GBRAP
2656  );
2657  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2658  if (ret)
2659  return ret;
2660 
2661  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2662  for (p = 0; p < s->nb_components; p++) {
2663  uint8_t *dst;
2664  int w = s->width;
2665  int h = s->height;
2666  if (!s->upscale_v[p])
2667  continue;
2668  if (p==1 || p==2) {
2669  w = AV_CEIL_RSHIFT(w, hshift);
2670  h = AV_CEIL_RSHIFT(h, vshift);
2671  }
2672  dst = &((uint8_t *)s->picture_ptr->data[p])[(h - 1) * s->linesize[p]];
2673  for (i = h - 1; i; i--) {
2674  uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[p])[i * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2675  uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[p])[(i + 1) * s->upscale_v[p] / (s->upscale_v[p] + 1) * s->linesize[p]];
2676  if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
2677  memcpy(dst, src1, w);
2678  } else {
2679  for (index = 0; index < w; index++)
2680  dst[index] = (src1[index] + src2[index]) >> 1;
2681  }
2682  dst -= s->linesize[p];
2683  }
2684  }
2685  }
 /* Bottom-up sources (e.g. Intel JPEG Library): flip each plane in place. */
2686  if (s->flipped && !s->rgb) {
2687  int j;
2688  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
2689  if (ret)
2690  return ret;
2691 
2692  av_assert0(s->nb_components == av_pix_fmt_count_planes(s->picture_ptr->format));
2693  for (index=0; index<s->nb_components; index++) {
2694  uint8_t *dst = s->picture_ptr->data[index];
2695  int w = s->picture_ptr->width;
2696  int h = s->picture_ptr->height;
2697  if(index && index<3){
2698  w = AV_CEIL_RSHIFT(w, hshift);
2699  h = AV_CEIL_RSHIFT(h, vshift);
2700  }
2701  if(dst){
2702  uint8_t *dst2 = dst + s->picture_ptr->linesize[index]*(h-1);
2703  for (i=0; i<h/2; i++) {
2704  for (j=0; j<w; j++)
2705  FFSWAP(int, dst[j], dst2[j]);
2706  dst += s->picture_ptr->linesize[index];
2707  dst2 -= s->picture_ptr->linesize[index];
2708  }
2709  }
2710  }
2711  }
 /* Adobe transform 0 on 4-plane RGBA: undo premultiplied CMYK-style
  * storage and reorder channels into G/B/R. */
2712  if (s->adobe_transform == 0 && s->avctx->pix_fmt == AV_PIX_FMT_GBRAP) {
2713  int w = s->picture_ptr->width;
2714  int h = s->picture_ptr->height;
2715  av_assert0(s->nb_components == 4);
2716  for (i=0; i<h; i++) {
2717  int j;
2718  uint8_t *dst[4];
2719  for (index=0; index<4; index++) {
2720  dst[index] = s->picture_ptr->data[index]
2721  + s->picture_ptr->linesize[index]*i;
2722  }
2723  for (j=0; j<w; j++) {
2724  int k = dst[3][j];
2725  int r = dst[0][j] * k;
2726  int g = dst[1][j] * k;
2727  int b = dst[2][j] * k;
 /* *257 >> 16 is an exact /255 for products of two 8-bit values. */
2728  dst[0][j] = g*257 >> 16;
2729  dst[1][j] = b*257 >> 16;
2730  dst[2][j] = r*257 >> 16;
2731  dst[3][j] = 255;
2732  }
2733  }
2734  }
 /* Adobe transform 2 (YCCK): invert the chroma offsets and alpha scaling. */
2735  if (s->adobe_transform == 2 && s->avctx->pix_fmt == AV_PIX_FMT_YUVA444P) {
2736  int w = s->picture_ptr->width;
2737  int h = s->picture_ptr->height;
2738  av_assert0(s->nb_components == 4);
2739  for (i=0; i<h; i++) {
2740  int j;
2741  uint8_t *dst[4];
2742  for (index=0; index<4; index++) {
2743  dst[index] = s->picture_ptr->data[index]
2744  + s->picture_ptr->linesize[index]*i;
2745  }
2746  for (j=0; j<w; j++) {
2747  int k = dst[3][j];
2748  int r = (255 - dst[0][j]) * k;
2749  int g = (128 - dst[1][j]) * k;
2750  int b = (128 - dst[2][j]) * k;
2751  dst[0][j] = r*257 >> 16;
2752  dst[1][j] = (g*257 >> 16) + 128;
2753  dst[2][j] = (b*257 >> 16) + 128;
2754  dst[3][j] = 255;
2755  }
2756  }
2757  }
2758 
 /* Attach stereo 3D side data collected from an APPx marker, if any. */
2759  if (s->stereo3d) {
 /* NOTE(review): extraction dropped line 2760 — presumably the
  * av_stereo3d_create_side_data(frame) call declaring `stereo` —
  * confirm upstream. */
2761  if (stereo) {
2762  stereo->type = s->stereo3d->type;
2763  stereo->flags = s->stereo3d->flags;
2764  }
2765  av_freep(&s->stereo3d);
2766  }
2767 
 /* Once every announced APP2 ICC chunk has arrived, reassemble them into a
  * single ICC profile side-data blob. */
2768  if (s->iccnum != 0 && s->iccnum == s->iccread) {
2769  AVFrameSideData *sd;
2770  size_t offset = 0;
2771  int total_size = 0;
2772  int i;
2773 
2774  /* Sum size of all parts. */
2775  for (i = 0; i < s->iccnum; i++)
2776  total_size += s->iccdatalens[i];
2777 
 /* NOTE(review): extraction dropped line 2778 — presumably the
  * av_frame_new_side_data(..., AV_FRAME_DATA_ICC_PROFILE, total_size)
  * call assigning `sd` — confirm upstream. */
2779  if (!sd) {
2780  av_log(s->avctx, AV_LOG_ERROR, "Could not allocate frame side data\n");
2781  return AVERROR(ENOMEM);
2782  }
2783 
2784  /* Reassemble the parts, which are now in-order. */
2785  for (i = 0; i < s->iccnum; i++) {
2786  memcpy(sd->data + offset, s->iccdata[i], s->iccdatalens[i]);
2787  offset += s->iccdatalens[i];
2788  }
2789  }
2790 
2791  av_dict_copy(&((AVFrame *) data)->metadata, s->exif_metadata, 0);
2792  av_dict_free(&s->exif_metadata);
2793 
2794 the_end_no_picture:
2795  av_log(avctx, AV_LOG_DEBUG, "decode frame unused %"PTRDIFF_SPECIFIER" bytes\n",
2796  buf_end - buf_ptr);
2797 // return buf_end - buf_ptr;
2798  return buf_ptr - buf;
2799 }
2800 
/* NOTE(review): doxygen extraction dropped original line 2801, the signature —
 * presumably `av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)`;
 * confirm upstream.
 *
 * Codec close hook: releases every buffer owned by the decoder context —
 * the picture, scratch buffers, VLC tables, per-component block storage,
 * EXIF metadata and hwaccel private data. Always returns 0. */
2802 {
2803  MJpegDecodeContext *s = avctx->priv_data;
2804  int i, j;
2805 
2806  if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
2807  av_log(avctx, AV_LOG_INFO, "Single field\n");
2808  }
2809 
 /* s->picture owns the frame only when this decoder allocated it;
  * otherwise picture_ptr aliases an externally owned frame. */
2810  if (s->picture) {
2811  av_frame_free(&s->picture);
2812  s->picture_ptr = NULL;
2813  } else if (s->picture_ptr)
2814  av_frame_unref(s->picture_ptr);
2815 
2816  av_freep(&s->buffer);
2817  av_freep(&s->stereo3d);
2818  av_freep(&s->ljpeg_buffer);
2819  s->ljpeg_buffer_size = 0;
2820 
2821  for (i = 0; i < 3; i++) {
2822  for (j = 0; j < 4; j++)
2823  ff_free_vlc(&s->vlcs[i][j]);
2824  }
2825  for (i = 0; i < MAX_COMPONENTS; i++) {
2826  av_freep(&s->blocks[i]);
2827  av_freep(&s->last_nnz[i]);
2828  }
2829  av_dict_free(&s->exif_metadata);
2830 
 /* NOTE(review): extraction dropped line 2831 — presumably
  * `reset_icc_profile(s);` — confirm upstream. */
2832 
2833  av_freep(&s->hwaccel_picture_private);
2834 
2835  return 0;
2836 }
2837 
2838 static void decode_flush(AVCodecContext *avctx)
2839 {
2840  MJpegDecodeContext *s = avctx->priv_data;
2841  s->got_picture = 0;
2842 }
2843 
2844 #if CONFIG_MJPEG_DECODER
2845 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2846 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
/* Decoder-private options: only "extern_huff", which tells the decoder to
 * read Huffman tables from extradata instead of the bitstream (off by
 * default). */
2847 static const AVOption options[] = {
2848  { "extern_huff", "Use external huffman table.",
2849  OFFSET(extern_huff), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
2850  { NULL },
2851 };
2852 
/* AVClass binding the option table to MJpegDecodeContext for av_opt. */
2853 static const AVClass mjpegdec_class = {
2854  .class_name = "MJPEG decoder",
2855  .item_name = av_default_item_name,
2856  .option = options,
2857  .version = LIBAVUTIL_VERSION_INT,
2858 };
2859 
/* Registration entry for the software MJPEG decoder.
 * NOTE(review): doxygen extraction dropped original line 2860 (presumably
 * `AVCodec ff_mjpeg_decoder = {`) and several initializer lines (2866, 2868,
 * 2873, 2875 — presumably .init, .decode, .receive_frame/pix_fmt and the
 * second caps_internal operand) — confirm upstream. */
2861  .name = "mjpeg",
2862  .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
2863  .type = AVMEDIA_TYPE_VIDEO,
2864  .id = AV_CODEC_ID_MJPEG,
2865  .priv_data_size = sizeof(MJpegDecodeContext),
2867  .close = ff_mjpeg_decode_end,
2869  .flush = decode_flush,
2870  .capabilities = AV_CODEC_CAP_DR1,
2871  .max_lowres = 3,
2872  .priv_class = &mjpegdec_class,
2874  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
2876  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2877 #if CONFIG_MJPEG_NVDEC_HWACCEL
2878  HWACCEL_NVDEC(mjpeg),
2879 #endif
2880 #if CONFIG_MJPEG_VAAPI_HWACCEL
2881  HWACCEL_VAAPI(mjpeg),
2882 #endif
2883  NULL
2884  },
2885 };
2886 #endif
2887 #if CONFIG_THP_DECODER
/* Registration entry for the THP (Nintendo GameCube) variant, which shares
 * this decoder's implementation.
 * NOTE(review): doxygen extraction dropped original line 2888 (presumably
 * `AVCodec ff_thp_decoder = {`) and initializer lines 2894/2896 (presumably
 * .init and .decode) — confirm upstream. */
2889  .name = "thp",
2890  .long_name = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
2891  .type = AVMEDIA_TYPE_VIDEO,
2892  .id = AV_CODEC_ID_THP,
2893  .priv_data_size = sizeof(MJpegDecodeContext),
2895  .close = ff_mjpeg_decode_end,
2897  .flush = decode_flush,
2898  .capabilities = AV_CODEC_CAP_DR1,
2899  .max_lowres = 3,
2900  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
2901 };
2902 #endif
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
AVCodec
AVCodec.
Definition: codec.h:190
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
jpegtables.h
mjpeg.h
level
uint8_t level
Definition: svq3.c:209
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
av_buffer_alloc
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
ff_mjpeg_build_huffman_codes
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
Definition: jpegtables.c:127
blockdsp.h
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1279
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
SOS
@ SOS
Definition: mjpeg.h:72
mjpeg_copy_block
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
Definition: mjpegdec.c:1377
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
SOF48
@ SOF48
JPEG-LS.
Definition: mjpeg.h:103
GetByteContext
Definition: bytestream.h:33
APP1
@ APP1
Definition: mjpeg.h:80
decode_flush
static void decode_flush(AVCodecContext *avctx)
Definition: mjpegdec.c:2838
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
ZERO_RUN
#define ZERO_RUN
Definition: mjpegdec.c:939
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:406
SOF0
@ SOF0
Definition: mjpeg.h:39
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
GET_VLC
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
Definition: get_bits.h:706
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:727
avpriv_mjpeg_bits_ac_luminance
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
Definition: jpegtables.c:73
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
se
#define se(name, range_min, range_max)
Definition: cbs_h2645.c:273
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
init_idct
static void init_idct(AVCodecContext *avctx)
Definition: mjpegdec.c:130
RST7
@ RST7
Definition: mjpeg.h:68
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
mjpegdec.h
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:178
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
avpriv_mjpeg_val_ac_luminance
const uint8_t avpriv_mjpeg_val_ac_luminance[]
Definition: jpegtables.c:75
AVFrame::width
int width
Definition: frame.h:358
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:439
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
FF_PROFILE_MJPEG_JPEG_LS
#define FF_PROFILE_MJPEG_JPEG_LS
Definition: avcodec.h:1960
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
SOF11
@ SOF11
Definition: mjpeg.h:50
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1183
AVOption
AVOption.
Definition: opt.h:246
b
#define b
Definition: input.c:41
mjpeg_decode_dc
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
Definition: mjpegdec.c:773
jpeglsdec.h
data
const char data[16]
Definition: mxf.c:91
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
Definition: mem.c:192
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
build_vlc
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
Definition: mjpegdec.c:53
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
AV_CODEC_ID_THP
@ AV_CODEC_ID_THP
Definition: codec_id.h:149
avpriv_mjpeg_bits_dc_luminance
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
Definition: jpegtables.c:65
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
APP15
@ APP15
Definition: mjpeg.h:94
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2589
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:515
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
ff_mjpeg_decode_dht
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
Definition: mjpegdec.c:241
ljpeg_decode_yuv_scan
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
Definition: mjpegdec.c:1223
shift_output
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
Definition: mjpegdec.c:1393
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:215
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:535
ff_mjpeg_decode_init
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
Definition: mjpegdec.c:139
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
fail
#define fail()
Definition: checkasm.h:123
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:441
SOF3
@ SOF3
Definition: mjpeg.h:42
FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
Definition: avcodec.h:1956
GetBitContext
Definition: get_bits.h:61
x
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
Definition: fate.txt:150
mjpeg_decode_com
static int mjpeg_decode_com(MJpegDecodeContext *s)
Definition: mjpegdec.c:2123
init_default_huffman_tables
static int init_default_huffman_tables(MJpegDecodeContext *s)
Definition: mjpegdec.c:76
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
val
static double val(void *priv, double ch)
Definition: aeval.c:76
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:714
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:381
AV_RB24
#define AV_RB24
Definition: intreadwrite.h:64
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
av_bswap32
#define av_bswap32
Definition: bswap.h:33
avpriv_mjpeg_bits_dc_chrominance
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
Definition: jpegtables.c:70
AV_PIX_FMT_YUVJ411P
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor ...
Definition: pixfmt.h:258
ff_mjpeg_profiles
const AVProfile ff_mjpeg_profiles[]
Definition: profiles.c:163
ff_exif_decode_ifd
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
Definition: exif.c:122
aligned
static int aligned(int val)
Definition: dashdec.c:171
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
decode_dc_progressive
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
Definition: mjpegdec.c:840
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:409
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:2192
COM
@ COM
Definition: mjpeg.h:111
FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
Definition: avcodec.h:1958
mask
static const uint16_t mask[17]
Definition: lzw.c:38
handle_rstn
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
Definition: mjpegdec.c:1022
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
SOF5
@ SOF5
Definition: mjpeg.h:44
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
width
#define width
emms_c
#define emms_c()
Definition: internal.h:55
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
parse_avid
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
Definition: mjpegdec.c:119
avpriv_mjpeg_val_dc
const uint8_t avpriv_mjpeg_val_dc[12]
Definition: jpegtables.c:67
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:410
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
Definition: avcodec.h:1959
g
const char * g
Definition: vf_curves.c:115
APP3
@ APP3
Definition: mjpeg.h:82
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
ff_jpegls_decode_picture
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
Definition: jpeglsdec.c:348
bits
uint8_t bits
Definition: vp3data.h:202
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AV_FIELD_UNKNOWN
@ AV_FIELD_UNKNOWN
Definition: codec_par.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
AV_PIX_FMT_GBR24P
@ AV_PIX_FMT_GBR24P
Definition: pixfmt.h:169
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:408
RST0
@ RST0
Definition: mjpeg.h:61
reset_icc_profile
static void reset_icc_profile(MJpegDecodeContext *s)
Definition: mjpegdec.c:2308
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:239
ff_mjpeg_decode_end
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
Definition: mjpegdec.c:2801
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
PutBitContext
Definition: put_bits.h:35
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:416
ff_thp_decoder
AVCodec ff_thp_decoder
AV_PIX_FMT_RGBA64
#define AV_PIX_FMT_RGBA64
Definition: pixfmt.h:387
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:261
AV_PIX_FMT_BGR48
#define AV_PIX_FMT_BGR48
Definition: pixfmt.h:388
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
mjpeg_idct_scan_progressive_ac
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
Definition: mjpegdec.c:1581
copy_block2
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:27
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:208
VD
#define VD
Definition: cuviddec.c:1071
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
ff_mjpeg_decode_dqt
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
Definition: mjpegdec.c:200
SOF13
@ SOF13
Definition: mjpeg.h:52
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
tiff.h
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
src
#define src
Definition: vp8dsp.c:254
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MJpegDecodeContext
Definition: mjpegdec.h:46
mjpeg_decode_scan
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1408
decode_block_refinement
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:957
lowres
static int lowres
Definition: ffplay.c:336
CONFIG_JPEGLS_DECODER
#define CONFIG_JPEGLS_DECODER
Definition: config.h:841
mjpeg_decode_scan_progressive_ac
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
Definition: mjpegdec.c:1529
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: bitstream.c:273
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
AV_RB32
#define AV_RB32
Definition: intreadwrite.h:130
AV_PIX_FMT_ABGR
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
Definition: pixfmt.h:94
DRI
@ DRI
Definition: mjpeg.h:75
avpriv_mjpeg_val_ac_chrominance
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Definition: jpegtables.c:102
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
copy_data_segment
#define copy_data_segment(skip)
options
const OptionDef options[]
copy_mb
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
Definition: cinepakenc.c:523
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1854
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
ljpeg_decode_rgb_scan
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Definition: mjpegdec.c:1057
AVPacket::size
int size
Definition: packet.h:356
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:186
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:332
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
SOF14
@ SOF14
Definition: mjpeg.h:53
ff_jpegls_decode_lse
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
Definition: jpeglsdec.c:51
FF_QSCALE_TYPE_MPEG1
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:92
decode_block_progressive
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
Definition: mjpegdec.c:858
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
ff_mjpeg_decode_sos
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
Definition: mjpegdec.c:1616
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
size
int size
Definition: twinvq_data.h:11134
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:208
SOF15
@ SOF15
Definition: mjpeg.h:54
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:373
AVCodecHWConfigInternal
Definition: hwconfig.h:29
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:177
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
get_xbits
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
Definition: get_bits.h:321
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
find_marker
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
Definition: mjpegdec.c:2163
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
DQT
@ DQT
Definition: mjpeg.h:73
r
#define r
Definition: input.c:40
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
SOF10
@ SOF10
Definition: mjpeg.h:49
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
src1
#define src1
Definition: h264pred.c:139
NEG_USR32
#define NEG_USR32(a, s)
Definition: mathops.h:166
copy_block4
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
Definition: copy_block.h:37
ff_mjpeg_decode_frame
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: mjpegdec.c:2322
interlaced
uint8_t interlaced
Definition: mxfenc.c:2141
decode_block
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
Definition: mjpegdec.c:791
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
EOI
@ EOI
Definition: mjpeg.h:71
copy_block.h
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
Definition: avcodec.h:1957
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
mjpeg_decode_dri
static int mjpeg_decode_dri(MJpegDecodeContext *s)
Definition: mjpegdec.c:1794
MIN_CACHE_BITS
#define MIN_CACHE_BITS
Definition: get_bits.h:128
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
SOF9
@ SOF9
Definition: mjpeg.h:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1625
AV_PIX_FMT_YUVJ440P
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
Definition: pixfmt.h:100
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
len
int len
Definition: vorbis_enc_data.h:452
exif.h
DHT
@ DHT
Definition: mjpeg.h:56
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:534
idctdsp.h
avcodec.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
REFINE_BIT
#define REFINE_BIT(j)
Definition: mjpegdec.c:931
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
pred
static const float pred[4]
Definition: siprdata.h:259
av_stereo3d_alloc
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
Definition: stereo3d.c:28
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
w
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo ug o o w
Definition: fate.txt:150
SOF2
@ SOF2
Definition: mjpeg.h:41
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
pos
unsigned int pos
Definition: spdifenc.c:410
LSE
@ LSE
JPEG-LS extension parameters.
Definition: mjpeg.h:104
FF_DEBUG_QP
#define FF_DEBUG_QP
Definition: avcodec.h:1616
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
ff_mjpeg_find_marker
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
Definition: mjpegdec.c:2188
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVFrame::height
int height
Definition: frame.h:358
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
AV_RL32
#define AV_RL32
Definition: intreadwrite.h:146
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:557
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
APP2
@ APP2
Definition: mjpeg.h:81
VLC
Definition: vlc.h:26
values
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
Definition: filter_design.txt:263
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
Definition: tiff_common.c:261
profiles
static const AVProfile profiles[]
Definition: libfdk-aacenc.c:428
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1611
ff_mjpeg_decode_sof
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
Definition: mjpegdec.c:305
APP0
@ APP0
Definition: mjpeg.h:79
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:98
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:81
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
SOI
@ SOI
Definition: mjpeg.h:70
mjpeg_decode_app
static int mjpeg_decode_app(MJpegDecodeContext *s)
Definition: mjpegdec.c:1806
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:206
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
SOF1
@ SOF1
Definition: mjpeg.h:40
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:551
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:504
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:564
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
MAX_COMPONENTS
#define MAX_COMPONENTS
Definition: mjpegdec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
h
h
Definition: vp9dsp_template.c:2038
SOF7
@ SOF7
Definition: mjpeg.h:46
avpriv_mjpeg_bits_ac_chrominance
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Definition: jpegtables.c:99
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
ff_mjpeg_decoder
AVCodec ff_mjpeg_decoder
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:60
PREDICT
#define PREDICT(ret, topleft, top, left, predictor)
Definition: mjpeg.h:118
put_bits.h
SOF6
@ SOF6
Definition: mjpeg.h:45
av_frame_set_qp_table
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
Definition: frame.c:55
JPG
@ JPG
Definition: mjpeg.h:47
av_fourcc2str
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
re
float re
Definition: fft.c:82