FFmpeg 4.3
tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include "libavutil/attributes.h"
37 #include "libavutil/avstring.h"
38 #include "libavutil/error.h"
39 #include "libavutil/intreadwrite.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/opt.h"
42 #include "avcodec.h"
43 #include "bytestream.h"
44 #include "faxcompr.h"
45 #include "internal.h"
46 #include "lzw.h"
47 #include "mathops.h"
48 #include "tiff.h"
49 #include "tiff_data.h"
50 #include "mjpegdec.h"
51 #include "thread.h"
52 #include "get_bits.h"
53 
54 typedef struct TiffContext {
55  AVClass *class;
56  AVCodecContext *avctx;
57  GetByteContext gb;
58 
59  /* JPEG decoding for DNG */
60  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
61  AVFrame *jpgframe; // decoded JPEG tile
62 
63  int get_subimage;
64  uint16_t get_page;
65  int get_thumbnail;
66 
67  enum TiffType tiff_type;
68  int width, height;
69  unsigned int bpp, bppcount;
70  uint32_t palette[256];
71  int palette_is_set;
72  int le;
73  enum TiffCompr compr;
74  enum TiffPhotometric photometric;
75  int planar;
76  int subsampling[2];
77  int fax_opts;
78  int predictor;
79  int fill_order;
80  uint32_t res[4];
81  int is_thumbnail;
82 
83  int is_bayer;
84  uint8_t pattern[4];
85  unsigned black_level;
86  unsigned white_level;
87  uint16_t dng_lut[65536];
88 
89  uint32_t sub_ifd;
90  uint16_t cur_page;
91 
92  int strips, rps, sstype;
93  int sot;
94  int stripsizesoff, strippos, stripoff, stripsize;
95  LZWState *lzw;
96 
97  /* Tile support */
98  int is_tiled;
99  uint32_t tile_byte_counts_offset, tile_offsets_offset;
100  uint32_t tile_width, tile_length;
101  uint32_t tile_count;
102 
103  int is_jpeg;
104 
105  uint8_t *deinvert_buf;
106  int deinvert_buf_size;
107  uint8_t *yuv_line;
108  unsigned int yuv_line_size;
109  uint8_t *fax_buffer;
110  unsigned int fax_buffer_size;
111 
112  int geotag_count;
113  TiffGeoTag *geotags;
114 } TiffContext;
115 
116 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
117  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
118  s->tiff_type = tiff_type;
119 }
120 
121 static void free_geotags(TiffContext *const s)
122 {
123  int i;
124  for (i = 0; i < s->geotag_count; i++) {
125  if (s->geotags[i].val)
126  av_freep(&s->geotags[i].val);
127  }
128  av_freep(&s->geotags);
129  s->geotag_count = 0;
130 }
131 
132 #define RET_GEOKEY(TYPE, array, element)\
133  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
134  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
135  return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;
136 
137 static const char *get_geokey_name(int key)
138 {
139  RET_GEOKEY(VERT, vert, name);
140  RET_GEOKEY(PROJ, proj, name);
141  RET_GEOKEY(GEOG, geog, name);
142  RET_GEOKEY(CONF, conf, name);
143 
144  return NULL;
145 }
146 
147 static int get_geokey_type(int key)
148 {
149  RET_GEOKEY(VERT, vert, type);
150  RET_GEOKEY(PROJ, proj, type);
151  RET_GEOKEY(GEOG, geog, type);
152  RET_GEOKEY(CONF, conf, type);
153 
154  return AVERROR_INVALIDDATA;
155 }
156 
157 static int cmp_id_key(const void *id, const void *k)
158 {
159  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
160 }
161 
162 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
163 {
164  TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
165  if(r)
166  return r->name;
167 
168  return NULL;
169 }
170 
171 static char *get_geokey_val(int key, int val)
172 {
173  char *ap;
174 
176  return av_strdup("undefined");
178  return av_strdup("User-Defined");
179 
180 #define RET_GEOKEY_VAL(TYPE, array)\
181  if (val >= TIFF_##TYPE##_OFFSET &&\
182  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
183  return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);
184 
185  switch (key) {
187  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
188  break;
190  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
191  break;
195  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
196  break;
199  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
200  break;
202  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
203  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
204  break;
206  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
207  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
208  break;
210  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
211  break;
213  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
214  break;
217  if(ap) return ap;
218  break;
221  if(ap) return ap;
222  break;
224  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
225  break;
227  RET_GEOKEY_VAL(VERT_CS, vert_cs);
228  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
229  break;
230 
231  }
232 
233  ap = av_malloc(14);
234  if (ap)
235  snprintf(ap, 14, "Unknown-%d", val);
236  return ap;
237 }
238 
239 static char *doubles2str(double *dp, int count, const char *sep)
240 {
241  int i;
242  char *ap, *ap0;
243  uint64_t component_len;
244  if (!sep) sep = ", ";
245  component_len = 24LL + strlen(sep);
246  if (count >= (INT_MAX - 1)/component_len)
247  return NULL;
248  ap = av_malloc(component_len * count + 1);
249  if (!ap)
250  return NULL;
251  ap0 = ap;
252  ap[0] = '\0';
253  for (i = 0; i < count; i++) {
254  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
255  if(l >= component_len) {
256  av_free(ap0);
257  return NULL;
258  }
259  ap += l;
260  }
261  ap0[strlen(ap0) - strlen(sep)] = '\0';
262  return ap0;
263 }
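
/*
 * Usage sketch (illustrative only, not part of the original file):
 * doubles2str() joins `count` doubles into one newly allocated string,
 * e.g. when exporting GeoTIFF values as frame metadata:
 *
 *     double v[3] = { 1.0, 2.5, 3.25 };
 *     char *str = doubles2str(v, 3, ", ");   // -> "1, 2.5, 3.25"
 *     if (str)
 *         av_free(str);                      // caller owns the buffer
 */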
264 
265 static int add_metadata(int count, int type,
266  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
267 {
268  switch(type) {
269  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
270  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
271  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
272  default : return AVERROR_INVALIDDATA;
273  };
274 }
275 
276 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
277  const uint8_t *src, int src_stride, int width, int height,
278  int is_single_comp, int is_u16);
279 
280 static void av_always_inline horizontal_fill(TiffContext *s,
281  unsigned int bpp, uint8_t* dst,
282  int usePtr, const uint8_t *src,
283  uint8_t c, int width, int offset)
284 {
285  switch (bpp) {
286  case 1:
287  while (--width >= 0) {
288  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
289  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
290  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
291  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
292  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
293  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
294  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
295  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
296  }
297  break;
298  case 2:
299  while (--width >= 0) {
300  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
301  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
302  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
303  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
304  }
305  break;
306  case 4:
307  while (--width >= 0) {
308  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
309  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
310  }
311  break;
312  case 10:
313  case 12:
314  case 14: {
315  uint16_t *dst16 = (uint16_t *)dst;
316  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
317  uint8_t shift = is_dng ? 0 : 16 - bpp;
318  GetBitContext gb;
319 
320  init_get_bits8(&gb, src, width);
321  for (int i = 0; i < s->width; i++) {
322  dst16[i] = get_bits(&gb, bpp) << shift;
323  }
324  }
325  break;
326  default:
327  if (usePtr) {
328  memcpy(dst + offset, src, width);
329  } else {
330  memset(dst + offset, c, width);
331  }
332  }
333 }
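
/*
 * Worked example (illustrative): with bpp = 4 every source byte holds two
 * pixels, high nibble first, so a byte 0xA3 expands to dst values 0x0A, 0x03.
 * With bpp = 1 a byte expands to eight pixels, MSB first, which is why the
 * destination index is scaled by 8 in that case. For 10/12/14 bpp the samples
 * are read with a bit reader and, for plain TIFF, shifted up to 16-bit range,
 * while DNG keeps the raw values for later linearization.
 */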
334 
335 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
336 {
337  int i;
338 
339  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
340  if (!s->deinvert_buf)
341  return AVERROR(ENOMEM);
342  for (i = 0; i < size; i++)
343  s->deinvert_buf[i] = ff_reverse[src[i]];
344 
345  return 0;
346 }
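
/*
 * Note (illustrative): TIFF FillOrder = 2 stores bits LSB-first within each
 * byte, so the strip is bit-reversed per byte via ff_reverse before the usual
 * MSB-first decoding, e.g. 0x01 -> 0x80 and 0xB4 -> 0x2D.
 */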
347 
348 static void unpack_gray(TiffContext *s, AVFrame *p,
349  const uint8_t *src, int lnum, int width, int bpp)
350 {
351  GetBitContext gb;
352  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
353 
354  init_get_bits8(&gb, src, width);
355 
356  for (int i = 0; i < s->width; i++) {
357  dst[i] = get_bits(&gb, bpp);
358  }
359 }
360 
361 static void unpack_yuv(TiffContext *s, AVFrame *p,
362  const uint8_t *src, int lnum)
363 {
364  int i, j, k;
365  int w = (s->width - 1) / s->subsampling[0] + 1;
366  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
367  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
368  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
369  for (i = 0; i < w; i++) {
370  for (j = 0; j < s->subsampling[1]; j++)
371  for (k = 0; k < s->subsampling[0]; k++)
372  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
373  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
374  *pu++ = *src++;
375  *pv++ = *src++;
376  }
377  }else{
378  for (i = 0; i < w; i++) {
379  for (j = 0; j < s->subsampling[1]; j++)
380  for (k = 0; k < s->subsampling[0]; k++)
381  p->data[0][(lnum + j) * p->linesize[0] +
382  i * s->subsampling[0] + k] = *src++;
383  *pu++ = *src++;
384  *pv++ = *src++;
385  }
386  }
387 }
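
/*
 * Layout example (illustrative): for 4:2:0 data (subsampling = {2, 2}) each
 * input chunk carries one 2x2 block of luma followed by one chroma pair:
 *
 *     Y00 Y01 Y10 Y11 U V | Y02 Y03 Y12 Y13 U V | ...
 *
 * so one row of packed source data expands to subsampling[1] lines of the
 * luma plane and one line of each chroma plane.
 */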
388 
389 #if CONFIG_ZLIB
390 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
391  int size)
392 {
393  z_stream zstream = { 0 };
394  int zret;
395 
396  zstream.next_in = src;
397  zstream.avail_in = size;
398  zstream.next_out = dst;
399  zstream.avail_out = *len;
400  zret = inflateInit(&zstream);
401  if (zret != Z_OK) {
402  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
403  return zret;
404  }
405  zret = inflate(&zstream, Z_SYNC_FLUSH);
406  inflateEnd(&zstream);
407  *len = zstream.total_out;
408  return zret == Z_STREAM_END ? Z_OK : zret;
409 }
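
/*
 * Note (illustrative): this is a one-shot zlib inflate, so the caller must
 * size the output buffer for the whole strip up front (width * lines bytes in
 * tiff_unpack_zlib() below); a truncated stream shows up as total_out being
 * short and a return value other than Z_STREAM_END.
 */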
410 
411 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
412  const uint8_t *src, int size, int width, int lines,
413  int strip_start, int is_yuv)
414 {
415  uint8_t *zbuf;
416  unsigned long outlen;
417  int ret, line;
418  outlen = width * lines;
419  zbuf = av_malloc(outlen);
420  if (!zbuf)
421  return AVERROR(ENOMEM);
422  if (s->fill_order) {
423  if ((ret = deinvert_buffer(s, src, size)) < 0) {
424  av_free(zbuf);
425  return ret;
426  }
427  src = s->deinvert_buf;
428  }
429  ret = tiff_uncompress(zbuf, &outlen, src, size);
430  if (ret != Z_OK) {
431  av_log(s->avctx, AV_LOG_ERROR,
432  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
433  (unsigned long)width * lines, ret);
434  av_free(zbuf);
435  return AVERROR_UNKNOWN;
436  }
437  src = zbuf;
438  for (line = 0; line < lines; line++) {
439  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
440  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
441  } else {
442  memcpy(dst, src, width);
443  }
444  if (is_yuv) {
445  unpack_yuv(s, p, dst, strip_start + line);
446  line += s->subsampling[1] - 1;
447  }
448  dst += stride;
449  src += width;
450  }
451  av_free(zbuf);
452  return 0;
453 }
454 #endif
455 
456 #if CONFIG_LZMA
457 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
458  int size)
459 {
460  lzma_stream stream = LZMA_STREAM_INIT;
461  lzma_ret ret;
462 
463  stream.next_in = (uint8_t *)src;
464  stream.avail_in = size;
465  stream.next_out = dst;
466  stream.avail_out = *len;
467  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
468  if (ret != LZMA_OK) {
469  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
470  return ret;
471  }
472  ret = lzma_code(&stream, LZMA_RUN);
473  lzma_end(&stream);
474  *len = stream.total_out;
475  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
476 }
477 
478 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
479  const uint8_t *src, int size, int width, int lines,
480  int strip_start, int is_yuv)
481 {
482  uint64_t outlen = width * (uint64_t)lines;
483  int ret, line;
484  uint8_t *buf = av_malloc(outlen);
485  if (!buf)
486  return AVERROR(ENOMEM);
487  if (s->fill_order) {
488  if ((ret = deinvert_buffer(s, src, size)) < 0) {
489  av_free(buf);
490  return ret;
491  }
492  src = s->deinvert_buf;
493  }
494  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
495  if (ret != LZMA_OK) {
496  av_log(s->avctx, AV_LOG_ERROR,
497  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
498  (uint64_t)width * lines, ret);
499  av_free(buf);
500  return AVERROR_UNKNOWN;
501  }
502  src = buf;
503  for (line = 0; line < lines; line++) {
504  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
505  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
506  } else {
507  memcpy(dst, src, width);
508  }
509  if (is_yuv) {
510  unpack_yuv(s, p, dst, strip_start + line);
511  line += s->subsampling[1] - 1;
512  }
513  dst += stride;
514  src += width;
515  }
516  av_free(buf);
517  return 0;
518 }
519 #endif
520 
521 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
522  const uint8_t *src, int size, int width, int lines)
523 {
524  int i, ret = 0;
525  int line;
526  uint8_t *src2;
527 
528  av_fast_padded_malloc(&s->fax_buffer, &s->fax_buffer_size, size);
529  src2 = s->fax_buffer;
530 
531  if (!src2) {
532  av_log(s->avctx, AV_LOG_ERROR,
533  "Error allocating temporary buffer\n");
534  return AVERROR(ENOMEM);
535  }
536 
537  if (!s->fill_order) {
538  memcpy(src2, src, size);
539  } else {
540  for (i = 0; i < size; i++)
541  src2[i] = ff_reverse[src[i]];
542  }
543  memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
544  ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
545  s->compr, s->fax_opts);
546  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
547  for (line = 0; line < lines; line++) {
548  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
549  dst += stride;
550  }
551  return ret;
552 }
553 
554 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame);
555 
556 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
557  const uint8_t *src, int size, int strip_start, int lines)
558 {
559  PutByteContext pb;
560  int c, line, pixels, code, ret;
561  const uint8_t *ssrc = src;
562  int width = ((s->width * s->bpp) + 7) >> 3;
564  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
565  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
566  desc->nb_components >= 3;
567  int is_dng;
568 
569  if (s->planar)
570  width /= s->bppcount;
571 
572  if (size <= 0)
573  return AVERROR_INVALIDDATA;
574 
575  if (is_yuv) {
576  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
577  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
578  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
579  if (s->yuv_line == NULL) {
580  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
581  return AVERROR(ENOMEM);
582  }
583  dst = s->yuv_line;
584  stride = 0;
585 
586  width = (s->width - 1) / s->subsampling[0] + 1;
587  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
588  av_assert0(width <= bytes_per_row);
589  av_assert0(s->bpp == 24);
590  }
591  if (s->is_bayer) {
592  av_assert0(width == (s->bpp * s->width + 7) >> 3);
593  }
594  if (p->format == AV_PIX_FMT_GRAY12) {
595  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
596  if (s->yuv_line == NULL) {
597  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
598  return AVERROR(ENOMEM);
599  }
600  dst = s->yuv_line;
601  stride = 0;
602  }
603 
604  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
605 #if CONFIG_ZLIB
606  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
607  strip_start, is_yuv);
608 #else
609  av_log(s->avctx, AV_LOG_ERROR,
610  "zlib support not enabled, "
611  "deflate compression not supported\n");
612  return AVERROR(ENOSYS);
613 #endif
614  }
615  if (s->compr == TIFF_LZMA) {
616 #if CONFIG_LZMA
617  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
618  strip_start, is_yuv);
619 #else
620  av_log(s->avctx, AV_LOG_ERROR,
621  "LZMA support not enabled\n");
622  return AVERROR(ENOSYS);
623 #endif
624  }
625  if (s->compr == TIFF_LZW) {
626  if (s->fill_order) {
627  if ((ret = deinvert_buffer(s, src, size)) < 0)
628  return ret;
629  ssrc = src = s->deinvert_buf;
630  }
631  if (size > 1 && !src[0] && (src[1]&1)) {
632  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
633  }
634  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
635  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
636  return ret;
637  }
638  for (line = 0; line < lines; line++) {
639  pixels = ff_lzw_decode(s->lzw, dst, width);
640  if (pixels < width) {
641  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
642  pixels, width);
643  return AVERROR_INVALIDDATA;
644  }
645  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
646  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
647  if (is_yuv) {
648  unpack_yuv(s, p, dst, strip_start + line);
649  line += s->subsampling[1] - 1;
650  } else if (p->format == AV_PIX_FMT_GRAY12) {
651  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
652  }
653  dst += stride;
654  }
655  return 0;
656  }
657  if (s->compr == TIFF_CCITT_RLE ||
658  s->compr == TIFF_G3 ||
659  s->compr == TIFF_G4) {
660  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
661  return AVERROR_INVALIDDATA;
662 
663  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
664  }
665 
666  bytestream2_init(&s->gb, src, size);
667  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
668 
669  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
670 
671  /* Decode JPEG-encoded DNGs with strips */
672  if (s->compr == TIFF_NEWJPEG && is_dng) {
673  if (s->strips > 1) {
674  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
675  return AVERROR_PATCHWELCOME;
676  }
677  if ((ret = dng_decode_strip(s->avctx, p)) < 0)
678  return ret;
679  return 0;
680  }
681 
682  if (is_dng && stride == 0)
683  return AVERROR_INVALIDDATA;
684 
685  for (line = 0; line < lines; line++) {
686  if (src - ssrc > size) {
687  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
688  return AVERROR_INVALIDDATA;
689  }
690 
691  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
692  break;
693  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
694  switch (s->compr) {
695  case TIFF_RAW:
696  if (ssrc + size - src < width)
697  return AVERROR_INVALIDDATA;
698 
699  if (!s->fill_order) {
700  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
701  dst, 1, src, 0, width, 0);
702  } else {
703  int i;
704  for (i = 0; i < width; i++)
705  dst[i] = ff_reverse[src[i]];
706  }
707 
708  /* Color processing for DNG images with uncompressed strips (non-tiled) */
709  if (is_dng) {
710  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
711 
712  is_u16 = (s->bpp > 8);
713  pixel_size_bits = (is_u16 ? 16 : 8);
714  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
715 
716  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
717  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
718  dng_blit(s,
719  dst,
720  0, // no stride, only 1 line
721  dst,
722  0, // no stride, only 1 line
723  elements,
724  1,
725  0, // single-component variation is only present in JPEG-encoded DNGs
726  is_u16);
727  }
728 
729  src += width;
730  break;
731  case TIFF_PACKBITS:
732  for (pixels = 0; pixels < width;) {
733  if (ssrc + size - src < 2) {
734  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
735  return AVERROR_INVALIDDATA;
736  }
737  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
738  if (code >= 0) {
739  code++;
740  if (pixels + code > width ||
741  ssrc + size - src < code) {
742  av_log(s->avctx, AV_LOG_ERROR,
743  "Copy went out of bounds\n");
744  return AVERROR_INVALIDDATA;
745  }
746  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
747  dst, 1, src, 0, code, pixels);
748  src += code;
749  pixels += code;
750  } else if (code != -128) { // -127..-1
751  code = (-code) + 1;
752  if (pixels + code > width) {
753  av_log(s->avctx, AV_LOG_ERROR,
754  "Run went out of bounds\n");
755  return AVERROR_INVALIDDATA;
756  }
757  c = *src++;
758  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
759  dst, 0, NULL, c, code, pixels);
760  pixels += code;
761  }
762  }
763  if (s->fill_order) {
764  int i;
765  for (i = 0; i < width; i++)
766  dst[i] = ff_reverse[dst[i]];
767  }
768  break;
769  }
770  if (is_yuv) {
771  unpack_yuv(s, p, dst, strip_start + line);
772  line += s->subsampling[1] - 1;
773  } else if (p->format == AV_PIX_FMT_GRAY12) {
774  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
775  }
776  dst += stride;
777  }
778  return 0;
779 }
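
/*
 * PackBits worked example (illustrative): a control byte n >= 0 copies n + 1
 * literal bytes, n in -127..-1 repeats the next byte 1 - n times, and -128 is
 * a no-op. So the sequence
 *
 *     0x02 'A' 'B' 'C'  0xFD 'x'
 *
 * expands to "ABC" followed by "xxxx" (0xFD = -3, i.e. 1 - (-3) = 4 repeats).
 */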
780 
781 /**
782  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
783  */
784 static uint16_t av_always_inline dng_process_color16(uint16_t value,
785  const uint16_t *lut,
786  uint16_t black_level,
787  float scale_factor) {
788  float value_norm;
789 
790  // Lookup table lookup
791  if (lut)
792  value = lut[value];
793 
794  // Black level subtraction
795  value = av_clip_uint16_c((unsigned)value - black_level);
796 
797  // Color scaling
798  value_norm = (float)value * scale_factor;
799 
800  value = av_clip_uint16_c(value_norm * 65535);
801 
802  return value;
803 }
804 
805 static uint16_t av_always_inline dng_process_color8(uint16_t value,
806  const uint16_t *lut,
807  uint16_t black_level,
808  float scale_factor) {
809  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
810 }
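
/*
 * Worked example (illustrative, with made-up levels): for an identity LUT,
 * black_level = 256 and white_level = 4095, scale_factor = 1 / 3839. A raw
 * sample of 2048 becomes 2048 - 256 = 1792, is normalized to
 * 1792 / 3839 ~= 0.467 and rescaled to roughly 30590 in the full 16-bit
 * range (>> 8, i.e. ~119, for 8-bit output).
 */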
811 
812 static void dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
813  const uint8_t *src, int src_stride,
814  int width, int height, int is_single_comp, int is_u16)
815 {
816  int line, col;
817  float scale_factor;
818 
819  scale_factor = 1.0f / (s->white_level - s->black_level);
820 
821  if (is_single_comp) {
822  if (!is_u16)
823  return; /* <= 8bpp unsupported */
824 
825  /* The image is double the width and half the height of what we need; each input row
826  comprises two rows of the output (split vertically in the middle). */
827  for (line = 0; line < height / 2; line++) {
828  uint16_t *dst_u16 = (uint16_t *)dst;
829  uint16_t *src_u16 = (uint16_t *)src;
830 
831  /* Blit first half of input row to initial row of output */
832  for (col = 0; col < width; col++)
833  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
834 
835  /* Advance the destination pointer by a row (source pointer remains in the same place) */
836  dst += dst_stride * sizeof(uint16_t);
837  dst_u16 = (uint16_t *)dst;
838 
839  /* Blit second half of input row to next row of output */
840  for (col = 0; col < width; col++)
841  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
842 
843  dst += dst_stride * sizeof(uint16_t);
844  src += src_stride * sizeof(uint16_t);
845  }
846  } else {
847  /* Input and output image are the same size and the MJpeg decoder has done per-component
848  deinterleaving, so blitting here is straightforward. */
849  if (is_u16) {
850  for (line = 0; line < height; line++) {
851  uint16_t *dst_u16 = (uint16_t *)dst;
852  uint16_t *src_u16 = (uint16_t *)src;
853 
854  for (col = 0; col < width; col++)
855  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level, scale_factor);
856 
857  dst += dst_stride * sizeof(uint16_t);
858  src += src_stride * sizeof(uint16_t);
859  }
860  } else {
861  for (line = 0; line < height; line++) {
862  for (col = 0; col < width; col++)
863  *dst++ = dng_process_color8(*src++, s->dng_lut, s->black_level, scale_factor);
864 
865  dst += dst_stride;
866  src += src_stride;
867  }
868  }
869  }
870 }
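
/*
 * Layout sketch (illustrative): in the single-component JPEG case the MJPEG
 * decoder hands back a (2 * width) x (height / 2) plane in which each decoded
 * row holds two consecutive output rows side by side:
 *
 *     decoded row 0: [ output row 0 | output row 1 ]
 *     decoded row 1: [ output row 2 | output row 3 ]
 *
 * which is why the source pointer above advances only once per two
 * destination rows.
 */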
871 
872 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
873  int tile_byte_count, int dst_x, int dst_y, int w, int h)
874 {
875  TiffContext *s = avctx->priv_data;
876  AVPacket jpkt;
877  uint8_t *dst_data, *src_data;
878  uint32_t dst_offset; /* offset from dst buffer in pixels */
879  int is_single_comp, is_u16, pixel_size;
880  int ret;
881 
882  /* Prepare a packet and send to the MJPEG decoder */
883  av_init_packet(&jpkt);
884  jpkt.data = (uint8_t*)s->gb.buffer;
885  jpkt.size = tile_byte_count;
886 
887  if (s->is_bayer) {
888  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
889  /* We have to set this information here, as there is no way to know if a given JPEG is a DNG-embedded
890  image or not from its own data (and we need that information when decoding it). */
891  mjpegdecctx->bayer = 1;
892  }
893 
894  ret = avcodec_send_packet(s->avctx_mjpeg, &jpkt);
895  if (ret < 0) {
896  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
897  return ret;
898  }
899 
900  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
901  if (ret < 0) {
902  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
903 
904  /* Normally skip, error if explode */
905  if (avctx->err_recognition & AV_EF_EXPLODE)
906  return AVERROR_INVALIDDATA;
907  else
908  return 0;
909  }
910 
911  /* Copy the decoded tile's pixels from 'jpgframe' to 'frame' (the final buffer) */
912 
913  /* See dng_blit for explanation */
914  is_single_comp = (s->avctx_mjpeg->width == w * 2 && s->avctx_mjpeg->height == h / 2);
915 
916  is_u16 = (s->bpp > 8);
917  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
918 
919  if (is_single_comp && !is_u16) {
920  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
921  av_frame_unref(s->jpgframe);
922  return AVERROR_PATCHWELCOME;
923  }
924 
925  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
926  dst_data = frame->data[0] + dst_offset * pixel_size;
927  src_data = s->jpgframe->data[0];
928 
929  dng_blit(s,
930  dst_data,
931  frame->linesize[0] / pixel_size,
932  src_data,
933  s->jpgframe->linesize[0] / pixel_size,
934  w,
935  h,
936  is_single_comp,
937  is_u16);
938 
939  av_frame_unref(s->jpgframe);
940 
941  return 0;
942 }
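
/*
 * Offset example (illustrative, hypothetical numbers): for a 16-bit frame
 * with linesize[0] = 8192 bytes (pixel_size = 2), a tile placed at
 * dst_x = 256, dst_y = 3 starts at dst_offset = 256 + 8192 * 3 / 2 = 12544
 * pixels, i.e. byte offset 25088 into data[0].
 */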
943 
944 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, AVPacket *avpkt)
945 {
946  TiffContext *s = avctx->priv_data;
947  int tile_idx;
948  int tile_offset_offset, tile_offset;
949  int tile_byte_count_offset, tile_byte_count;
950  int tile_count_x, tile_count_y;
951  int tile_width, tile_length;
952  int has_width_leftover, has_height_leftover;
953  int tile_x = 0, tile_y = 0;
954  int pos_x = 0, pos_y = 0;
955  int ret;
956 
957  s->jpgframe->width = s->tile_width;
958  s->jpgframe->height = s->tile_length;
959 
960  s->avctx_mjpeg->width = s->tile_width;
961  s->avctx_mjpeg->height = s->tile_length;
962 
963  has_width_leftover = (s->width % s->tile_width != 0);
964  has_height_leftover = (s->height % s->tile_length != 0);
965 
966  /* Calculate tile counts (round up) */
967  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
968  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
969 
970  /* Iterate over the number of tiles */
971  for (tile_idx = 0; tile_idx < s->tile_count; tile_idx++) {
972  tile_x = tile_idx % tile_count_x;
973  tile_y = tile_idx / tile_count_x;
974 
975  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
976  tile_width = s->width % s->tile_width;
977  else
978  tile_width = s->tile_width;
979 
980  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
981  tile_length = s->height % s->tile_length;
982  else
983  tile_length = s->tile_length;
984 
985  /* Read tile offset */
986  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
987  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
988  tile_offset = ff_tget_long(&s->gb, s->le);
989 
990  /* Read tile byte size */
991  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
992  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
993  tile_byte_count = ff_tget_long(&s->gb, s->le);
994 
995  /* Seek to tile data */
996  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
997 
998  /* Decode JPEG tile and copy it in the reference frame */
999  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1000 
1001  if (ret < 0)
1002  return ret;
1003 
1004  /* Advance current positions */
1005  pos_x += tile_width;
1006  if (tile_x == tile_count_x - 1) { // If on the right edge
1007  pos_x = 0;
1008  pos_y += tile_length;
1009  }
1010  }
1011 
1012  /* Frame is ready to be output */
1013  frame->pict_type = AV_PICTURE_TYPE_I;
1014  frame->key_frame = 1;
1015 
1016  return avpkt->size;
1017 }
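
/*
 * Tile-grid example (illustrative, hypothetical sizes): a 4000x3000 image
 * with 256x256 tiles gives tile_count_x = (4000 + 255) / 256 = 16 and
 * tile_count_y = 12; the right-most column of tiles is only
 * 4000 % 256 = 160 pixels wide and the bottom row 3000 % 256 = 184 pixels
 * tall, which is what the has_*_leftover checks above account for.
 */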
1018 
1019 static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame)
1020 {
1021  TiffContext *s = avctx->priv_data;
1022 
1023  s->jpgframe->width = s->width;
1024  s->jpgframe->height = s->height;
1025 
1026  s->avctx_mjpeg->width = s->width;
1027  s->avctx_mjpeg->height = s->height;
1028 
1029  return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height);
1030 }
1031 
1032 static int init_image(TiffContext *s, ThreadFrame *frame)
1033 {
1034  int ret;
1035  int create_gray_palette = 0;
1036 
1037  // make sure there is no aliasing in the following switch
1038  if (s->bpp >= 100 || s->bppcount >= 10) {
1039  av_log(s->avctx, AV_LOG_ERROR,
1040  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1041  s->bpp, s->bppcount);
1042  return AVERROR_INVALIDDATA;
1043  }
1044 
1045  switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
1046  case 11:
1047  if (!s->palette_is_set) {
1048  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1049  break;
1050  }
1051  case 21:
1052  case 41:
1053  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1054  if (!s->palette_is_set) {
1055  create_gray_palette = 1;
1056  }
1057  break;
1058  case 81:
1059  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1060  break;
1061  case 121:
1062  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1063  break;
1064  case 10081:
1065  switch (AV_RL32(s->pattern)) {
1066  case 0x02010100:
1067  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1068  break;
1069  case 0x00010102:
1070  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1071  break;
1072  case 0x01000201:
1073  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1074  break;
1075  case 0x01020001:
1076  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1077  break;
1078  default:
1079  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1080  AV_RL32(s->pattern));
1081  return AVERROR_PATCHWELCOME;
1082  }
1083  break;
1084  case 10101:
1085  case 10121:
1086  case 10141:
1087  case 10161:
1088  switch (AV_RL32(s->pattern)) {
1089  case 0x02010100:
1090  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1091  break;
1092  case 0x00010102:
1093  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1094  break;
1095  case 0x01000201:
1096  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1097  break;
1098  case 0x01020001:
1099  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1100  break;
1101  default:
1102  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1103  AV_RL32(s->pattern));
1104  return AVERROR_PATCHWELCOME;
1105  }
1106  break;
1107  case 243:
1108  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1109  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1110  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1111  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1112  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1113  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1114  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1115  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1116  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1117  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1118  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1119  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1120  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1121  } else {
1122  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1123  return AVERROR_PATCHWELCOME;
1124  }
1125  } else
1126  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1127  break;
1128  case 161:
1129  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1130  break;
1131  case 162:
1132  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1133  break;
1134  case 322:
1135  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1136  break;
1137  case 324:
1138  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1139  break;
1140  case 405:
1141  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1142  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1143  else {
1144  av_log(s->avctx, AV_LOG_ERROR,
1145  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1146  return AVERROR_PATCHWELCOME;
1147  }
1148  break;
1149  case 483:
1150  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1151  break;
1152  case 644:
1153  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1154  break;
1155  case 1243:
1156  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1157  break;
1158  case 1324:
1159  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1160  break;
1161  case 1483:
1162  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1163  break;
1164  case 1644:
1165  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1166  break;
1167  default:
1168  av_log(s->avctx, AV_LOG_ERROR,
1169  "This format is not supported (bpp=%d, bppcount=%d)\n",
1170  s->bpp, s->bppcount);
1171  return AVERROR_INVALIDDATA;
1172  }
1173 
1174  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1175  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1176  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1177  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1178  desc->nb_components < 3) {
1179  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1180  return AVERROR_INVALIDDATA;
1181  }
1182  }
1183 
1184  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1185  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1186  if (ret < 0)
1187  return ret;
1188  }
1189  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1190  return ret;
1191  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1192  if (!create_gray_palette)
1193  memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
1194  else {
1195  /* make default grayscale pal */
1196  int i;
1197  uint32_t *pal = (uint32_t *)frame->f->data[1];
1198  for (i = 0; i < 1<<s->bpp; i++)
1199  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1200  }
1201  }
1202  return 0;
1203 }
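
/*
 * Palette example (illustrative): for bpp = 2 the generated grayscale palette
 * has four entries, i * 255 / 3 replicated to R, G and B with opaque alpha:
 * 0xFF000000, 0xFF555555, 0xFFAAAAAA, 0xFFFFFFFF.
 */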
1204 
1205 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1206 {
1207  int offset = tag == TIFF_YRES ? 2 : 0;
1208  s->res[offset++] = num;
1209  s->res[offset] = den;
1210  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1211  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1212  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1213  if (num > INT64_MAX || den > INT64_MAX) {
1214  num = num >> 1;
1215  den = den >> 1;
1216  }
1217  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1218  num, den, INT32_MAX);
1219  if (!s->avctx->sample_aspect_ratio.den)
1220  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1221  }
1222 }
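
/*
 * Worked example (illustrative): XResolution = 300/1 and YResolution = 150/1
 * store res[] = {300, 1, 150, 1}, so the sample aspect ratio becomes
 * (res[2] * res[1]) / (res[0] * res[3]) = 150 / 300, reduced to 1:2
 * (pixels twice as tall as they are wide at equal display size).
 */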
1223 
1224 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1225 {
1226  AVFrameSideData *sd;
1227  GetByteContext gb_temp;
1228  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1229  int i, start;
1230  int pos;
1231  int ret;
1232  double *dp;
1233 
1234  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1235  if (ret < 0) {
1236  goto end;
1237  }
1238 
1239  off = bytestream2_tell(&s->gb);
1240  if (count == 1) {
1241  switch (type) {
1242  case TIFF_BYTE:
1243  case TIFF_SHORT:
1244  case TIFF_LONG:
1245  value = ff_tget(&s->gb, type, s->le);
1246  break;
1247  case TIFF_RATIONAL:
1248  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1249  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1250  if (!value2) {
1251  av_log(s->avctx, AV_LOG_ERROR, "Invalid denominator in rational\n");
1252  return AVERROR_INVALIDDATA;
1253  }
1254 
1255  break;
1256  case TIFF_STRING:
1257  if (count <= 4) {
1258  break;
1259  }
1260  default:
1261  value = UINT_MAX;
1262  }
1263  }
1264 
1265  switch (tag) {
1266  case TIFF_SUBFILE:
1267  s->is_thumbnail = (value != 0);
1268  break;
1269  case TIFF_WIDTH:
1270  s->width = value;
1271  break;
1272  case TIFF_HEIGHT:
1273  s->height = value;
1274  break;
1275  case TIFF_BPP:
1276  if (count > 5U) {
1277  av_log(s->avctx, AV_LOG_ERROR,
1278  "This format is not supported (bpp=%d, %d components)\n",
1279  value, count);
1280  return AVERROR_INVALIDDATA;
1281  }
1282  s->bppcount = count;
1283  if (count == 1)
1284  s->bpp = value;
1285  else {
1286  switch (type) {
1287  case TIFF_BYTE:
1288  case TIFF_SHORT:
1289  case TIFF_LONG:
1290  s->bpp = 0;
1291  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1292  return AVERROR_INVALIDDATA;
1293  for (i = 0; i < count; i++)
1294  s->bpp += ff_tget(&s->gb, type, s->le);
1295  break;
1296  default:
1297  s->bpp = -1;
1298  }
1299  }
1300  break;
1301  case TIFF_SAMPLES_PER_PIXEL:
1302  if (count != 1) {
1303  av_log(s->avctx, AV_LOG_ERROR,
1304  "Samples per pixel requires a single value, many provided\n");
1305  return AVERROR_INVALIDDATA;
1306  }
1307  if (value > 5U) {
1308  av_log(s->avctx, AV_LOG_ERROR,
1309  "Samples per pixel %d is too large\n", value);
1310  return AVERROR_INVALIDDATA;
1311  }
1312  if (s->bppcount == 1)
1313  s->bpp *= value;
1314  s->bppcount = value;
1315  break;
1316  case TIFF_COMPR:
1317  s->compr = value;
1318  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1319  s->predictor = 0;
1320  switch (s->compr) {
1321  case TIFF_RAW:
1322  case TIFF_PACKBITS:
1323  case TIFF_LZW:
1324  case TIFF_CCITT_RLE:
1325  break;
1326  case TIFF_G3:
1327  case TIFF_G4:
1328  s->fax_opts = 0;
1329  break;
1330  case TIFF_DEFLATE:
1331  case TIFF_ADOBE_DEFLATE:
1332 #if CONFIG_ZLIB
1333  break;
1334 #else
1335  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1336  return AVERROR(ENOSYS);
1337 #endif
1338  case TIFF_JPEG:
1339  case TIFF_NEWJPEG:
1340  s->is_jpeg = 1;
1341  break;
1342  case TIFF_LZMA:
1343 #if CONFIG_LZMA
1344  break;
1345 #else
1346  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1347  return AVERROR(ENOSYS);
1348 #endif
1349  default:
1350  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1351  s->compr);
1352  return AVERROR_INVALIDDATA;
1353  }
1354  break;
1355  case TIFF_ROWSPERSTRIP:
1356  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1357  value = s->height;
1358  s->rps = FFMIN(value, s->height);
1359  break;
1360  case TIFF_STRIP_OFFS:
1361  if (count == 1) {
1362  if (value > INT_MAX) {
1363  av_log(s->avctx, AV_LOG_ERROR,
1364  "strippos %u too large\n", value);
1365  return AVERROR_INVALIDDATA;
1366  }
1367  s->strippos = 0;
1368  s->stripoff = value;
1369  } else
1370  s->strippos = off;
1371  s->strips = count;
1372  if (s->strips == 1)
1373  s->rps = s->height;
1374  s->sot = type;
1375  break;
1376  case TIFF_STRIP_SIZE:
1377  if (count == 1) {
1378  if (value > INT_MAX) {
1379  av_log(s->avctx, AV_LOG_ERROR,
1380  "stripsize %u too large\n", value);
1381  return AVERROR_INVALIDDATA;
1382  }
1383  s->stripsizesoff = 0;
1384  s->stripsize = value;
1385  s->strips = 1;
1386  } else {
1387  s->stripsizesoff = off;
1388  }
1389  s->strips = count;
1390  s->sstype = type;
1391  break;
1392  case TIFF_XRES:
1393  case TIFF_YRES:
1394  set_sar(s, tag, value, value2);
1395  break;
1396  case TIFF_TILE_OFFSETS:
1397  s->tile_offsets_offset = off;
1398  s->tile_count = count;
1399  s->is_tiled = 1;
1400  break;
1401  case TIFF_TILE_BYTE_COUNTS:
1402  s->tile_byte_counts_offset = off;
1403  break;
1404  case TIFF_TILE_LENGTH:
1405  s->tile_length = value;
1406  break;
1407  case TIFF_TILE_WIDTH:
1408  s->tile_width = value;
1409  break;
1410  case TIFF_PREDICTOR:
1411  s->predictor = value;
1412  break;
1413  case TIFF_SUB_IFDS:
1414  if (count == 1)
1415  s->sub_ifd = value;
1416  else if (count > 1)
1417  s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */
1418  break;
1420  for (int i = 0; i < FFMIN(count, 1 << s->bpp); i++)
1421  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1422  break;
1423  case DNG_BLACK_LEVEL:
1424  if (count > 1) { /* Use the first value in the pattern (assume they're all the same) */
1425  if (type == TIFF_RATIONAL) {
1426  value = ff_tget(&s->gb, TIFF_LONG, s->le);
1427  value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
1428  if (!value2) {
1429  av_log(s->avctx, AV_LOG_ERROR, "Invalid black level denominator\n");
1430  return AVERROR_INVALIDDATA;
1431  }
1432 
1433  s->black_level = value / value2;
1434  } else
1435  s->black_level = ff_tget(&s->gb, type, s->le);
1436  av_log(s->avctx, AV_LOG_WARNING, "Assuming black level pattern values are identical\n");
1437  } else {
1438  s->black_level = value / value2;
1439  }
1440  break;
1441  case DNG_WHITE_LEVEL:
1442  s->white_level = value;
1443  break;
1444  case TIFF_CFA_PATTERN_DIM:
1445  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1446  ff_tget(&s->gb, type, s->le) != 2)) {
1447  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1448  return AVERROR_INVALIDDATA;
1449  }
1450  break;
1451  case TIFF_CFA_PATTERN:
1452  s->is_bayer = 1;
1453  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1454  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1455  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1456  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1457  break;
1458  case TIFF_PHOTOMETRIC:
1459  switch (value) {
1460  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1461  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1462  case TIFF_PHOTOMETRIC_RGB:
1463  case TIFF_PHOTOMETRIC_PALETTE:
1464  case TIFF_PHOTOMETRIC_SEPARATED:
1465  case TIFF_PHOTOMETRIC_YCBCR:
1466  case TIFF_PHOTOMETRIC_CFA:
1467  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1468  s->photometric = value;
1469  break;
1477  "PhotometricInterpretation 0x%04X",
1478  value);
1479  return AVERROR_PATCHWELCOME;
1480  default:
1481  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1482  "unknown\n", value);
1483  return AVERROR_INVALIDDATA;
1484  }
1485  break;
1486  case TIFF_FILL_ORDER:
1487  if (value < 1 || value > 2) {
1488  av_log(s->avctx, AV_LOG_ERROR,
1489  "Unknown FillOrder value %d, trying default one\n", value);
1490  value = 1;
1491  }
1492  s->fill_order = value - 1;
1493  break;
1494  case TIFF_PAL: {
1495  GetByteContext pal_gb[3];
1496  off = type_sizes[type];
1497  if (count / 3 > 256 ||
1498  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1499  return AVERROR_INVALIDDATA;
1500 
1501  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1502  bytestream2_skip(&pal_gb[1], count / 3 * off);
1503  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1504 
1505  off = (type_sizes[type] - 1) << 3;
1506  if (off > 31U) {
1507  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1508  return AVERROR_INVALIDDATA;
1509  }
1510 
1511  for (i = 0; i < count / 3; i++) {
1512  uint32_t p = 0xFF000000;
1513  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1514  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1515  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1516  s->palette[i] = p;
1517  }
1518  s->palette_is_set = 1;
1519  break;
1520  }
1521  case TIFF_PLANAR:
1522  s->planar = value == 2;
1523  break;
1524  case TIFF_YCBCR_SUBSAMPLING:
1525  if (count != 2) {
1526  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1527  return AVERROR_INVALIDDATA;
1528  }
1529  for (i = 0; i < count; i++) {
1530  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1531  if (s->subsampling[i] <= 0) {
1532  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1533  s->subsampling[i] = 1;
1534  return AVERROR_INVALIDDATA;
1535  }
1536  }
1537  break;
1538  case TIFF_T4OPTIONS:
1539  if (s->compr == TIFF_G3)
1540  s->fax_opts = value;
1541  break;
1542  case TIFF_T6OPTIONS:
1543  if (s->compr == TIFF_G4)
1544  s->fax_opts = value;
1545  break;
1546 #define ADD_METADATA(count, name, sep)\
1547  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1548  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1549  goto end;\
1550  }
1552  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1553  break;
1555  ADD_METADATA(count, "ModelTransformationTag", NULL);
1556  break;
1557  case TIFF_MODEL_TIEPOINT:
1558  ADD_METADATA(count, "ModelTiepointTag", NULL);
1559  break;
1561  if (s->geotag_count) {
1562  avpriv_request_sample(s->avctx, "Multiple geo key directories\n");
1563  return AVERROR_INVALIDDATA;
1564  }
1565  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1566  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1567  s->geotag_count = ff_tget_short(&s->gb, s->le);
1568  if (s->geotag_count > count / 4 - 1) {
1569  s->geotag_count = count / 4 - 1;
1570  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1571  }
1572  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1573  || s->geotag_count == 0) {
1574  s->geotag_count = 0;
1575  return -1;
1576  }
1577  s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
1578  if (!s->geotags) {
1579  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1580  s->geotag_count = 0;
1581  goto end;
1582  }
1583  for (i = 0; i < s->geotag_count; i++) {
1584  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1585  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1586  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1587 
1588  if (!s->geotags[i].type)
1589  s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
1590  else
1591  s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
1592  }
1593  break;
1595  if (count >= INT_MAX / sizeof(int64_t))
1596  return AVERROR_INVALIDDATA;
1597  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1598  return AVERROR_INVALIDDATA;
1599  dp = av_malloc_array(count, sizeof(double));
1600  if (!dp) {
1601  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1602  goto end;
1603  }
1604  for (i = 0; i < count; i++)
1605  dp[i] = ff_tget_double(&s->gb, s->le);
1606  for (i = 0; i < s->geotag_count; i++) {
1607  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1608  if (s->geotags[i].count == 0
1609  || s->geotags[i].offset + s->geotags[i].count > count) {
1610  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1611  } else if (s->geotags[i].val) {
1612  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1613  } else {
1614  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1615  if (!ap) {
1616  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1617  av_freep(&dp);
1618  return AVERROR(ENOMEM);
1619  }
1620  s->geotags[i].val = ap;
1621  }
1622  }
1623  }
1624  av_freep(&dp);
1625  break;
1626  case TIFF_GEO_ASCII_PARAMS:
1627  pos = bytestream2_tell(&s->gb);
1628  for (i = 0; i < s->geotag_count; i++) {
1629  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1630  if (s->geotags[i].count == 0
1631  || s->geotags[i].offset + s->geotags[i].count > count) {
1632  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1633  } else {
1634  char *ap;
1635 
1636  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1637  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1638  return AVERROR_INVALIDDATA;
1639  if (s->geotags[i].val)
1640  return AVERROR_INVALIDDATA;
1641  ap = av_malloc(s->geotags[i].count);
1642  if (!ap) {
1643  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1644  return AVERROR(ENOMEM);
1645  }
1646  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1647  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1648  s->geotags[i].val = ap;
1649  }
1650  }
1651  }
1652  break;
1653  case TIFF_ICC_PROFILE:
1654  if (type != TIFF_UNDEFINED)
1655  return AVERROR_INVALIDDATA;
1656 
1657  gb_temp = s->gb;
1658  bytestream2_seek(&gb_temp, SEEK_SET, off);
1659 
1660  if (bytestream2_get_bytes_left(&gb_temp) < count)
1661  return AVERROR_INVALIDDATA;
1662 
1663  sd = av_frame_new_side_data(frame, AV_FRAME_DATA_ICC_PROFILE, count);
1664  if (!sd)
1665  return AVERROR(ENOMEM);
1666 
1667  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1668  break;
1669  case TIFF_ARTIST:
1670  ADD_METADATA(count, "artist", NULL);
1671  break;
1672  case TIFF_COPYRIGHT:
1673  ADD_METADATA(count, "copyright", NULL);
1674  break;
1675  case TIFF_DATE:
1676  ADD_METADATA(count, "date", NULL);
1677  break;
1678  case TIFF_DOCUMENT_NAME:
1679  ADD_METADATA(count, "document_name", NULL);
1680  break;
1681  case TIFF_HOST_COMPUTER:
1682  ADD_METADATA(count, "computer", NULL);
1683  break;
1685  ADD_METADATA(count, "description", NULL);
1686  break;
1687  case TIFF_MAKE:
1688  ADD_METADATA(count, "make", NULL);
1689  break;
1690  case TIFF_MODEL:
1691  ADD_METADATA(count, "model", NULL);
1692  break;
1693  case TIFF_PAGE_NAME:
1694  ADD_METADATA(count, "page_name", NULL);
1695  break;
1696  case TIFF_PAGE_NUMBER:
1697  ADD_METADATA(count, "page_number", " / ");
1698  // need to seek back to re-read the page number
1699  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1700  // read the page number
1701  s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
1702  // get back to where we were before the previous seek
1703  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1704  break;
1705  case TIFF_SOFTWARE_NAME:
1706  ADD_METADATA(count, "software", NULL);
1707  break;
1708  case DNG_VERSION:
1709  if (count == 4) {
1710  unsigned int ver[4];
1711  ver[0] = ff_tget(&s->gb, type, s->le);
1712  ver[1] = ff_tget(&s->gb, type, s->le);
1713  ver[2] = ff_tget(&s->gb, type, s->le);
1714  ver[3] = ff_tget(&s->gb, type, s->le);
1715 
1716  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1717  ver[0], ver[1], ver[2], ver[3]);
1718 
1719  tiff_set_type(s, TIFF_TYPE_DNG);
1720  }
1721  break;
1722  case CINEMADNG_TIME_CODES:
1723  case CINEMADNG_FRAME_RATE:
1724  case CINEMADNG_T_STOP:
1725  case CINEMADNG_REEL_NAME:
1726  case CINEMADNG_CAMERA_LABEL:
1727  tiff_set_type(s, TIFF_TYPE_CINEMADNG);
1728  break;
1729  default:
1730  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1731  av_log(s->avctx, AV_LOG_ERROR,
1732  "Unknown or unsupported tag %d/0x%0X\n",
1733  tag, tag);
1734  return AVERROR_INVALIDDATA;
1735  }
1736  }
1737 end:
1738  if (s->bpp > 64U) {
1739  av_log(s->avctx, AV_LOG_ERROR,
1740  "This format is not supported (bpp=%d, %d components)\n",
1741  s->bpp, count);
1742  s->bpp = 0;
1743  return AVERROR_INVALIDDATA;
1744  }
1745  bytestream2_seek(&s->gb, start, SEEK_SET);
1746  return 0;
1747 }
1748 
1749 static int decode_frame(AVCodecContext *avctx,
1750  void *data, int *got_frame, AVPacket *avpkt)
1751 {
1752  TiffContext *const s = avctx->priv_data;
1753  AVFrame *const p = data;
1754  ThreadFrame frame = { .f = data };
1755  unsigned off, last_off;
1756  int le, ret, plane, planes;
1757  int i, j, entries, stride;
1758  unsigned soff, ssize;
1759  uint8_t *dst;
1760  GetByteContext stripsizes;
1761  GetByteContext stripdata;
1762  int retry_for_subifd, retry_for_page;
1763  int is_dng;
1764  int has_tile_bits, has_strip_bits;
1765 
1766  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1767 
1768  // parse image header
1769  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1770  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1771  return ret;
1772  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1773  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1774  return AVERROR_INVALIDDATA;
1775  }
1776  s->le = le;
1777  // TIFF_BPP is not a required tag and defaults to 1
1778 
1779  s->tiff_type = TIFF_TYPE_TIFF;
1780 again:
1781  s->is_thumbnail = 0;
1782  s->bppcount = s->bpp = 1;
1783  s->photometric = TIFF_PHOTOMETRIC_NONE;
1784  s->compr = TIFF_RAW;
1785  s->fill_order = 0;
1786  s->white_level = 0;
1787  s->is_bayer = 0;
1788  s->is_tiled = 0;
1789  s->is_jpeg = 0;
1790  s->cur_page = 0;
1791 
1792  for (i = 0; i < 65536; i++)
1793  s->dng_lut[i] = i;
1794 
1795  free_geotags(s);
1796 
1797  // Reset these offsets so we can tell if they were set this frame
1798  s->stripsizesoff = s->strippos = 0;
1799  /* parse image file directory */
1800  bytestream2_seek(&s->gb, off, SEEK_SET);
1801  entries = ff_tget_short(&s->gb, le);
1802  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1803  return AVERROR_INVALIDDATA;
1804  for (i = 0; i < entries; i++) {
1805  if ((ret = tiff_decode_tag(s, p)) < 0)
1806  return ret;
1807  }
1808 
1809  if (s->get_thumbnail && !s->is_thumbnail) {
1810  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
1811  return AVERROR_EOF;
1812  }
1813 
1814  /** whether we should process this IFD's SubIFD */
1815  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
1816  /** whether we should process this multi-page IFD's next page */
1817  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
1818 
1819  last_off = off;
1820  if (retry_for_page) {
1821  // set offset to the next IFD
1822  off = ff_tget_long(&s->gb, le);
1823  } else if (retry_for_subifd) {
1824  // set offset to the SubIFD
1825  off = s->sub_ifd;
1826  }
1827 
1828  if (retry_for_subifd || retry_for_page) {
1829  if (!off) {
1830  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
1831  return AVERROR_INVALIDDATA;
1832  }
1833  if (off <= last_off) {
1834  avpriv_request_sample(s->avctx, "non increasing IFD offset\n");
1835  return AVERROR_INVALIDDATA;
1836  }
1837  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1838  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1839  return AVERROR_INVALIDDATA;
1840  }
1841  s->sub_ifd = 0;
1842  goto again;
1843  }
1844 
1845  /* At this point we've decided on which (Sub)IFD to process */
1846 
1847  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
1848 
1849  for (i = 0; i<s->geotag_count; i++) {
1850  const char *keyname = get_geokey_name(s->geotags[i].key);
1851  if (!keyname) {
1852  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
1853  continue;
1854  }
1855  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
1856  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
1857  continue;
1858  }
1859  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
1860  if (ret<0) {
1861  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
1862  return ret;
1863  }
1864  }
1865 
1866  if (is_dng) {
1867  int bps;
1868 
1869  if (s->white_level == 0)
1870  s->white_level = (1 << s->bpp) - 1; /* Default value as per the spec */
1871 
1872  if (s->white_level <= s->black_level) {
1873  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%"PRId32") must be less than WhiteLevel (%"PRId32")\n",
1874  s->black_level, s->white_level);
1875  return AVERROR_INVALIDDATA;
1876  }
1877 
1878  if (s->bpp % s->bppcount)
1879  return AVERROR_INVALIDDATA;
1880  bps = s->bpp / s->bppcount;
1881  if (bps < 8 || bps > 32)
1882  return AVERROR_INVALIDDATA;
1883  if (s->planar)
1884  return AVERROR_PATCHWELCOME;
1885  }
1886 
1887  if (!s->is_tiled && !s->strippos && !s->stripoff) {
1888  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
1889  return AVERROR_INVALIDDATA;
1890  }
1891 
1892  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length || s->tile_count;
1893  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
1894 
1895  if (has_tile_bits && has_strip_bits) {
1896  av_log(avctx, AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
1897  return AVERROR_INVALIDDATA;
1898  }
1899 
1900  /* now we have the data and may start decoding */
1901  if ((ret = init_image(s, &frame)) < 0)
1902  return ret;
1903 
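 /* Non-tiled path: strip byte counts and offsets come either from per-strip
  * arrays inside the packet (stripsizesoff / strippos) or, for a single
  * strip, from the scalar stripsize / stripoff values. */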
1904  if (!s->is_tiled) {
1905  if (s->strips == 1 && !s->stripsize) {
1906  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
1907  s->stripsize = avpkt->size - s->stripoff;
1908  }
1909 
1910  if (s->stripsizesoff) {
1911  if (s->stripsizesoff >= (unsigned)avpkt->size)
1912  return AVERROR_INVALIDDATA;
1913  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
1914  avpkt->size - s->stripsizesoff);
1915  }
1916  if (s->strippos) {
1917  if (s->strippos >= (unsigned)avpkt->size)
1918  return AVERROR_INVALIDDATA;
1919  bytestream2_init(&stripdata, avpkt->data + s->strippos,
1920  avpkt->size - s->strippos);
1921  }
1922 
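 /* RowsPerStrip must be positive and, for subsampled YCbCr input, a multiple
  * of the vertical subsampling factor so each strip covers whole chroma rows. */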
1923  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
1924  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
1925  return AVERROR_INVALIDDATA;
1926  }
1927  }
1928 
1929  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
1930  s->photometric == TIFF_PHOTOMETRIC_CFA) {
1931  p->color_trc = AVCOL_TRC_LINEAR;
1932  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
1933  p->color_trc = AVCOL_TRC_GAMMA22;
1934  }
1935 
1936  /* Handle DNG images with JPEG-compressed tiles */
1937 
1938  if (is_dng && s->is_tiled) {
1939  if (!s->is_jpeg) {
1940  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
1941  return AVERROR_PATCHWELCOME;
1942  } else if (!s->is_bayer) {
1943  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
1944  return AVERROR_PATCHWELCOME;
1945  } else {
1946  if ((ret = dng_decode_tiles(avctx, (AVFrame*)data, avpkt)) > 0)
1947  *got_frame = 1;
1948  return ret;
1949  }
1950  }
1951 
1952  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
1953 
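 /* With PlanarConfiguration = 2 every component is stored in its own set of
  * strips, so the outer loop runs once per plane; interleaved data is decoded
  * in a single pass. */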
1954  planes = s->planar ? s->bppcount : 1;
1955  for (plane = 0; plane < planes; plane++) {
1956  uint8_t *five_planes = NULL;
1957  int remaining = avpkt->size;
1958  int decoded_height;
1959  stride = p->linesize[plane];
1960  dst = p->data[plane];
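 /* CMYKA input carries five samples per pixel but the RGBA output frame only
  * holds four, so strips are first unpacked into a temporary buffer with a
  * 5/4-wide stride and converted to RGBA further below. */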
1961  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
1962  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
1963  stride = stride * 5 / 4;
1964  five_planes =
1965  dst = av_malloc(stride * s->height);
1966  if (!dst)
1967  return AVERROR(ENOMEM);
1968  }
1969  for (i = 0; i < s->height; i += s->rps) {
1970  if (i)
1971  dst += s->rps * stride;
1972  if (s->stripsizesoff)
1973  ssize = ff_tget(&stripsizes, s->sstype, le);
1974  else
1975  ssize = s->stripsize;
1976 
1977  if (s->strippos)
1978  soff = ff_tget(&stripdata, s->sot, le);
1979  else
1980  soff = s->stripoff;
1981 
1982  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
1983  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
1984  av_freep(&five_planes);
1985  return AVERROR_INVALIDDATA;
1986  }
1987  remaining -= ssize;
1988  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
1989  FFMIN(s->rps, s->height - i))) < 0) {
1990  if (avctx->err_recognition & AV_EF_EXPLODE) {
1991  av_freep(&five_planes);
1992  return ret;
1993  }
1994  break;
1995  }
1996  }
1997  decoded_height = FFMIN(i, s->height);
1998 
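 /* Predictor = 2 (horizontal differencing): each sample is stored as the
  * difference from the same component of the pixel to its left, so decoding
  * is a running sum along every row (e.g. stored 100, +3, -2 -> 100, 103, 101);
  * soff is the byte distance between horizontally adjacent samples. */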
1999  if (s->predictor == 2) {
2000  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2001  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2002  return AVERROR_PATCHWELCOME;
2003  }
2004  dst = five_planes ? five_planes : p->data[plane];
2005  soff = s->bpp >> 3;
2006  if (s->planar)
2007  soff = FFMAX(soff / s->bppcount, 1);
2008  ssize = s->width * soff;
2009  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2010  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2011  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2012  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2013  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2014  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2015  for (i = 0; i < decoded_height; i++) {
2016  for (j = soff; j < ssize; j += 2)
2017  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2018  dst += stride;
2019  }
2020  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2021  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2022  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2023  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2024  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2025  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2026  for (i = 0; i < decoded_height; i++) {
2027  for (j = soff; j < ssize; j += 2)
2028  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2029  dst += stride;
2030  }
2031  } else {
2032  for (i = 0; i < decoded_height; i++) {
2033  for (j = soff; j < ssize; j++)
2034  dst[j] += dst[j - soff];
2035  dst += stride;
2036  }
2037  }
2038  }
2039 
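 /* WhiteIsZero: a sample value of 0 means white, so invert every sample
  * against the maximum code value (the full palette range for PAL8,
  * 255 otherwise). */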
2040  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2041  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2042  dst = p->data[plane];
2043  for (i = 0; i < s->height; i++) {
2044  for (j = 0; j < stride; j++)
2045  dst[j] = c - dst[j];
2046  dst += stride;
2047  }
2048  }
2049 
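 /* Convert separated (CMYK) samples to RGB as R = (255 - C) * (255 - K) / 255
  * (likewise for G and B); x * 257 >> 16 approximates the division by 255,
  * and x * 65537 >> 32 is the 16-bit analogue for division by 65535. */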
2050  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2051  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2052  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2053  uint8_t *src = five_planes ? five_planes : p->data[plane];
2054  dst = p->data[plane];
2055  for (i = 0; i < s->height; i++) {
2056  for (j = 0; j < s->width; j++) {
2057  int k = 255 - src[x * j + 3];
2058  int r = (255 - src[x * j ]) * k;
2059  int g = (255 - src[x * j + 1]) * k;
2060  int b = (255 - src[x * j + 2]) * k;
2061  dst[4 * j ] = r * 257 >> 16;
2062  dst[4 * j + 1] = g * 257 >> 16;
2063  dst[4 * j + 2] = b * 257 >> 16;
2064  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2065  }
2066  src += stride;
2067  dst += p->linesize[plane];
2068  }
2069  av_freep(&five_planes);
2070  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2071  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2072  dst = p->data[plane];
2073  for (i = 0; i < s->height; i++) {
2074  for (j = 0; j < s->width; j++) {
2075  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2076  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2077  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2078  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2079  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2080  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2081  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2082  AV_WB16(dst + 8 * j + 6, 65535);
2083  }
2084  dst += p->linesize[plane];
2085  }
2086  }
2087  }
2088 
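 /* Planar RGB TIFF stores the planes in R, G, B order while the GBRP pixel
  * formats expect G, B, R, so rotate the plane pointers and linesizes. */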
2089  if (s->planar && s->bppcount > 2) {
2090  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2091  FFSWAP(int, p->linesize[0], p->linesize[2]);
2092  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2093  FFSWAP(int, p->linesize[0], p->linesize[1]);
2094  }
2095 
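 /* Plain (non-DNG) 16-bit Bayer data with a WhiteLevel below the full 16-bit
  * range is rescaled so that WhiteLevel maps to 65535. */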
2096  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2097  uint16_t *dst = (uint16_t *)p->data[0];
2098  for (i = 0; i < s->height; i++) {
2099  for (j = 0; j < s->width; j++)
2100  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2101  dst += stride / 2;
2102  }
2103  }
2104 
2105  *got_frame = 1;
2106 
2107  return avpkt->size;
2108 }
2109 
2110 static av_cold int tiff_init(AVCodecContext *avctx)
2111 {
2112  TiffContext *s = avctx->priv_data;
2113  const AVCodec *codec;
2114  int ret;
2115 
2116  s->width = 0;
2117  s->height = 0;
2118  s->subsampling[0] =
2119  s->subsampling[1] = 1;
2120  s->avctx = avctx;
2121  ff_lzw_decode_open(&s->lzw);
2122  if (!s->lzw)
2123  return AVERROR(ENOMEM);
2124  ff_ccitt_unpack_init();
2125 
2126  /* Allocate JPEG frame */
2127  s->jpgframe = av_frame_alloc();
2128  if (!s->jpgframe)
2129  return AVERROR(ENOMEM);
2130 
2131  /* Prepare everything needed for JPEG decoding */
2132  codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
2133  if (!codec)
2134  return AVERROR_BUG;
2135  s->avctx_mjpeg = avcodec_alloc_context3(codec);
2136  if (!s->avctx_mjpeg)
2137  return AVERROR(ENOMEM);
2138  s->avctx_mjpeg->flags = avctx->flags;
2139  s->avctx_mjpeg->flags2 = avctx->flags2;
2140  s->avctx_mjpeg->dct_algo = avctx->dct_algo;
2141  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2142  ret = ff_codec_open2_recursive(s->avctx_mjpeg, codec, NULL);
2143  if (ret < 0) {
2144  return ret;
2145  }
2146 
2147  return 0;
2148 }
2149 
2150 static av_cold int tiff_end(AVCodecContext *avctx)
2151 {
2152  TiffContext *const s = avctx->priv_data;
2153 
2154  free_geotags(s);
2155 
2156  ff_lzw_decode_close(&s->lzw);
2157  av_freep(&s->deinvert_buf);
2158  s->deinvert_buf_size = 0;
2159  av_freep(&s->yuv_line);
2160  s->yuv_line_size = 0;
2161  av_freep(&s->fax_buffer);
2162  s->fax_buffer_size = 0;
2163  av_frame_free(&s->jpgframe);
2164  avcodec_free_context(&s->avctx_mjpeg);
2165  return 0;
2166 }
2167 
2168 #define OFFSET(x) offsetof(TiffContext, x)
2169 static const AVOption tiff_options[] = {
2170  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2171  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2172  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2173  { NULL },
2174 };
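 /* These private options are applied to the decoder before it is opened; a
  * minimal sketch using the public libavcodec API (not part of tiff.c):
  *     AVDictionary *opts = NULL;
  *     av_dict_set(&opts, "page", "2", 0);  // decode the second page
  *     avcodec_open2(avctx, avcodec_find_decoder(AV_CODEC_ID_TIFF), &opts);
  * The same can be done with av_opt_set() on the codec's private context. */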
2175 
2176 static const AVClass tiff_decoder_class = {
2177  .class_name = "TIFF decoder",
2178  .item_name = av_default_item_name,
2179  .option = tiff_options,
2180  .version = LIBAVUTIL_VERSION_INT,
2181 };
2182 
2183 AVCodec ff_tiff_decoder = {
2184  .name = "tiff",
2185  .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
2186  .type = AVMEDIA_TYPE_VIDEO,
2187  .id = AV_CODEC_ID_TIFF,
2188  .priv_data_size = sizeof(TiffContext),
2189  .init = tiff_init,
2190  .close = tiff_end,
2191  .decode = decode_frame,
2192  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2193  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
2194  .priv_class = &tiff_decoder_class,
2195 };