FFmpeg 4.3
targa.c
1 /*
2  * Targa (.tga) image decoder
3  * Copyright (c) 2006 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/intreadwrite.h"
23 #include "libavutil/imgutils.h"
24 #include "avcodec.h"
25 #include "bytestream.h"
26 #include "internal.h"
27 #include "targa.h"
28 
29 typedef struct TargaContext {
30  GetByteContext gb;
31 } TargaContext;
32 
33 static uint8_t *advance_line(uint8_t *start, uint8_t *line,
34  int stride, int *y, int h, int interleave)
35 {
36  *y += interleave;
37 
38  if (*y < h) {
39  return line + interleave * stride;
40  } else {
41  *y = (*y + 1) & (interleave - 1);
42  if (*y && *y < h) {
43  return start + *y * stride;
44  } else {
45  return NULL;
46  }
47  }
48 }
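
The advance_line() helper above encodes the TGA interleave scheme: it walks rows y, y + interleave, y + 2*interleave, ... until a pass runs off the bottom of the image, then restarts at the next pass offset, and finally returns NULL once every row has been visited. A minimal standalone sketch, assuming a stride of 1 so that the pointer offset equals the row index (illustrative only, not part of this file), prints the resulting visiting order:

/*
 * Standalone sketch (not FFmpeg code): prints the row order produced by the
 * same arithmetic as advance_line().  A stride of 1 is assumed so the pointer
 * offset equals the row index; the buffer contents are never used.
 */
#include <stdio.h>
#include <stdint.h>

static void print_row_order(int h, int interleave)
{
    uint8_t rows[64];                         /* dummy plane, one byte per row */
    uint8_t *start = rows, *line = rows;
    int y = 0;

    printf("h=%d interleave=%d:", h, interleave);
    while (line) {
        printf(" %d", (int)(line - start));   /* current row index */
        y += interleave;
        if (y < h) {
            line += interleave;               /* next row of the current pass */
        } else {
            y = (y + 1) & (interleave - 1);   /* offset of the next pass */
            line = (y && y < h) ? start + y : NULL;
        }
    }
    printf("\n");
}

int main(void)
{
    print_row_order(8, 1);                    /* 0 1 2 3 4 5 6 7 */
    print_row_order(8, 2);                    /* 0 2 4 6 1 3 5 7 */
    print_row_order(8, 4);                    /* 0 4 1 5 2 6 3 7 */
    return 0;
}

Note that the (interleave - 1) wrap-around only makes sense for power-of-two interleave values, which matches the 1, 2 or 4 that decode_frame() derives from the header flags below.
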
49 
50 static int targa_decode_rle(AVCodecContext *avctx, TargaContext *s,
51  uint8_t *start, int w, int h, int stride,
52  int bpp, int interleave)
53 {
54  int x, y;
55  int depth = (bpp + 1) >> 3;
56  int type, count;
57  uint8_t *line = start;
58  uint8_t *dst = line;
59 
60  x = y = count = 0;
61  while (dst) {
62  if (bytestream2_get_bytes_left(&s->gb) <= 0) {
63  av_log(avctx, AV_LOG_ERROR,
64  "Ran out of data before end-of-image\n");
65  return AVERROR_INVALIDDATA;
66  }
67  type = bytestream2_get_byteu(&s->gb);
68  count = (type & 0x7F) + 1;
69  type &= 0x80;
70  if (!type) {
71  do {
72  int n = FFMIN(count, w - x);
73  bytestream2_get_buffer(&s->gb, dst, n * depth);
74  count -= n;
75  dst += n * depth;
76  x += n;
77  if (x == w) {
78  x = 0;
79  dst = line = advance_line(start, line, stride, &y, h, interleave);
80  }
81  } while (dst && count > 0);
82  } else {
83  uint8_t tmp[4];
84  bytestream2_get_buffer(&s->gb, tmp, depth);
85  do {
86  int n = FFMIN(count, w - x);
87  count -= n;
88  x += n;
89  do {
90  memcpy(dst, tmp, depth);
91  dst += depth;
92  } while (--n);
93  if (x == w) {
94  x = 0;
95  dst = line = advance_line(start, line, stride, &y, h, interleave);
96  }
97  } while (dst && count > 0);
98  }
99  }
100 
101  if (count) {
102  av_log(avctx, AV_LOG_ERROR, "Packet went out of bounds\n");
103  return AVERROR_INVALIDDATA;
104  }
105 
106  return 0;
107 }
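
targa_decode_rle() above consumes standard TGA RLE packets: each packet begins with a one-byte header whose top bit distinguishes a run-length packet (one pixel value repeated) from a raw packet (literal pixel data), and whose low seven bits hold the pixel count minus one, so a single packet covers 1 to 128 pixels. As a rough sketch of that packet format only, the hypothetical helper below expands such a stream into a flat buffer, leaving out the per-line and interleave bookkeeping the real function has to do:

/*
 * Simplified TGA RLE expansion into a contiguous buffer.  Hypothetical helper
 * for illustration only; it omits the line/interleave handling above.
 * Returns the number of pixels written, or -1 on truncated input.
 */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static long tga_rle_expand(const uint8_t *src, size_t src_size,
                           uint8_t *dst, long npixels, int depth)
{
    const uint8_t *end = src + src_size;
    long written = 0;

    while (written < npixels) {
        int header, count, i;

        if (src >= end)
            return -1;                        /* ran out of packet headers */
        header = *src++;
        count  = (header & 0x7F) + 1;         /* low 7 bits: pixel count - 1 */

        if (header & 0x80) {                  /* run packet: repeat one pixel */
            if (end - src < depth)
                return -1;
            for (i = 0; i < count && written < npixels; i++, written++)
                memcpy(dst + written * depth, src, depth);
            src += depth;
        } else {                              /* raw packet: literal pixels */
            if (end - src < (ptrdiff_t)count * depth)
                return -1;
            for (i = 0; i < count && written < npixels; i++, written++) {
                memcpy(dst + written * depth, src, depth);
                src += depth;
            }
        }
    }
    return written;
}

The real decoder additionally rejects a packet that would run past the end of the image; that is what the final if (count) check above reports as "Packet went out of bounds".
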
108 
109 static int decode_frame(AVCodecContext *avctx,
110  void *data, int *got_frame,
111  AVPacket *avpkt)
112 {
113  TargaContext * const s = avctx->priv_data;
114  AVFrame * const p = data;
115  uint8_t *dst;
116  int stride;
117  int idlen, pal, compr, y, w, h, bpp, flags, ret;
118  int first_clr, colors, csize;
119  int interleave;
120 
121  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
122 
123  /* parse image header */
124  idlen = bytestream2_get_byte(&s->gb);
125  pal = bytestream2_get_byte(&s->gb);
126  compr = bytestream2_get_byte(&s->gb);
127  first_clr = bytestream2_get_le16(&s->gb);
128  colors = bytestream2_get_le16(&s->gb);
129  csize = bytestream2_get_byte(&s->gb);
130  bytestream2_skip(&s->gb, 4); /* 2: x, 2: y */
131  w = bytestream2_get_le16(&s->gb);
132  h = bytestream2_get_le16(&s->gb);
133  bpp = bytestream2_get_byte(&s->gb);
134 
135  flags = bytestream2_get_byte(&s->gb);
136 
137  if (!pal && (first_clr || colors || csize)) {
138  av_log(avctx, AV_LOG_WARNING, "File without colormap has colormap information set.\n");
139  // specification says we should ignore those values in this case
140  first_clr = colors = csize = 0;
141  }
142 
143  if (bytestream2_get_bytes_left(&s->gb) < idlen + 2*colors) {
144  av_log(avctx, AV_LOG_ERROR,
145  "Not enough data to read header\n");
146  return AVERROR_INVALIDDATA;
147  }
148 
149  // skip identifier if any
150  bytestream2_skip(&s->gb, idlen);
151 
152  switch (bpp) {
153  case 8:
154  avctx->pix_fmt = ((compr & (~TGA_RLE)) == TGA_BW) ? AV_PIX_FMT_GRAY8 : AV_PIX_FMT_PAL8;
155  break;
156  case 15:
157  case 16:
158  avctx->pix_fmt = AV_PIX_FMT_RGB555LE;
159  break;
160  case 24:
161  avctx->pix_fmt = AV_PIX_FMT_BGR24;
162  break;
163  case 32:
164  avctx->pix_fmt = AV_PIX_FMT_BGRA;
165  break;
166  default:
167  av_log(avctx, AV_LOG_ERROR, "Bit depth %i is not supported\n", bpp);
168  return AVERROR_INVALIDDATA;
169  }
170 
171  if (colors && (colors + first_clr) > 256) {
172  av_log(avctx, AV_LOG_ERROR, "Incorrect palette: %i colors with offset %i\n", colors, first_clr);
173  return AVERROR_INVALIDDATA;
174  }
175 
176  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
177  return ret;
178 
179  if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
180  return ret;
181  p->pict_type = AV_PICTURE_TYPE_I;
182 
183  if (flags & TGA_TOPTOBOTTOM) {
184  dst = p->data[0];
185  stride = p->linesize[0];
186  } else { //image is upside-down
187  dst = p->data[0] + p->linesize[0] * (h - 1);
188  stride = -p->linesize[0];
189  }
190 
191  interleave = flags & TGA_INTERLEAVE2 ? 2 :
192  flags & TGA_INTERLEAVE4 ? 4 : 1;
193 
194  if (colors) {
195  int pal_size, pal_sample_size;
196 
197  switch (csize) {
198  case 32: pal_sample_size = 4; break;
199  case 24: pal_sample_size = 3; break;
200  case 16:
201  case 15: pal_sample_size = 2; break;
202  default:
203  av_log(avctx, AV_LOG_ERROR, "Palette entry size %i bits is not supported\n", csize);
204  return AVERROR_INVALIDDATA;
205  }
206  pal_size = colors * pal_sample_size;
207  if (avctx->pix_fmt != AV_PIX_FMT_PAL8) //should not occur but skip palette anyway
208  bytestream2_skip(&s->gb, pal_size);
209  else {
210  int t;
211  uint32_t *pal = ((uint32_t *)p->data[1]) + first_clr;
212 
213  if (bytestream2_get_bytes_left(&s->gb) < pal_size) {
214  av_log(avctx, AV_LOG_ERROR,
215  "Not enough data to read palette\n");
216  return AVERROR_INVALIDDATA;
217  }
218  switch (pal_sample_size) {
219  case 4:
220  for (t = 0; t < colors; t++)
221  *pal++ = bytestream2_get_le32u(&s->gb);
222  break;
223  case 3:
224  /* RGB24 */
225  for (t = 0; t < colors; t++)
226  *pal++ = (0xffU<<24) | bytestream2_get_le24u(&s->gb);
227  break;
228  case 2:
229  /* RGB555 */
230  for (t = 0; t < colors; t++) {
231  uint32_t v = bytestream2_get_le16u(&s->gb);
232  v = ((v & 0x7C00) << 9) |
233  ((v & 0x03E0) << 6) |
234  ((v & 0x001F) << 3);
235  /* left bit replication */
236  v |= (v & 0xE0E0E0U) >> 5;
237  *pal++ = (0xffU<<24) | v;
238  }
239  break;
240  }
241  p->palette_has_changed = 1;
242  }
243  }
244 
245  if ((compr & (~TGA_RLE)) == TGA_NODATA) {
246  memset(p->data[0], 0, p->linesize[0] * h);
247  } else {
248  if (compr & TGA_RLE) {
249  int res = targa_decode_rle(avctx, s, dst, w, h, stride, bpp, interleave);
250  if (res < 0)
251  return res;
252  } else {
253  size_t img_size = w * ((bpp + 1) >> 3);
254  uint8_t *line;
255  if (bytestream2_get_bytes_left(&s->gb) < img_size * h) {
256  av_log(avctx, AV_LOG_ERROR,
257  "Not enough data available for image\n");
258  return AVERROR_INVALIDDATA;
259  }
260 
261  line = dst;
262  y = 0;
263  do {
264  bytestream2_get_buffer(&s->gb, line, img_size);
265  line = advance_line(dst, line, stride, &y, h, interleave);
266  } while (line);
267  }
268 
269  if (flags & TGA_RIGHTTOLEFT) { // right-to-left, needs horizontal flip
270  int x;
271  for (y = 0; y < h; y++) {
272  void *line = &p->data[0][y * p->linesize[0]];
273  for (x = 0; x < w >> 1; x++) {
274  switch (bpp) {
275  case 32:
276  FFSWAP(uint32_t, ((uint32_t *)line)[x], ((uint32_t *)line)[w - x - 1]);
277  break;
278  case 24:
279  FFSWAP(uint8_t, ((uint8_t *)line)[3 * x ], ((uint8_t *)line)[3 * w - 3 * x - 3]);
280  FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 1], ((uint8_t *)line)[3 * w - 3 * x - 2]);
281  FFSWAP(uint8_t, ((uint8_t *)line)[3 * x + 2], ((uint8_t *)line)[3 * w - 3 * x - 1]);
282  break;
283  case 16:
284  FFSWAP(uint16_t, ((uint16_t *)line)[x], ((uint16_t *)line)[w - x - 1]);
285  break;
286  case 8:
287  FFSWAP(uint8_t, ((uint8_t *)line)[x], ((uint8_t *)line)[w - x - 1]);
288  }
289  }
290  }
291  }
292  }
293 
294 
295  *got_frame = 1;
296 
297  return avpkt->size;
298 }
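
For reference, the values read one by one at the top of decode_frame() are the fields of the fixed 18-byte TGA header, with all multi-byte quantities stored little-endian. The struct below is purely illustrative of the field order and of the local variables each field lands in; the decoder never overlays a struct on the input, and without explicit packing this declaration would not even occupy 18 bytes in memory:

#include <stdint.h>

/* Field order of the 18-byte TGA header as consumed by decode_frame();
 * the comments name the local variable each field is read into. */
struct tga_header {
    uint8_t  id_length;        /* idlen: bytes of image ID to skip          */
    uint8_t  colormap_type;    /* pal: non-zero if a palette follows        */
    uint8_t  image_type;       /* compr: TGA_BW/TGA_NODATA/... plus TGA_RLE */
    uint16_t colormap_start;   /* first_clr                                 */
    uint16_t colormap_length;  /* colors                                    */
    uint8_t  colormap_bpp;     /* csize: bits per palette entry             */
    uint16_t x_origin;         /* skipped                                   */
    uint16_t y_origin;         /* skipped                                   */
    uint16_t width;            /* w                                         */
    uint16_t height;           /* h                                         */
    uint8_t  bpp;              /* bits per pixel: 8, 15/16, 24 or 32        */
    uint8_t  flags;            /* origin and interleave bits                */
};
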
299 
300 AVCodec ff_targa_decoder = {
301  .name = "targa",
302  .long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"),
303  .type = AVMEDIA_TYPE_VIDEO,
304  .id = AV_CODEC_ID_TARGA,
305  .priv_data_size = sizeof(TargaContext),
306  .decode = decode_frame,
307  .capabilities = AV_CODEC_CAP_DR1,
308 };
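
A small aside on the palette path: for 15/16-bit palette entries, the loop around line 230 shifts each 5-bit channel into the top bits of its byte and then replicates the top three bits into the bottom three (the mask-and-shift with 0xE0E0E0), so that a full-scale 5-bit value expands to exactly 255. Per channel this is equivalent to the following hypothetical helper, shown only to make the arithmetic explicit:

#include <stdint.h>

/* Expand a 5-bit colour component to 8 bits by bit replication:
 * result = (c << 3) | (c >> 2), e.g. 0 -> 0, 16 -> 132, 31 -> 255. */
static inline uint8_t expand5to8(uint8_t c5)
{
    return (uint8_t)((c5 << 3) | (c5 >> 2));
}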