FFmpeg 4.3
tdsc.c
/*
 * TDSC decoder
 * Copyright (C) 2015 Vittorio Giovara <vittorio.giovara@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TDSC decoder
 *
 * Fourcc: TSDC
 *
 * TDSC is very simple. It codes the picture in tiles, storing them in raw
 * BGR24 format or compressing them as JPEG. Frames can be full pictures or
 * just updates to the previous frame. The cursor is found in its own frame
 * or at the bottom of the picture. Every frame is then packed with zlib.
 *
 * Supports: BGR24
 */
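
/*
 * Illustrative only: a minimal decoding sketch using the public libavcodec
 * API. "stream_width", "stream_height" and "pkt" are assumed to come from
 * the caller's demuxer, and error handling is omitted. Note that tdsc_init()
 * below requires the coded dimensions to be known before the codec is opened.
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_TDSC);
 *     AVCodecContext *dec  = avcodec_alloc_context3(codec);
 *     dec->width  = stream_width;
 *     dec->height = stream_height;
 *     avcodec_open2(dec, codec, NULL);
 *
 *     AVFrame *frame = av_frame_alloc();
 *     avcodec_send_packet(dec, pkt);      // one zlib-packed TDSC access unit
 *     avcodec_receive_frame(dec, frame);  // BGR24 picture, cursor painted in
 */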

#include <stdint.h>
#include <zlib.h>

#include "libavutil/imgutils.h"

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

#define BITMAPINFOHEADER_SIZE 0x28
#define TDSF_HEADER_SIZE      0x56
#define TDSB_HEADER_SIZE      0x08

typedef struct TDSCContext {
    AVCodecContext *jpeg_avctx;   // wrapper context for MJPEG

    int width, height;
    GetByteContext gbc;

    AVFrame *refframe;            // full decoded frame (without cursor)
    AVFrame *jpgframe;            // decoded JPEG tile
    uint8_t *tilebuffer;          // buffer containing tile data

    /* zlib interaction */
    uint8_t *deflatebuffer;
    uLongf deflatelen;

    /* All that is cursor */
    uint8_t *cursor;
    int      cursor_stride;
    int      cursor_w, cursor_h, cursor_x, cursor_y;
    int      cursor_hot_x, cursor_hot_y;
} TDSCContext;

/* 1 byte bits, 1 byte planes, 2 bytes format (probably) */
enum TDSCCursorFormat {
    CUR_FMT_MONO = 0x01010004,
    CUR_FMT_BGRA = 0x20010004,
    CUR_FMT_RGBA = 0x20010008,
};
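
/* For example, CUR_FMT_BGRA == 0x20010004 presumably breaks down as
 * 0x20 = 32 bits per pixel, 0x01 = 1 plane, 0x0004 = format id, matching
 * the byte layout guessed in the comment above. */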

static av_cold int tdsc_close(AVCodecContext *avctx)
{
    TDSCContext *ctx = avctx->priv_data;

    av_frame_free(&ctx->refframe);
    av_frame_free(&ctx->jpgframe);
    av_freep(&ctx->deflatebuffer);
    av_freep(&ctx->tilebuffer);
    av_freep(&ctx->cursor);
    avcodec_free_context(&ctx->jpeg_avctx);

    return 0;
}

static av_cold int tdsc_init(AVCodecContext *avctx)
{
    TDSCContext *ctx = avctx->priv_data;
    const AVCodec *codec;
    int ret;

    avctx->pix_fmt = AV_PIX_FMT_BGR24;

    /* These need to be set to estimate buffer and frame size */
    if (!(avctx->width && avctx->height)) {
        av_log(avctx, AV_LOG_ERROR, "Video size not set.\n");
        return AVERROR_INVALIDDATA;
    }

    /* This value should be large enough for a RAW-only frame plus headers */
    ctx->deflatelen = avctx->width * avctx->height * (3 + 1);
    ret = av_reallocp(&ctx->deflatebuffer, ctx->deflatelen);
    if (ret < 0)
        return ret;

    /* Allocate reference and JPEG frame */
    ctx->refframe = av_frame_alloc();
    ctx->jpgframe = av_frame_alloc();
    if (!ctx->refframe || !ctx->jpgframe)
        return AVERROR(ENOMEM);

    /* Prepare everything needed for JPEG decoding */
    codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
    if (!codec)
        return AVERROR_BUG;
    ctx->jpeg_avctx = avcodec_alloc_context3(codec);
    if (!ctx->jpeg_avctx)
        return AVERROR(ENOMEM);
    ctx->jpeg_avctx->flags     = avctx->flags;
    ctx->jpeg_avctx->flags2    = avctx->flags2;
    ctx->jpeg_avctx->dct_algo  = avctx->dct_algo;
    ctx->jpeg_avctx->idct_algo = avctx->idct_algo;
    ret = ff_codec_open2_recursive(ctx->jpeg_avctx, codec, NULL);
    if (ret < 0)
        return ret;

    /* Set the output pixel format on the reference frame */
    ctx->refframe->format = avctx->pix_fmt;

    return 0;
}

#define APPLY_ALPHA(src, new, alpha) \
    src = (src * (256 - alpha) + new * alpha) >> 8
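
/* For example, alpha == 0 leaves src unchanged (src * 256 >> 8), while
 * alpha == 255 gives (src + 255 * new) >> 8, i.e. almost entirely the new
 * value; blending in 256 steps means full opacity is only approximate. */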

/* Paint a region over a buffer, without drawing out of its bounds. */
static void tdsc_paint_cursor(AVCodecContext *avctx, uint8_t *dst, int stride)
{
    TDSCContext *ctx = avctx->priv_data;
    const uint8_t *cursor = ctx->cursor;
    int x = ctx->cursor_x - ctx->cursor_hot_x;
    int y = ctx->cursor_y - ctx->cursor_hot_y;
    int w = ctx->cursor_w;
    int h = ctx->cursor_h;
    int i, j;

    if (!ctx->cursor)
        return;

    if (x + w > ctx->width)
        w = ctx->width - x;
    if (y + h > ctx->height)
        h = ctx->height - y;
    if (x < 0) {
        w      +=  x;
        cursor += -x * 4;
    } else {
        dst    +=  x * 3;
    }
    if (y < 0) {
        h      +=  y;
        cursor += -y * ctx->cursor_stride;
    } else {
        dst    +=  y * stride;
    }
    if (w < 0 || h < 0)
        return;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            uint8_t alpha = cursor[i * 4];
            APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
            APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
            APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
        }
        dst    += stride;
        cursor += ctx->cursor_stride;
    }
}

/* Load cursor data and store it in ABGR mode. */
static int tdsc_load_cursor(AVCodecContext *avctx)
{
    TDSCContext *ctx = avctx->priv_data;
    int i, j, k, ret, cursor_fmt;
    uint8_t *dst;

    ctx->cursor_hot_x = bytestream2_get_le16(&ctx->gbc);
    ctx->cursor_hot_y = bytestream2_get_le16(&ctx->gbc);
    ctx->cursor_w     = bytestream2_get_le16(&ctx->gbc);
    ctx->cursor_h     = bytestream2_get_le16(&ctx->gbc);

    ctx->cursor_stride = FFALIGN(ctx->cursor_w, 32) * 4;
    cursor_fmt = bytestream2_get_le32(&ctx->gbc);

    if (ctx->cursor_x >= avctx->width || ctx->cursor_y >= avctx->height) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid cursor position (%d.%d outside %dx%d).\n",
               ctx->cursor_x, ctx->cursor_y, avctx->width, avctx->height);
        return AVERROR_INVALIDDATA;
    }
    if (ctx->cursor_w < 1 || ctx->cursor_w > 256 ||
        ctx->cursor_h < 1 || ctx->cursor_h > 256) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid cursor dimensions %dx%d.\n",
               ctx->cursor_w, ctx->cursor_h);
        return AVERROR_INVALIDDATA;
    }
    if (ctx->cursor_hot_x > ctx->cursor_w ||
        ctx->cursor_hot_y > ctx->cursor_h) {
        av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %d.%d.\n",
               ctx->cursor_hot_x, ctx->cursor_hot_y);
        ctx->cursor_hot_x = FFMIN(ctx->cursor_hot_x, ctx->cursor_w - 1);
        ctx->cursor_hot_y = FFMIN(ctx->cursor_hot_y, ctx->cursor_h - 1);
    }

    ret = av_reallocp(&ctx->cursor, ctx->cursor_stride * ctx->cursor_h);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer.\n");
        return ret;
    }

    dst = ctx->cursor;
    /* here data is packed in BE */
    switch (cursor_fmt) {
    case CUR_FMT_MONO:
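        /* Apparently two consecutive 1bpp planes: the first pass stores one
         * plane in the alpha byte, and the second pass combines it with the
         * other plane so that each pixel ends up opaque black, opaque white,
         * or fully transparent, much like the classic Windows AND/XOR
         * monochrome cursor scheme (the "inverted" combination is treated as
         * transparent here). */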
        for (j = 0; j < ctx->cursor_h; j++) {
            for (i = 0; i < ctx->cursor_w; i += 32) {
                uint32_t bits = bytestream2_get_be32(&ctx->gbc);
                for (k = 0; k < 32; k++) {
                    dst[0] = !!(bits & 0x80000000);
                    dst   += 4;
                    bits <<= 1;
                }
            }
            dst += ctx->cursor_stride - ctx->cursor_w * 4;
        }

        dst = ctx->cursor;
        for (j = 0; j < ctx->cursor_h; j++) {
            for (i = 0; i < ctx->cursor_w; i += 32) {
                uint32_t bits = bytestream2_get_be32(&ctx->gbc);
                for (k = 0; k < 32; k++) {
                    int mask_bit = !!(bits & 0x80000000);
                    switch (dst[0] * 2 + mask_bit) {
                    case 0:
                        dst[0] = 0xFF;
                        dst[1] = 0x00;
                        dst[2] = 0x00;
                        dst[3] = 0x00;
                        break;
                    case 1:
                        dst[0] = 0xFF;
                        dst[1] = 0xFF;
                        dst[2] = 0xFF;
                        dst[3] = 0xFF;
                        break;
                    default:
                        dst[0] = 0x00;
                        dst[1] = 0x00;
                        dst[2] = 0x00;
                        dst[3] = 0x00;
                    }
                    dst   += 4;
                    bits <<= 1;
                }
            }
            dst += ctx->cursor_stride - ctx->cursor_w * 4;
        }
        break;
    case CUR_FMT_BGRA:
    case CUR_FMT_RGBA:
        /* Skip monochrome version of the cursor */
        bytestream2_skip(&ctx->gbc,
                         ctx->cursor_h * (FFALIGN(ctx->cursor_w, 32) >> 3));
        if (cursor_fmt & 8) { // RGBA -> ABGR
            for (j = 0; j < ctx->cursor_h; j++) {
                for (i = 0; i < ctx->cursor_w; i++) {
                    int val = bytestream2_get_be32(&ctx->gbc);
                    *dst++ = val >> 24;
                    *dst++ = val >> 16;
                    *dst++ = val >>  8;
                    *dst++ = val >>  0;
                }
                dst += ctx->cursor_stride - ctx->cursor_w * 4;
            }
        } else { // BGRA -> ABGR
            for (j = 0; j < ctx->cursor_h; j++) {
                for (i = 0; i < ctx->cursor_w; i++) {
                    int val = bytestream2_get_be32(&ctx->gbc);
                    *dst++ = val >>  0;
                    *dst++ = val >> 24;
                    *dst++ = val >> 16;
                    *dst++ = val >>  8;
                }
                dst += ctx->cursor_stride - ctx->cursor_w * 4;
            }
        }
        break;
    default:
        avpriv_request_sample(avctx, "Cursor format %08x", cursor_fmt);
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}

/* Convert a single YUV pixel to RGB. */
static inline void tdsc_yuv2rgb(uint8_t *out, int Y, int U, int V)
{
    out[0] = av_clip_uint8(Y + ( 91881 * V             + 32768 >> 16));
    out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
    out[2] = av_clip_uint8(Y + (116130 * U             + 32768 >> 16));
}
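
/* The constants above appear to be the usual BT.601 coefficients in 16.16
 * fixed point (1.402*65536 ~= 91881, 0.344136*65536 ~= 22554,
 * 0.714136*65536 ~= 46802, 1.772*65536 ~= 116130), with +32768 for rounding
 * before the >> 16. */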

/* Convert a YUV420 buffer to an RGB buffer. */
static av_always_inline void tdsc_blit(uint8_t *dst, int dst_stride,
                                       const uint8_t *srcy, int srcy_stride,
                                       const uint8_t *srcu, const uint8_t *srcv,
                                       int srcuv_stride, int width, int height)
{
    int col, line;
    for (line = 0; line < height; line++) {
        for (col = 0; col < width; col++)
            tdsc_yuv2rgb(dst + col * 3, srcy[col],
                         srcu[col >> 1] - 128, srcv[col >> 1] - 128);

        dst  += dst_stride;
        srcy += srcy_stride;
        srcu += srcuv_stride * (line & 1);
        srcv += srcuv_stride * (line & 1);
    }
}

/* Invoke the MJPEG decoder to decode the tile. */
static int tdsc_decode_jpeg_tile(AVCodecContext *avctx, int tile_size,
                                 int x, int y, int w, int h)
{
    TDSCContext *ctx = avctx->priv_data;
    AVPacket jpkt;
    int ret;

    /* Prepare a packet and send to the MJPEG decoder */
    av_init_packet(&jpkt);
    jpkt.data = ctx->tilebuffer;
    jpkt.size = tile_size;

    ret = avcodec_send_packet(ctx->jpeg_avctx, &jpkt);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
        return ret;
    }

    ret = avcodec_receive_frame(ctx->jpeg_avctx, ctx->jpgframe);
    if (ret < 0 || ctx->jpgframe->format != AV_PIX_FMT_YUVJ420P) {
        av_log(avctx, AV_LOG_ERROR,
               "JPEG decoding error (%d).\n", ret);

        /* Normally skip, error if explode */
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return AVERROR_INVALIDDATA;
        else
            return 0;
    }

    /* Let's paint onto the buffer */
    tdsc_blit(ctx->refframe->data[0] + x * 3 + ctx->refframe->linesize[0] * y,
              ctx->refframe->linesize[0],
              ctx->jpgframe->data[0], ctx->jpgframe->linesize[0],
              ctx->jpgframe->data[1], ctx->jpgframe->data[2],
              ctx->jpgframe->linesize[1], w, h);

    av_frame_unref(ctx->jpgframe);

    return 0;
}

/* Parse frame and either copy data or decode JPEG. */
static int tdsc_decode_tiles(AVCodecContext *avctx, int number_tiles)
{
    TDSCContext *ctx = avctx->priv_data;
    int i;

    /* Iterate over the number of tiles */
    for (i = 0; i < number_tiles; i++) {
        int tile_size;
        int tile_mode;
        int x, y, w, h;
        int ret;

        if (bytestream2_get_bytes_left(&ctx->gbc) < 4 ||
            bytestream2_get_le32(&ctx->gbc) != MKTAG('T','D','S','B') ||
            bytestream2_get_bytes_left(&ctx->gbc) < TDSB_HEADER_SIZE) {
            av_log(avctx, AV_LOG_ERROR, "TDSB tag is too small.\n");
            return AVERROR_INVALIDDATA;
        }

        tile_size = bytestream2_get_le32(&ctx->gbc);
        if (bytestream2_get_bytes_left(&ctx->gbc) < tile_size)
            return AVERROR_INVALIDDATA;

        tile_mode = bytestream2_get_le32(&ctx->gbc);
        bytestream2_skip(&ctx->gbc, 4); // unknown
        x = bytestream2_get_le32(&ctx->gbc);
        y = bytestream2_get_le32(&ctx->gbc);
        w = bytestream2_get_le32(&ctx->gbc) - x;
        h = bytestream2_get_le32(&ctx->gbc) - y;

        if (x >= ctx->width || y >= ctx->height) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid tile position (%d.%d outside %dx%d).\n",
                   x, y, ctx->width, ctx->height);
            return AVERROR_INVALIDDATA;
        }
        if (x + w > ctx->width || y + h > ctx->height) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid tile size %dx%d\n", w, h);
            return AVERROR_INVALIDDATA;
        }

        ret = av_reallocp(&ctx->tilebuffer, tile_size);
        if (!ctx->tilebuffer)
            return ret;

        bytestream2_get_buffer(&ctx->gbc, ctx->tilebuffer, tile_size);

        if (tile_mode == MKTAG('G','E','P','J')) {
            /* Decode JPEG tile and copy it in the reference frame */
            ret = tdsc_decode_jpeg_tile(avctx, tile_size, x, y, w, h);
            if (ret < 0)
                return ret;
        } else if (tile_mode == MKTAG(' ','W','A','R')) {
            /* Just copy the buffer to output */
            av_image_copy_plane(ctx->refframe->data[0] + x * 3 +
                                ctx->refframe->linesize[0] * y,
                                ctx->refframe->linesize[0], ctx->tilebuffer,
                                w * 3, w * 3, h);
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unknown tile type %08x.\n", tile_mode);
            return AVERROR_INVALIDDATA;
        }
        av_log(avctx, AV_LOG_DEBUG, "Tile %d, %dx%d (%d.%d)\n", i, w, h, x, y);
    }

    return 0;
}

static int tdsc_parse_tdsf(AVCodecContext *avctx, int number_tiles)
{
    TDSCContext *ctx = avctx->priv_data;
    int ret, w, h, init_refframe = !ctx->refframe->data[0];

    /* BITMAPINFOHEADER
     * http://msdn.microsoft.com/en-us/library/windows/desktop/dd183376.aspx */
    if (bytestream2_get_le32(&ctx->gbc) != BITMAPINFOHEADER_SIZE)
        return AVERROR_INVALIDDATA;

    /* Store size, but wait for context reinit before updating avctx */
    w =  bytestream2_get_le32(&ctx->gbc);
    h = -bytestream2_get_le32(&ctx->gbc);

    if (bytestream2_get_le16(&ctx->gbc) != 1 ||  // 1 plane
        bytestream2_get_le16(&ctx->gbc) != 24)   // BGR24
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&ctx->gbc, 24); // unused fields

    /* Update sizes */
    if (avctx->width != w || avctx->height != h) {
        av_log(avctx, AV_LOG_DEBUG, "Size update %dx%d -> %dx%d.\n",
               avctx->width, avctx->height, w, h);
        ret = ff_set_dimensions(avctx, w, h);
        if (ret < 0)
            return ret;
        init_refframe = 1;
    }
    ctx->refframe->width  = ctx->width  = w;
    ctx->refframe->height = ctx->height = h;

    /* Allocate the reference frame if not already done or on size change */
    if (init_refframe) {
        ret = av_frame_get_buffer(ctx->refframe, 0);
        if (ret < 0)
            return ret;
    }

    /* Decode all tiles in a frame */
    return tdsc_decode_tiles(avctx, number_tiles);
}

static int tdsc_parse_dtsm(AVCodecContext *avctx)
{
    TDSCContext *ctx = avctx->priv_data;
    int ret;
    int action = bytestream2_get_le32(&ctx->gbc);

    bytestream2_skip(&ctx->gbc, 4); // some kind of ID or version maybe?

    if (action == 2 || action == 3) {
        /* Load cursor coordinates */
        ctx->cursor_x = bytestream2_get_le32(&ctx->gbc);
        ctx->cursor_y = bytestream2_get_le32(&ctx->gbc);

        /* Load a full cursor sprite */
        if (action == 3) {
            ret = tdsc_load_cursor(avctx);
            /* Do not consider cursor errors fatal unless in explode mode */
            if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
                return ret;
        }
    } else {
        avpriv_request_sample(avctx, "Cursor action %d", action);
    }

    return 0;
}

static int tdsc_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    TDSCContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    int ret, tag_header, keyframe = 0;
    uLongf dlen;

    /* Resize deflate buffer on resolution change */
    if (ctx->width != avctx->width || ctx->height != avctx->height) {
        ctx->deflatelen = avctx->width * avctx->height * (3 + 1);
        ret = av_reallocp(&ctx->deflatebuffer, ctx->deflatelen);
        if (ret < 0)
            return ret;
    }
    dlen = ctx->deflatelen;

    /* Frames are deflated, need to inflate them first */
    ret = uncompress(ctx->deflatebuffer, &dlen, avpkt->data, avpkt->size);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "Deflate error %d.\n", ret);
        return AVERROR_UNKNOWN;
    }

    bytestream2_init(&ctx->gbc, ctx->deflatebuffer, dlen);

    /* Check for tag and for size info */
    if (bytestream2_get_bytes_left(&ctx->gbc) < 4 + 4) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Read tag */
    tag_header = bytestream2_get_le32(&ctx->gbc);

    if (tag_header == MKTAG('T','D','S','F')) {
        int number_tiles;

        if (bytestream2_get_bytes_left(&ctx->gbc) < TDSF_HEADER_SIZE) {
            av_log(avctx, AV_LOG_ERROR, "TDSF tag is too small.\n");
            return AVERROR_INVALIDDATA;
        }
        /* First 4 bytes here are the number of GEPJ/WAR tiles in this frame */
        number_tiles = bytestream2_get_le32(&ctx->gbc);

        bytestream2_skip(&ctx->gbc, 4); // internal timestamp maybe?
        keyframe = bytestream2_get_le32(&ctx->gbc) == 0x30;

        ret = tdsc_parse_tdsf(avctx, number_tiles);
        if (ret < 0)
            return ret;

        /* Check if there is anything else we are able to parse */
        if (bytestream2_get_bytes_left(&ctx->gbc) >= 4 + 4)
            tag_header = bytestream2_get_le32(&ctx->gbc);
    }

    /* This tag can be after a TDSF block or on its own frame */
    if (tag_header == MKTAG('D','T','S','M')) {
        /* First 4 bytes here are the total size in bytes for this frame */
        int tag_size = bytestream2_get_le32(&ctx->gbc);

        if (bytestream2_get_bytes_left(&ctx->gbc) < tag_size) {
            av_log(avctx, AV_LOG_ERROR, "DTSM tag is too small.\n");
            return AVERROR_INVALIDDATA;
        }

        ret = tdsc_parse_dtsm(avctx);
        if (ret < 0)
            return ret;
    }

    /* Get the output frame and copy the reference frame */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(frame, ctx->refframe);
    if (ret < 0)
        return ret;

    /* Paint the cursor on the output frame */
    tdsc_paint_cursor(avctx, frame->data[0], frame->linesize[0]);

    /* Frame is ready to be output */
    if (keyframe) {
        frame->pict_type = AV_PICTURE_TYPE_I;
        frame->key_frame = 1;
    } else {
        frame->pict_type = AV_PICTURE_TYPE_P;
    }
    *got_frame = 1;

    return avpkt->size;
}

AVCodec ff_tdsc_decoder = {
    .name           = "tdsc",
    .long_name      = NULL_IF_CONFIG_SMALL("TDSC"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_TDSC,
    .init           = tdsc_init,
    .decode         = tdsc_decode_frame,
    .close          = tdsc_close,
    .priv_data_size = sizeof(TDSCContext),
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};