FFmpeg  4.3
rawdec.c
Go to the documentation of this file.
1 /*
2  * Raw Video Decoder
3  * Copyright (c) 2001 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Raw Video Decoder
25  */
26 
27 #include "avcodec.h"
28 #include "bswapdsp.h"
29 #include "get_bits.h"
30 #include "internal.h"
31 #include "raw.h"
32 #include "libavutil/avassert.h"
33 #include "libavutil/buffer.h"
34 #include "libavutil/common.h"
35 #include "libavutil/intreadwrite.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/opt.h"
38 
39 typedef struct RawVideoContext {
42  int frame_size; /* size of the frame in bytes */
43  int flip;
44  int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
45  int is_mono;
46  int is_pal8;
49  int is_yuv2;
50  int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16
51  int tff;
52 
55  unsigned int bitstream_buf_size;
57 
/* Decoder private options.
 * "top" forces the field order of flagged-interlaced output:
 * -1 (default) leaves it to the container, 0 = bottom field first,
 * 1 = top field first.  Stored in RawVideoContext.tff. */
static const AVOption options[]={
{"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
{NULL}
};
62 
/* AVClass exposing the private options above through the AVOption API. */
static const AVClass rawdec_class = {
 .class_name = "rawdec",
 .option = options,
 .version = LIBAVUTIL_VERSION_INT,
};
68 
70 {
72  const AVPixFmtDescriptor *desc;
73 
74  ff_bswapdsp_init(&context->bbdsp);
75 
76  if ( avctx->codec_tag == MKTAG('r','a','w',' ')
77  || avctx->codec_tag == MKTAG('N','O','1','6'))
79  avctx->bits_per_coded_sample);
80  else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
82  avctx->bits_per_coded_sample);
83  else if (avctx->codec_tag && (avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0))
85  else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
87  avctx->bits_per_coded_sample);
88 
90  if (!desc) {
91  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
92  return AVERROR(EINVAL);
93  }
94 
95  if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL)) {
97  if (!context->palette)
98  return AVERROR(ENOMEM);
99 #if FF_API_PSEUDOPAL
100  if (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
101  avpriv_set_systematic_pal2((uint32_t*)context->palette->data, avctx->pix_fmt);
102 #endif
103  else {
104  memset(context->palette->data, 0, AVPALETTE_SIZE);
105  if (avctx->bits_per_coded_sample == 1)
106  memset(context->palette->data, 0xff, 4);
107  }
108  }
109 
110  if ((avctx->extradata_size >= 9 &&
111  !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
112  avctx->codec_tag == MKTAG('c','y','u','v') ||
113  avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
114  avctx->codec_tag == MKTAG('W','R','A','W'))
115  context->flip = 1;
116 
117  if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
118  avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
119  context->is_mono = 1;
120  else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
121  context->is_pal8 = 1;
122 
123  if (avctx->codec_tag == MKTAG('B','1','W','0') ||
124  avctx->codec_tag == MKTAG('B','0','W','1'))
125  context->is_nut_mono = 1;
126  else if (avctx->codec_tag == MKTAG('P','A','L',8))
127  context->is_nut_pal8 = 1;
128 
129  if (avctx->codec_tag == AV_RL32("yuv2") &&
130  avctx->pix_fmt == AV_PIX_FMT_YUYV422)
131  context->is_yuv2 = 1;
132 
133  return 0;
134 }
135 
136 static void flip(AVCodecContext *avctx, AVFrame *frame)
137 {
138  frame->data[0] += frame->linesize[0] * (avctx->height - 1);
139  frame->linesize[0] *= -1;
140 }
141 
/*
 * Scale sample to 16-bit resolution
 *
 * Widens an n-bit sample to 16 bits by shifting it into the high bits
 * and replicating its top bits into the freed low bits, so the maximum
 * input value maps to 0xFFFF.  Requires 8 < bits <= 16, otherwise the
 * right shift amount (2*bits - 16) would be negative (undefined
 * behavior); callers only use it via is_lt_16bpp (9..15 bpp input).
 */
#define SCALE16(x, bits) (((x) << (16 - (bits))) | ((x) >> (2 * (bits) - 16)))
146 
147 /**
148  * Scale buffer to 16 bits per coded sample resolution
149  */
150 #define MKSCALE16(name, r16, w16) \
151 static void name(AVCodecContext *avctx, uint8_t * dst, const uint8_t *buf, int buf_size, int packed) \
152 { \
153  int i; \
154  if (!packed) { \
155  for (i = 0; i + 1 < buf_size; i += 2) \
156  w16(dst + i, SCALE16(r16(buf + i), avctx->bits_per_coded_sample)); \
157  } else { \
158  GetBitContext gb; \
159  init_get_bits(&gb, buf, buf_size * 8); \
160  for (i = 0; i < avctx->width * avctx->height; i++) { \
161  int sample = get_bits(&gb, avctx->bits_per_coded_sample); \
162  w16(dst + i*2, SCALE16(sample, avctx->bits_per_coded_sample)); \
163  } \
164  } \
165 }
166 
167 MKSCALE16(scale16be, AV_RB16, AV_WB16)
168 MKSCALE16(scale16le, AV_RL16, AV_WL16)
169 
170 static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
171  AVPacket *avpkt)
172 {
173  const AVPixFmtDescriptor *desc;
174  RawVideoContext *context = avctx->priv_data;
175  const uint8_t *buf = avpkt->data;
176  int buf_size = avpkt->size;
177  int linesize_align = 4;
178  int stride;
179  int res, len;
180  int need_copy;
181 
182  AVFrame *frame = data;
183 
184  if (avctx->width <= 0) {
185  av_log(avctx, AV_LOG_ERROR, "width is not set\n");
186  return AVERROR_INVALIDDATA;
187  }
188  if (avctx->height <= 0) {
189  av_log(avctx, AV_LOG_ERROR, "height is not set\n");
190  return AVERROR_INVALIDDATA;
191  }
192 
193  if (context->is_nut_mono)
194  stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
195  else if (context->is_nut_pal8)
196  stride = avctx->width;
197  else
198  stride = avpkt->size / avctx->height;
199 
200  av_log(avctx, AV_LOG_DEBUG, "PACKET SIZE: %d, STRIDE: %d\n", avpkt->size, stride);
201 
202  if (stride == 0 || avpkt->size < stride * avctx->height) {
203  av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
204  return AVERROR_INVALIDDATA;
205  }
206 
207  desc = av_pix_fmt_desc_get(avctx->pix_fmt);
208 
209  if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4 ||
210  avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1 ||
211  (avctx->bits_per_coded_sample == 0 && (context->is_nut_pal8 || context->is_mono)) ) &&
212  (context->is_mono || context->is_pal8) &&
213  (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
214  context->is_nut_mono || context->is_nut_pal8)) {
215  context->is_1_2_4_8_bpp = 1;
216  if (context->is_mono) {
217  int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
218  context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
219  FFALIGN(row_bytes, 16) * 8,
220  avctx->height, 1);
221  } else
222  context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
223  FFALIGN(avctx->width, 16),
224  avctx->height, 1);
225  } else {
226  context->is_lt_16bpp = av_get_bits_per_pixel(desc) == 16 && avctx->bits_per_coded_sample > 8 && avctx->bits_per_coded_sample < 16;
227  context->frame_size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width,
228  avctx->height, 1);
229  }
230  if (context->frame_size < 0)
231  return context->frame_size;
232 
233  need_copy = !avpkt->buf || context->is_1_2_4_8_bpp || context->is_yuv2 || context->is_lt_16bpp;
234 
235  frame->pict_type = AV_PICTURE_TYPE_I;
236  frame->key_frame = 1;
237 
238  res = ff_decode_frame_props(avctx, frame);
239  if (res < 0)
240  return res;
241 
242  frame->pkt_pos = avctx->internal->last_pkt_props->pos;
243  frame->pkt_duration = avctx->internal->last_pkt_props->duration;
244 
245  if (context->tff >= 0) {
246  frame->interlaced_frame = 1;
247  frame->top_field_first = context->tff;
248  }
249 
250  if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
251  return res;
252 
253  if (need_copy)
254  frame->buf[0] = av_buffer_alloc(FFMAX(context->frame_size, buf_size));
255  else
256  frame->buf[0] = av_buffer_ref(avpkt->buf);
257  if (!frame->buf[0])
258  return AVERROR(ENOMEM);
259 
260  // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
261  if (context->is_1_2_4_8_bpp) {
262  int i, j, row_pix = 0;
263  uint8_t *dst = frame->buf[0]->data;
264  buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
265  if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
266  int pix_per_byte = context->is_mono ? 8 : 1;
267  for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) {
268  dst[j] = buf[i];
269  row_pix += pix_per_byte;
270  if (row_pix >= avctx->width) {
271  i += stride - (i % stride) - 1;
272  j += 16 - (j % 16) - 1;
273  row_pix = 0;
274  }
275  }
276  } else if (avctx->bits_per_coded_sample == 4) {
277  for (i = 0, j = 0; 2 * j + 1 < buf_size && i<avpkt->size; i++, j++) {
278  dst[2 * j + 0] = buf[i] >> 4;
279  dst[2 * j + 1] = buf[i] & 15;
280  row_pix += 2;
281  if (row_pix >= avctx->width) {
282  i += stride - (i % stride) - 1;
283  j += 8 - (j % 8) - 1;
284  row_pix = 0;
285  }
286  }
287  } else if (avctx->bits_per_coded_sample == 2) {
288  for (i = 0, j = 0; 4 * j + 3 < buf_size && i<avpkt->size; i++, j++) {
289  dst[4 * j + 0] = buf[i] >> 6;
290  dst[4 * j + 1] = buf[i] >> 4 & 3;
291  dst[4 * j + 2] = buf[i] >> 2 & 3;
292  dst[4 * j + 3] = buf[i] & 3;
293  row_pix += 4;
294  if (row_pix >= avctx->width) {
295  i += stride - (i % stride) - 1;
296  j += 4 - (j % 4) - 1;
297  row_pix = 0;
298  }
299  }
300  } else {
301  av_assert0(avctx->bits_per_coded_sample == 1);
302  for (i = 0, j = 0; 8 * j + 7 < buf_size && i<avpkt->size; i++, j++) {
303  dst[8 * j + 0] = buf[i] >> 7;
304  dst[8 * j + 1] = buf[i] >> 6 & 1;
305  dst[8 * j + 2] = buf[i] >> 5 & 1;
306  dst[8 * j + 3] = buf[i] >> 4 & 1;
307  dst[8 * j + 4] = buf[i] >> 3 & 1;
308  dst[8 * j + 5] = buf[i] >> 2 & 1;
309  dst[8 * j + 6] = buf[i] >> 1 & 1;
310  dst[8 * j + 7] = buf[i] & 1;
311  row_pix += 8;
312  if (row_pix >= avctx->width) {
313  i += stride - (i % stride) - 1;
314  j += 2 - (j % 2) - 1;
315  row_pix = 0;
316  }
317  }
318  }
319  linesize_align = 16;
320  buf = dst;
321  } else if (context->is_lt_16bpp) {
322  uint8_t *dst = frame->buf[0]->data;
323  int packed = (avctx->codec_tag & 0xFFFFFF) == MKTAG('B','I','T', 0);
324  int swap = avctx->codec_tag >> 24;
325 
326  if (packed && swap) {
327  av_fast_padded_malloc(&context->bitstream_buf, &context->bitstream_buf_size, buf_size);
328  if (!context->bitstream_buf)
329  return AVERROR(ENOMEM);
330  if (swap == 16)
331  context->bbdsp.bswap16_buf(context->bitstream_buf, (const uint16_t*)buf, buf_size / 2);
332  else if (swap == 32)
333  context->bbdsp.bswap_buf(context->bitstream_buf, (const uint32_t*)buf, buf_size / 4);
334  else
335  return AVERROR_INVALIDDATA;
336  buf = context->bitstream_buf;
337  }
338 
339  if (desc->flags & AV_PIX_FMT_FLAG_BE)
340  scale16be(avctx, dst, buf, buf_size, packed);
341  else
342  scale16le(avctx, dst, buf, buf_size, packed);
343 
344  buf = dst;
345  } else if (need_copy) {
346  memcpy(frame->buf[0]->data, buf, buf_size);
347  buf = frame->buf[0]->data;
348  }
349 
350  if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
351  avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
352  buf += buf_size - context->frame_size;
353 
354  len = context->frame_size - (avctx->pix_fmt==AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
355  if (buf_size < len && ((avctx->codec_tag & 0xFFFFFF) != MKTAG('B','I','T', 0) || !need_copy)) {
356  av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
357  av_buffer_unref(&frame->buf[0]);
358  return AVERROR(EINVAL);
359  }
360 
361  if ((res = av_image_fill_arrays(frame->data, frame->linesize,
362  buf, avctx->pix_fmt,
363  avctx->width, avctx->height, 1)) < 0) {
364  av_buffer_unref(&frame->buf[0]);
365  return res;
366  }
367 
368  if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
369  int pal_size;
371  &pal_size);
372  int ret;
373 
374  if (pal && pal_size != AVPALETTE_SIZE) {
375  av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", pal_size);
376  pal = NULL;
377  }
378 
379  if (!context->palette)
381  if (!context->palette) {
382  av_buffer_unref(&frame->buf[0]);
383  return AVERROR(ENOMEM);
384  }
385  ret = av_buffer_make_writable(&context->palette);
386  if (ret < 0) {
387  av_buffer_unref(&frame->buf[0]);
388  return ret;
389  }
390 
391  if (pal) {
392  memcpy(context->palette->data, pal, AVPALETTE_SIZE);
393  frame->palette_has_changed = 1;
394  } else if (context->is_nut_pal8) {
395  int vid_size = avctx->width * avctx->height;
396  int pal_size = avpkt->size - vid_size;
397 
398  if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
399  pal = avpkt->data + vid_size;
400  memcpy(context->palette->data, pal, pal_size);
401  frame->palette_has_changed = 1;
402  }
403  }
404  }
405 
406  if ((avctx->pix_fmt==AV_PIX_FMT_RGB24 ||
407  avctx->pix_fmt==AV_PIX_FMT_BGR24 ||
408  avctx->pix_fmt==AV_PIX_FMT_GRAY8 ||
409  avctx->pix_fmt==AV_PIX_FMT_RGB555LE ||
410  avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
411  avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
412  avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
413  avctx->pix_fmt==AV_PIX_FMT_MONOBLACK ||
414  avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
415  FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
416  frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);
417 
418  if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
419  FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
420  FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
421  int la0 = FFALIGN(frame->linesize[0], linesize_align);
422  frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
423  frame->linesize[0] = la0;
424  frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
425  }
426 
427  if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
428  (desc->flags & FF_PSEUDOPAL)) {
429  frame->buf[1] = av_buffer_ref(context->palette);
430  if (!frame->buf[1]) {
431  av_buffer_unref(&frame->buf[0]);
432  return AVERROR(ENOMEM);
433  }
434  frame->data[1] = frame->buf[1]->data;
435  }
436 
437  if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
438  ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
439  frame->linesize[0] = (frame->linesize[0] + 3) & ~3;
440 
441  if (context->flip)
442  flip(avctx, frame);
443 
444  if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
445  avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
446  avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
447  avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
448  FFSWAP(uint8_t *, frame->data[1], frame->data[2]);
449 
450  if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
451  frame->data[1] = frame->data[1] + (avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height;
452  frame->data[2] = frame->data[2] + ((avctx->width+1)*(avctx->height+1) -avctx->width*avctx->height)*5/4;
453  }
454 
455  if (avctx->codec_tag == AV_RL32("yuv2") &&
456  avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
457  int x, y;
458  uint8_t *line = frame->data[0];
459  for (y = 0; y < avctx->height; y++) {
460  for (x = 0; x < avctx->width; x++)
461  line[2 * x + 1] ^= 0x80;
462  line += frame->linesize[0];
463  }
464  }
465 
466  if (avctx->codec_tag == AV_RL32("b64a") &&
467  avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
468  uint8_t *dst = frame->data[0];
469  uint64_t v;
470  int x, y;
471  for (y = 0; y < avctx->height; y++) {
472  for (x = 0; x >> 3 < avctx->width; x += 8) {
473  v = AV_RB64(&dst[x]);
474  AV_WB64(&dst[x], v << 16 | v >> 48);
475  }
476  dst += frame->linesize[0];
477  }
478  }
479 
480  if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
481  frame->interlaced_frame = 1;
482  if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
483  frame->top_field_first = 1;
484  }
485 
486  *got_frame = 1;
487  return buf_size;
488 }
489 
491 {
493 
494  av_buffer_unref(&context->palette);
495  return 0;
496 }
497 
499  .name = "rawvideo",
500  .long_name = NULL_IF_CONFIG_SMALL("raw video"),
501  .type = AVMEDIA_TYPE_VIDEO,
502  .id = AV_CODEC_ID_RAWVIDEO,
503  .priv_data_size = sizeof(RawVideoContext),
505  .close = raw_close_decoder,
506  .decode = raw_decode,
507  .priv_class = &rawdec_class,
508  .capabilities = AV_CODEC_CAP_PARAM_CHANGE,
509 };
raw_init_decoder
static av_cold int raw_init_decoder(AVCodecContext *avctx)
Definition: rawdec.c:69
RawVideoContext::frame_size
int frame_size
Definition: rawdec.c:42
AVCodec
AVCodec.
Definition: codec.h:190
bswapdsp.h
stride
int stride
Definition: mace.c:144
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
av_buffer_alloc
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:67
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:279
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
RawVideoContext::flip
int flip
Definition: rawdec.c:43
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:406
AV_CODEC_ID_RAWVIDEO
@ AV_CODEC_ID_RAWVIDEO
Definition: codec_id.h:62
RawVideoContext::is_nut_mono
int is_nut_mono
Definition: rawdec.c:47
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
RawVideoContext::bbdsp
BswapDSPContext bbdsp
Definition: rawdec.c:53
internal.h
RawVideoContext::bitstream_buf_size
unsigned int bitstream_buf_size
Definition: rawdec.c:55
AVOption
AVOption.
Definition: opt.h:246
AV_PKT_DATA_PALETTE
@ AV_PKT_DATA_PALETTE
An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE bytes worth of palette.
Definition: packet.h:46
data
const char data[16]
Definition: mxf.c:91
AV_PIX_FMT_MONOWHITE
@ AV_PIX_FMT_MONOWHITE
Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:75
AV_RB16
#define AV_RB16
Definition: intreadwrite.h:53
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
av_get_bits_per_pixel
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
Definition: pixdesc.c:2501
AV_PIX_FMT_RGB555BE
@ AV_PIX_FMT_RGB555BE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:107
AV_WB64
#define AV_WB64(p, v)
Definition: intreadwrite.h:433
AV_FIELD_TT
@ AV_FIELD_TT
Definition: codec_par.h:39
x
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
Definition: fate.txt:150
AV_FIELD_TB
@ AV_FIELD_TB
Definition: codec_par.h:41
raw.h
avassert.h
RawVideoContext::is_lt_16bpp
int is_lt_16bpp
Definition: rawdec.c:50
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
ff_raw_pix_fmt_tags
const PixelFormatTag ff_raw_pix_fmt_tags[]
Definition: raw.c:31
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
options
static const AVOption options[]
Definition: rawdec.c:58
AV_RB64
#define AV_RB64
Definition: intreadwrite.h:164
intreadwrite.h
frame_size
int frame_size
Definition: mxfenc.c:2139
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
get_bits.h
AV_RL16
#define AV_RL16
Definition: intreadwrite.h:42
avpriv_set_systematic_pal2
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:152
avpriv_find_pix_fmt
enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc)
Definition: utils.c:467
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
RawVideoContext::is_pal8
int is_pal8
Definition: rawdec.c:46
RawVideoContext
Definition: rawdec.c:39
AV_PIX_FMT_RGB565LE
@ AV_PIX_FMT_RGB565LE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:106
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
NULL
#define NULL
Definition: coverity.c:32
RawVideoContext::is_1_2_4_8_bpp
int is_1_2_4_8_bpp
Definition: rawdec.c:44
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
RawVideoContext::palette
AVBufferRef * palette
Definition: rawdec.c:41
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:76
AVPALETTE_SIZE
#define AVPALETTE_SIZE
Definition: pixfmt.h:32
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
RawVideoContext::is_yuv2
int is_yuv2
Definition: rawdec.c:49
desc
const char * desc
Definition: nvenc.c:79
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
RawVideoContext::bitstream_buf
void * bitstream_buf
Definition: rawdec.c:54
flip
static void flip(AVCodecContext *avctx, AVFrame *frame)
Definition: rawdec.c:136
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:186
av_image_fill_arrays
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align)
Setup the data pointers and linesizes based on the specified image parameters and the provided array.
Definition: imgutils.c:411
AV_PIX_FMT_FLAG_PSEUDOPAL
#define AV_PIX_FMT_FLAG_PSEUDOPAL
The pixel format is "pseudo-paletted".
Definition: pixdesc.h:166
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
avpriv_pix_fmt_bps_mov
const PixelFormatTag avpriv_pix_fmt_bps_mov[]
Definition: raw.c:328
av_buffer_make_writable
int av_buffer_make_writable(AVBufferRef **pbuf)
Create a writable reference from a given buffer reference, avoiding data copy if possible.
Definition: buffer.c:151
MKSCALE16
#define MKSCALE16(name, r16, w16)
Scale buffer to 16 bits per coded sample resolution.
Definition: rawdec.c:150
buffer.h
RawVideoContext::is_nut_pal8
int is_nut_pal8
Definition: rawdec.c:48
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:412
av_image_get_buffer_size
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
Definition: imgutils.c:431
line
Definition: graph2dot.c:48
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1750
AV_PIX_FMT_RGB555LE
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:108
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
raw_decode
static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: rawdec.c:170
common.h
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:277
AV_PIX_FMT_FLAG_BE
#define AV_PIX_FMT_FLAG_BE
Pixel format is big-endian.
Definition: pixdesc.h:128
uint8_t
uint8_t
Definition: audio_convert.c:194
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
len
int len
Definition: vorbis_enc_data.h:452
AVCodecContext::height
int height
Definition: avcodec.h:699
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
avcodec.h
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
ret
ret
Definition: filter_design.txt:187
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
raw_close_decoder
static av_cold int raw_close_decoder(AVCodecContext *avctx)
Definition: rawdec.c:490
ff_decode_frame_props
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1685
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AV_RL32
#define AV_RL32
Definition: intreadwrite.h:146
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
avpriv_pix_fmt_bps_avi
const PixelFormatTag avpriv_pix_fmt_bps_avi[]
Definition: raw.c:315
rawdec_class
static const AVClass rawdec_class
Definition: rawdec.c:63
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
RawVideoContext::tff
int tff
Definition: rawdec.c:51
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:81
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:38
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:551
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
imgutils.h
RawVideoContext::av_class
AVClass * av_class
Definition: rawdec.c:40
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FF_PSEUDOPAL
#define FF_PSEUDOPAL
Definition: internal.h:367
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
BswapDSPContext
Definition: bswapdsp.h:24
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
RawVideoContext::is_mono
int is_mono
Definition: rawdec.c:45
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
ff_rawvideo_decoder
AVCodec ff_rawvideo_decoder
Definition: rawdec.c:498