#define BITSTREAM_READER_LE

#define VP8X_FLAG_ANIMATION             0x02
#define VP8X_FLAG_XMP_METADATA          0x04
#define VP8X_FLAG_EXIF_METADATA         0x08
#define VP8X_FLAG_ALPHA                 0x10
#define VP8X_FLAG_ICC                   0x20

#define MAX_PALETTE_SIZE                256
#define MAX_CACHE_BITS                  11
#define NUM_CODE_LENGTH_CODES           19
#define HUFFMAN_CODES_PER_META_CODE     5
#define NUM_LITERAL_CODES               256
#define NUM_LENGTH_CODES                24
#define NUM_DISTANCE_CODES              40
#define NUM_SHORT_DISTANCES             120
#define MAX_HUFFMAN_CODE_LENGTH         15
static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES] = {
    17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    {  0, 1 }, {  1, 0 }, {  1, 1 }, { -1, 1 }, {  0, 2 }, {  2, 0 }, {  1, 2 }, { -1, 2 },
    {  2, 1 }, { -2, 1 }, {  2, 2 }, { -2, 2 }, {  0, 3 }, {  3, 0 }, {  1, 3 }, { -1, 3 },
    {  3, 1 }, { -3, 1 }, {  2, 3 }, { -2, 3 }, {  3, 2 }, { -3, 2 }, {  0, 4 }, {  4, 0 },
    {  1, 4 }, { -1, 4 }, {  4, 1 }, { -4, 1 }, {  3, 3 }, { -3, 3 }, {  2, 4 }, { -2, 4 },
    {  4, 2 }, { -4, 2 }, {  0, 5 }, {  3, 4 }, { -3, 4 }, {  4, 3 }, { -4, 3 }, {  5, 0 },
    {  1, 5 }, { -1, 5 }, {  5, 1 }, { -5, 1 }, {  2, 5 }, { -2, 5 }, {  5, 2 }, { -5, 2 },
    {  4, 4 }, { -4, 4 }, {  3, 5 }, { -3, 5 }, {  5, 3 }, { -5, 3 }, {  0, 6 }, {  6, 0 },
    {  1, 6 }, { -1, 6 }, {  6, 1 }, { -6, 1 }, {  2, 6 }, { -2, 6 }, {  6, 2 }, { -6, 2 },
    {  4, 5 }, { -4, 5 }, {  5, 4 }, { -5, 4 }, {  3, 6 }, { -3, 6 }, {  6, 3 }, { -6, 3 },
    {  0, 7 }, {  7, 0 }, {  1, 7 }, { -1, 7 }, {  5, 5 }, { -5, 5 }, {  7, 1 }, { -7, 1 },
    {  4, 6 }, { -4, 6 }, {  6, 4 }, { -6, 4 }, {  2, 7 }, { -2, 7 }, {  7, 2 }, { -7, 2 },
    {  3, 7 }, { -3, 7 }, {  7, 3 }, { -7, 3 }, {  5, 6 }, { -5, 6 }, {  6, 5 }, { -6, 5 },
    {  8, 0 }, {  4, 7 }, { -4, 7 }, {  7, 4 }, { -7, 4 }, {  8, 1 }, {  8, 2 }, {  6, 6 },
    { -6, 6 }, {  8, 3 }, {  5, 7 }, { -5, 7 }, {  7, 5 }, { -7, 5 }, {  8, 4 }, {  6, 7 },
    { -6, 7 }, {  7, 6 }, { -7, 6 }, {  8, 5 }, {  7, 7 }, { -7, 7 }, {  8, 6 }, {  8, 7 }
};
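/* Note (per the WebP lossless spec): a decoded distance code in
 * [1, NUM_SHORT_DISTANCES] selects an (xoff, yoff) pair from the table above
 * and is mapped back to a linear pixel distance as
 * FFMAX(1, xoff + yoff * width); larger codes simply have
 * NUM_SHORT_DISTANCES subtracted. E.g. code 3 selects { 1, 1 }, i.e. a
 * distance of width + 1: the diagonally up-left neighbour. */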
#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x))

#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x) + c))
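/* Pixels are stored 4 bytes each (AV_PIX_FMT_ARGB) in data[0], so
 * GET_PIXEL() returns the address of the alpha component of pixel (x, y)
 * and GET_PIXEL_COMP() dereferences one component: c = 0 is alpha,
 * 1 red, 2 green, 3 blue. */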
static void image_ctx_free(ImageContext *img)
{
    int i, j;

    av_free(img->color_cache);
    if (img->huffman_groups) {
        for (i = 0; i < img->nb_huffman_groups; i++) {
            /* each group holds HUFFMAN_CODES_PER_META_CODE VLC readers */
            for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
                ff_free_vlc(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
        }
        av_free(img->huffman_groups);
    }
    memset(img, 0, sizeof(*img));
}
static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
{
    /* a simple code with a single symbol consumes no bits at all */
    if (r->nb_symbols == 1)
        return r->simple_symbols[0];
    /* ... otherwise one bit selects between the two simple symbols, or the
     * symbol is read from the VLC table via webp_get_vlc() ... */
}
static int huff_reader_build_canonical(HuffReader *r, int *code_lengths,
                                       int alphabet_size)
{
    int len = 0, sym, code = 0, ret;
    int max_code_length = 0;
    uint16_t *codes;

    /* special-case a one-symbol alphabet, which the VLC reader cannot handle */
    for (sym = 0; sym < alphabet_size; sym++) {
        if (code_lengths[sym] > 0) {
            len++;
            code = sym;
            if (len > 1)
                break;
        }
    }
    if (len == 1) {
        r->nb_symbols        = 1;
        r->simple_symbols[0] = code;
        r->simple            = 1;
        return 0;
    }

    for (sym = 0; sym < alphabet_size; sym++)
        max_code_length = FFMAX(max_code_length, code_lengths[sym]);
    /* ... validate max_code_length and allocate codes[] ... */

    /* assign codes in order of increasing code length, i.e. canonically */
    code          = 0;
    r->nb_symbols = 0;
    for (len = 1; len <= max_code_length; len++) {
        for (sym = 0; sym < alphabet_size; sym++) {
            if (code_lengths[sym] != len)
                continue;
            codes[sym] = code++;
            r->nb_symbols++;
        }
        code <<= 1;
    }
    if (!r->nb_symbols) {
        av_free(codes);
        return AVERROR_INVALIDDATA;
    }

    ret = init_vlc(&r->vlc, 8, alphabet_size,
                   code_lengths, sizeof(*code_lengths), sizeof(*code_lengths),
                   codes, sizeof(*codes), sizeof(*codes), 0);
    /* ... */
}
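/* An illustrative (not from any stream) walk through the canonical
 * assignment above: for code_lengths = { 2, 1, 3, 3 } the loop visits
 * lengths 1, 2, 3 in turn and yields codes 0b0 (sym 1), 0b10 (sym 0),
 * then 0b110 and 0b111 (syms 2 and 3) -- shorter codes always sort first,
 * and 'code <<= 1' opens the next length's code space. */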
static int read_huffman_code_normal(WebPContext *s, HuffReader *hc,
                                    int alphabet_size)
{
    HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
    int *code_lengths = NULL;
    int code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
    int i, symbol, max_symbol, prev_code_len, ret;
    int num_codes = 4 + get_bits(&s->gb, 4);

    /* the code lengths of the code-length code arrive in a fixed reordering */
    for (i = 0; i < num_codes; i++)
        code_length_code_lengths[code_length_code_order[i]] = get_bits(&s->gb, 3);
    /* ... build code_len_hc and allocate code_lengths[] ... */

    if (get_bits1(&s->gb)) {
        /* ... read max_symbol from the bitstream ... */
        if (max_symbol > alphabet_size) {
            av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
                   max_symbol, alphabet_size);
            ret = AVERROR_INVALIDDATA;
            goto finish;
        }
    } else {
        max_symbol = alphabet_size;
    }

    symbol = 0;
    while (symbol < alphabet_size) {
        int code_len;
        /* ... read code_len with code_len_hc ... */
        if (code_len < 16) {
            /* values 0..15 are literal code lengths */
            code_lengths[symbol++] = code_len;
            if (code_len)
                prev_code_len = code_len;
        } else {
            int repeat = 0, length = 0;
            /* 16 repeats the previous non-zero length; 17/18 emit zero runs */
            if (code_len == 16)
                length = prev_code_len;
            /* ... read the repeat count from extra bits ... */
            if (symbol + repeat > alphabet_size) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "invalid symbol %d + repeat %d > alphabet size %d\n",
                       symbol, repeat, alphabet_size);
                ret = AVERROR_INVALIDDATA;
                goto finish;
            }
            while (repeat-- > 0)
                code_lengths[symbol++] = length;
        }
    }
    /* ... build the final code with huff_reader_build_canonical(), then
     * free code_len_hc and code_lengths at finish: ... */
}
#define PARSE_BLOCK_SIZE(w, h) do {                                        \
    block_bits = get_bits(&s->gb, 3) + 2;                                  \
    blocks_w   = FFALIGN((w), 1 << block_bits) >> block_bits;              \
    blocks_h   = FFALIGN((h), 1 << block_bits) >> block_bits;              \
} while (0)
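/* block_bits is in [2, 9], so blocks span 4x4 to 512x512 pixels; FFALIGN
 * rounds the dimension up before the shift, making this a ceiling
 * division. E.g. w = 500 with block_bits = 4 gives
 * FFALIGN(500, 16) >> 4 = 512 >> 4 = 32 blocks per row. */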
static int decode_entropy_image(WebPContext *s)
{
    /* ... */
    width = s->width;
    if (s->reduced_width > 0)
        width = s->reduced_width;

    PARSE_BLOCK_SIZE(width, s->height);
    /* ... decode the entropy image itself ... */

    /* the number of huffman groups is one more than the largest group
     * index coded in the entropy image; indices are stored in the red and
     * green components of each entropy-image pixel */
    max = 0;
    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
            int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
            int p  = p0 << 8 | p1;
            max    = FFMAX(max, p);
        }
    }
    s->nb_huffman_groups = max + 1;
    /* ... */
}
static int parse_transform_color_indexing(WebPContext *s)
{
    ImageContext *img;
    int width_bits, index_size, ret, x;
    uint8_t *ct;

    index_size = get_bits(&s->gb, 8) + 1;

    if (index_size <= 2)
        width_bits = 3;
    else if (index_size <= 4)
        width_bits = 2;
    else if (index_size <= 16)
        width_bits = 1;
    else
        width_bits = 0;
    /* ... decode the palette as a 1-pixel-high entropy-coded image ... */

    img->size_reduction = width_bits;
    if (width_bits > 0)
        s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;

    /* palette entries are stored as deltas from the previous entry */
    ct = img->frame->data[0] + 4;
    for (x = 4; x < img->frame->width * 4; x++, ct++)
        ct[0] += ct[-4];
    /* ... */
}
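/* With width_bits > 0 several palette indices are packed into one byte:
 * e.g. a palette of at most 4 colors needs 2 bits per index, so four
 * indices share a byte and the coded image is only ceil(width / 4)
 * pixels wide -- which is what s->reduced_width records. */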
static HuffReader *get_huffman_group(WebPContext *s, ImageContext *img,
                                     int x, int y)
{
    /* ... if an entropy image is present, look the group index up in the
     * red and green components of the corresponding meta-pixel ... */
        group = g0 << 8 | g1;
    /* ... */
}

static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
{
    uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
    img->color_cache[cache_idx] = c;
}
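/* The cache index is a multiplicative hash: 0x1E35A7BD is the constant
 * fixed by the WebP lossless spec, and keeping the top color_cache_bits
 * bits of the 32-bit product spreads nearby ARGB values across the
 * (1 << color_cache_bits)-entry cache. */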
static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
                                      int w, int h)
{
    ImageContext *img;
    HuffReader *hg;
    int i, j, ret, x, y, width;

    img = &s->image[role];
    /* ... allocate img->frame if needed ... */
    img->frame->width  = w;
    img->frame->height = h;
    /* ... get a buffer for the frame ... */

    if (get_bits1(&s->gb)) {
        img->color_cache_bits = get_bits(&s->gb, 4);
        if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
            av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
                   img->color_cache_bits);
            return AVERROR_INVALIDDATA;
        }
        img->color_cache = av_mallocz_array(1 << img->color_cache_bits,
                                            sizeof(*img->color_cache));
        if (!img->color_cache)
            return AVERROR(ENOMEM);
    } else {
        img->color_cache_bits = 0;
    }

    img->nb_huffman_groups = 1;
    if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
        /* ... decode the entropy image ... */
        img->nb_huffman_groups = s->nb_huffman_groups;
    }
    img->huffman_groups = av_mallocz_array(img->nb_huffman_groups *
                                           HUFFMAN_CODES_PER_META_CODE,
                                           sizeof(*img->huffman_groups));
    if (!img->huffman_groups)
        return AVERROR(ENOMEM);

    for (i = 0; i < img->nb_huffman_groups; i++) {
        hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
        for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
            int alphabet_size = alphabet_sizes[j];
            /* the first (green) code also covers the color cache symbols */
            if (!j && img->color_cache_bits > 0)
                alphabet_size += 1 << img->color_cache_bits;
            /* ... read a simple or normal huffman code of this size ... */
        }
    }
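    /* For the green code this makes the alphabet
     * NUM_LITERAL_CODES + NUM_LENGTH_CODES + (1 << color_cache_bits)
     * symbols: 256 literal green values, 24 LZ77 length prefixes, and one
     * symbol per color cache slot, so a single symbol read in the loop
     * below distinguishes the three cases. */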
    /* ... compute the effective row width, then decode pixels ... */
    x = 0;
    y = 0;
    while (y < img->frame->height) {
        int v;

        hg = get_huffman_group(s, img, x, y);
        v  = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
        if (v < NUM_LITERAL_CODES) {
            /* literal pixel: v is the green value; red, blue and alpha
             * follow from their own codes */
            /* ... */
            if (img->color_cache_bits)
                color_cache_put(img, AV_RB32(p));
            /* ... advance x, wrapping to the next row at the right edge ... */
        } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
            /* LZ77 backwards mapping */
            int prefix_code, length, distance, ref_x, ref_y;

            prefix_code = v - NUM_LITERAL_CODES;
            if (prefix_code < 4) {
                length = prefix_code + 1;
            } else {
                /* ... prefix-coded length: extra bits follow ... */
            }
            prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
            if (prefix_code > 39U) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "distance prefix code too large: %d\n", prefix_code);
                return AVERROR_INVALIDDATA;
            }
            if (prefix_code < 4) {
                distance = prefix_code + 1;
            } else {
                /* ... prefix-coded distance: extra bits follow ... */
            }
            /* ... map short distances through lz77_distance_offsets[] and
             * derive the reference position, clamped into the image ... */
            ref_x = FFMAX(0, ref_x);
            ref_y = FFMAX(0, ref_y);

            /* the source and destination regions may overlap and wrap
             * across row ends, so copy one pixel at a time */
            for (i = 0; i < length; i++) {
                /* ... copy pixel (ref_x, ref_y) to (x, y) ... */
                if (img->color_cache_bits)
                    color_cache_put(img, AV_RB32(p));
                /* ... advance both positions, wrapping at row ends ... */
                if (ref_x == width) {
                    ref_x = 0;
                    ref_y++;
                }
                if (y == img->frame->height || ref_y == img->frame->height)
                    break;
            }
        } else {
            /* emit a pixel from the color cache */
            int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);

            if (!img->color_cache_bits) {
                av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
                return AVERROR_INVALIDDATA;
            }
            if (cache_idx >= 1 << img->color_cache_bits) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "color cache index out-of-bounds\n");
                return AVERROR_INVALIDDATA;
            }
            /* ... */
        }
    }

    return 0;
}
/* PRED_MODE_AVG_T_AVG_L_TR */
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_t[0] + (p_l[0] + p_tr[0] >> 1) >> 1;
    p[1] = p_t[1] + (p_l[1] + p_tr[1] >> 1) >> 1;
    p[2] = p_t[2] + (p_l[2] + p_tr[2] >> 1) >> 1;
    p[3] = p_t[3] + (p_l[3] + p_tr[3] >> 1) >> 1;
}

/* PRED_MODE_AVG_L_TL */
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_l[0] + p_tl[0] >> 1;
    p[1] = p_l[1] + p_tl[1] >> 1;
    p[2] = p_l[2] + p_tl[2] >> 1;
    p[3] = p_l[3] + p_tl[3] >> 1;
}

/* PRED_MODE_AVG_L_T */
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_l[0] + p_t[0] >> 1;
    p[1] = p_l[1] + p_t[1] >> 1;
    p[2] = p_l[2] + p_t[2] >> 1;
    p[3] = p_l[3] + p_t[3] >> 1;
}

/* PRED_MODE_AVG_TL_T */
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_tl[0] + p_t[0] >> 1;
    p[1] = p_tl[1] + p_t[1] >> 1;
    p[2] = p_tl[2] + p_t[2] >> 1;
    p[3] = p_tl[3] + p_t[3] >> 1;
}

/* PRED_MODE_AVG_T_TR */
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_t[0] + p_tr[0] >> 1;
    p[1] = p_t[1] + p_tr[1] >> 1;
    p[2] = p_t[2] + p_tr[2] >> 1;
    p[3] = p_t[3] + p_tr[3] >> 1;
}

/* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR */
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = (p_l[0] + p_tl[0] >> 1) + (p_t[0] + p_tr[0] >> 1) >> 1;
    p[1] = (p_l[1] + p_tl[1] >> 1) + (p_t[1] + p_tr[1] >> 1) >> 1;
    p[2] = (p_l[2] + p_tl[2] >> 1) + (p_t[2] + p_tr[2] >> 1) >> 1;
    p[3] = (p_l[3] + p_tl[3] >> 1) + (p_t[3] + p_tr[3] >> 1) >> 1;
}

/* PRED_MODE_SELECT */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
               (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
               (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
               (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
    /* ... pick the top neighbour if diff <= 0, else the left one ... */
}

/* PRED_MODE_ADD_SUBTRACT_FULL */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = av_clip_uint8(p_l[0] + p_t[0] - p_tl[0]);
    p[1] = av_clip_uint8(p_l[1] + p_t[1] - p_tl[1]);
    p[2] = av_clip_uint8(p_l[2] + p_t[2] - p_tl[2]);
    p[3] = av_clip_uint8(p_l[3] + p_t[3] - p_tl[3]);
}

static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
{
    int d = a + b >> 1;
    return av_clip_uint8(d + (d - c) / 2);
}
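/* Note that the averaging predictors above rely on C operator precedence:
 * '+' binds tighter than '>>', so p_l[0] + p_tl[0] >> 1 means
 * (p_l[0] + p_tl[0]) >> 1. The operands are uint8_t values promoted to
 * int, so the sums cannot overflow. */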
static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
{
    uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
    /* ... gather the left, top-left and top neighbours ... */
    if (x == frame->width - 1)
        p_tr = GET_PIXEL(frame, 0, y);   /* TR wraps around on the last column */
    /* ... apply inverse_predict[m] and add the prediction to the pixel ... */
}

static int apply_predictor_transform(WebPContext *s)
{
    /* ... */
    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            /* ... look up the per-block predictor mode m ... */
            if (m > 13) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "invalid predictor mode: %d\n", m);
                return AVERROR_INVALIDDATA;
            }
            /* ... */
        }
    }
    /* ... */
}
static int apply_color_transform(WebPContext *s)
{
    /* ... */
    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            /* ... undo the per-block green-to-red and green-to-blue
             * cross-channel deltas via color_transform_delta() ... */
        }
    }
    return 0;
}

static int apply_subtract_green_transform(WebPContext *s)
{
    ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
    int x, y;

    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            uint8_t *p = GET_PIXEL(img->frame, x, y);
            p[1] += p[2];   /* green was subtracted from red at encode time */
            p[3] += p[2];   /* ... and from blue */
        }
    }
    return 0;
}
static int apply_color_indexing_transform(WebPContext *s)
{
    ImageContext *img = &s->image[IMAGE_ROLE_ARGB];
    ImageContext *pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];
    uint8_t *p;
    int i, x, y;

    if (pal->size_reduction > 0) {
        /* unpack sub-byte palette indices into whole green components */
        GetBitContext gb_g;
        uint8_t *line;
        int pixel_bits = 8 >> pal->size_reduction;
        /* ... allocate a scratch line ... */
        for (y = 0; y < img->frame->height; y++) {
            p = GET_PIXEL(img->frame, 0, y);
            memcpy(line, p, img->frame->linesize[0]);
            /* ... bit-read the packed indices back out of the line ... */
            for (x = 0; x < img->frame->width; x++) {
                p    = GET_PIXEL(img->frame, x, y);
                p[2] = get_bits(&gb_g, pixel_bits);
                /* ... skip the non-green bytes between packed groups ... */
            }
        }
        /* ... */
    }

    /* beyond a trivial image size, copy the palette into a local,
     * zero-padded array so lookups need no per-pixel bounds check */
    if (img->frame->height * img->frame->width > 300) {
        uint8_t palette[256 * 4];
        const int size = pal->frame->width * 4;
        memcpy(palette, GET_PIXEL(pal->frame, 0, 0), size);
        memset(palette + size, 0, 256 * 4 - size);
        for (y = 0; y < img->frame->height; y++) {
            for (x = 0; x < img->frame->width; x++) {
                p = GET_PIXEL(img->frame, x, y);
                i = p[2];
                /* ... replace the index with palette entry i ... */
            }
        }
    } else {
        for (y = 0; y < img->frame->height; y++) {
            for (x = 0; x < img->frame->width; x++) {
                /* ... look index p[2] up in the palette image directly,
                 * emitting transparent black for out-of-range indices ... */
            }
        }
    }
    return 0;
}
static void update_canvas_size(AVCodecContext *avctx, int w, int h)
{
    WebPContext *s = avctx->priv_data;

    if (s->width && s->width != w) {
        /* ... warn that the coded width disagrees with the canvas ... */
    }
    s->width = w;
    if (s->height && s->height != h) {
        /* ... warn likewise for the height ... */
    }
    s->height = h;
}
static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
                                     int *got_frame, uint8_t *data_start,
                                     unsigned int data_size, int is_alpha_chunk)
{
    WebPContext *s = avctx->priv_data;
    int ret, i;

    if (!is_alpha_chunk) {
        s->lossless    = 1;
        avctx->pix_fmt = AV_PIX_FMT_ARGB;
    }
    /* ... init the bit reader and check the VP8L signature byte ... */

    if (!is_alpha_chunk) {
        /* ... read the 14-bit width and height and update the canvas;
         * an ALPH chunk instead reuses the already-known dimensions ... */
    }
    /* ... */
    if (!s->width || !s->height)
        return AVERROR_BUG;
    /* ... */
    s->nb_transforms = 0;
    s->reduced_width = 0;
    /* ... parse the transforms and decode the ARGB image; any failure
     * jumps to free_and_return ... */

    /* apply transformations in the reverse of the order they were coded */
    for (i = s->nb_transforms - 1; i >= 0; i--) {
        switch (s->transforms[i]) {
        case PREDICTOR_TRANSFORM:
            ret = apply_predictor_transform(s);
            break;
        case COLOR_TRANSFORM:
            ret = apply_color_transform(s);
            break;
        case SUBTRACT_GREEN:
            ret = apply_subtract_green_transform(s);
            break;
        case COLOR_INDEXING_TRANSFORM:
            ret = apply_color_indexing_transform(s);
            break;
        }
        if (ret < 0)
            goto free_and_return;
    }
    /* ... set *got_frame, free the helper images, return ... */
}
static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
{
    uint8_t *dec;
    int x, y, ls;

    ls = frame->linesize[3];

    /* filter the top row with the horizontal filter */
    dec = frame->data[3] + 1;
    for (x = 1; x < frame->width; x++, dec++)
        *dec += *(dec - 1);

    /* filter the left column with the vertical filter */
    dec = frame->data[3] + ls;
    for (y = 1; y < frame->height; y++, dec += ls)
        *dec += *(dec - ls);

    /* filter the rest of the plane with the requested filter */
    switch (m) {
    case ALPHA_FILTER_HORIZONTAL:
        for (y = 1; y < frame->height; y++) {
            dec = frame->data[3] + y * ls + 1;
            for (x = 1; x < frame->width; x++, dec++)
                *dec += *(dec - 1);
        }
        break;
    case ALPHA_FILTER_VERTICAL:
        for (y = 1; y < frame->height; y++) {
            dec = frame->data[3] + y * ls + 1;
            for (x = 1; x < frame->width; x++, dec++)
                *dec += *(dec - ls);
        }
        break;
    case ALPHA_FILTER_GRADIENT:
        for (y = 1; y < frame->height; y++) {
            dec = frame->data[3] + y * ls + 1;
            for (x = 1; x < frame->width; x++, dec++)
                dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
        }
        break;
    }
}
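/* Each alpha sample was coded as the difference from a prediction based
 * on already-decoded neighbours, so decoding adds the prediction back:
 * the horizontal filter predicts from the left sample, the vertical one
 * from the sample above, and the gradient filter from
 * clip(left + top - top_left), much like PNG's scanline filters. */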
static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p,
                                  uint8_t *data_start,
                                  unsigned int data_size)
{
    WebPContext *s = avctx->priv_data;
    int x, y, ret;

    if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
        /* uncompressed alpha: one raw byte per pixel */
        GetByteContext gb;

        bytestream2_init(&gb, data_start, data_size);
        for (y = 0; y < s->height; y++)
            bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
                                   s->width);
    } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
        /* lossless-coded alpha: decode as a VP8L image, then copy its
         * green channel into the alpha plane */
        int alpha_got_frame = 0;

        s->alpha_frame = av_frame_alloc();
        if (!s->alpha_frame)
            return AVERROR(ENOMEM);

        ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
                                        data_start, data_size, 1);
        /* ... bail out if decoding failed ... */
        if (!alpha_got_frame) {
            av_frame_free(&s->alpha_frame);
            return AVERROR_INVALIDDATA;
        }

        for (y = 0; y < s->height; y++) {
            for (x = 0; x < s->width; x++) {
                /* ... copy the green component of pixel (x, y) ... */
            }
        }
        av_frame_free(&s->alpha_frame);
    }

    /* apply alpha filtering */
    if (s->alpha_filter)
        alpha_inverse_prediction(p, s->alpha_filter);

    return 0;
}
static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p,
                                  int *got_frame, uint8_t *data_start,
                                  unsigned int data_size)
{
    WebPContext *s = avctx->priv_data;

    if (!s->initialized) {
        /* ... one-time init of the embedded VP8 decoder ... */
        s->v.actually_webp = 1;
    }
    /* ... */
    if (data_size > INT_MAX) {
        av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
        return AVERROR_PATCHWELCOME;
    }
    /* ... decode the VP8 payload, then the ALPH data (if present) from
     * s->alpha_data and s->alpha_data_size ... */
}
static int webp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                             AVPacket *avpkt)
{
    AVFrame * const p = data;
    WebPContext *s = avctx->priv_data;
    GetByteContext gb;
    int ret;
    int vp8x_flags = 0;
    uint32_t chunk_type, chunk_size;

    /* ... reset the per-frame state and init gb from avpkt ... */

    if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
        av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
        return AVERROR_INVALIDDATA;
    }

    chunk_size = bytestream2_get_le32(&gb);
    /* ... make sure the payload is at least chunk_size bytes ... */

    if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
        av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
        return AVERROR_INVALIDDATA;
    }

    while (bytestream2_get_bytes_left(&gb) > 8) {
        char chunk_str[5] = { 0 };

        chunk_type = bytestream2_get_le32(&gb);
        chunk_size = bytestream2_get_le32(&gb);
        if (chunk_size == UINT32_MAX)
            return AVERROR_INVALIDDATA;
        chunk_size += chunk_size & 1;
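        /* RIFF pads chunk payloads to even length while the size field
         * records the unpadded size; rounding chunk_size up by
         * (chunk_size & 1) keeps the reader aligned with the next
         * chunk header. */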
        switch (chunk_type) {
        case MKTAG('V', 'P', '8', ' '):
            if (!*got_frame) {
                ret = vp8_lossy_decode_frame(avctx, p, got_frame,
                                             avpkt->data + bytestream2_tell(&gb),
                                             chunk_size);
                if (ret < 0)
                    return ret;
            }
            bytestream2_skip(&gb, chunk_size);
            break;
        case MKTAG('V', 'P', '8', 'L'):
            if (!*got_frame) {
                ret = vp8_lossless_decode_frame(avctx, p, got_frame,
                                                avpkt->data + bytestream2_tell(&gb),
                                                chunk_size, 0);
                if (ret < 0)
                    return ret;
                avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
            }
            bytestream2_skip(&gb, chunk_size);
            break;
        case MKTAG('V', 'P', '8', 'X'):
            if (s->width || s->height || *got_frame) {
                av_log(avctx, AV_LOG_ERROR,
                       "Canvas dimensions are already set\n");
                return AVERROR_INVALIDDATA;
            }
            vp8x_flags = bytestream2_get_byte(&gb);
            bytestream2_skip(&gb, 3);
            s->width  = bytestream2_get_le24(&gb) + 1;
            s->height = bytestream2_get_le24(&gb) + 1;
            /* ... validate the dimensions ... */
            break;
        case MKTAG('A', 'L', 'P', 'H'): {
            int alpha_header, filter_m, compression;

            if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
                av_log(avctx, AV_LOG_WARNING,
                       "ALPHA chunk present, but alpha bit not set in the "
                       "VP8X header\n");
            }
            if (chunk_size == 0) {
                av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
                return AVERROR_INVALIDDATA;
            }
            alpha_header       = bytestream2_get_byte(&gb);
            s->alpha_data      = avpkt->data + bytestream2_tell(&gb);
            s->alpha_data_size = chunk_size - 1;
            bytestream2_skip(&gb, s->alpha_data_size);

            filter_m    = (alpha_header >> 2) & 0x03;
            compression =  alpha_header       & 0x03;

            if (compression > ALPHA_COMPRESSION_VP8L) {
                av_log(avctx, AV_LOG_VERBOSE,
                       "skipping unsupported ALPHA chunk\n");
            } else {
                s->has_alpha         = 1;
                s->alpha_compression = compression;
                s->alpha_filter      = filter_m;
            }

            break;
        }
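        /* ALPH header byte layout, low bits first: bits 0-1 are the
         * compression method (0 = raw, 1 = VP8L), bits 2-3 the pre-filter
         * applied to the alpha plane, and bits 4-5 a preprocessing hint;
         * the rest of the chunk is the alpha bitstream itself. */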
        case MKTAG('E', 'X', 'I', 'F'): {
            int exif_offset = bytestream2_tell(&gb);
            GetByteContext exif_gb;

            if (s->has_exif) {
                av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra EXIF chunk\n");
                goto exif_end;
            }
            if (!(vp8x_flags & VP8X_FLAG_EXIF_METADATA))
                av_log(avctx, AV_LOG_WARNING,
                       "EXIF chunk present, but Exif bit not set in the "
                       "VP8X header\n");

            s->has_exif = 1;
            bytestream2_init(&exif_gb, avpkt->data + exif_offset,
                             avpkt->size - exif_offset);
            /* ... parse the TIFF header and IFDs with ff_tdecode_header()
             * and ff_exif_decode_ifd(), attach the result as frame
             * metadata, then free the dictionary ... */
exif_end:
            bytestream2_skip(&gb, chunk_size);
            break;
        }
        case MKTAG('I', 'C', 'C', 'P'): {
            if (s->has_iccp) {
                av_log(avctx, AV_LOG_VERBOSE, "Ignoring extra ICCP chunk\n");
                bytestream2_skip(&gb, chunk_size);
                break;
            }
            if (!(vp8x_flags & VP8X_FLAG_ICC))
                av_log(avctx, AV_LOG_WARNING,
                       "ICCP chunk present, but ICC Profile bit not set in the "
                       "VP8X header\n");

            s->has_iccp = 1;
            /* ... attach the profile bytes as AV_FRAME_DATA_ICC_PROFILE
             * side data ... */
            break;
        }
        case MKTAG('A', 'N', 'I', 'M'):
        case MKTAG('A', 'N', 'M', 'F'):
        case MKTAG('X', 'M', 'P', ' '):
            AV_WL32(chunk_str, chunk_type);
            av_log(avctx, AV_LOG_WARNING, "skipping unsupported chunk: %s\n",
                   chunk_str);
            bytestream2_skip(&gb, chunk_size);
            break;
        default:
            AV_WL32(chunk_str, chunk_type);
            av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
                   chunk_str);
            bytestream2_skip(&gb, chunk_size);
            break;
        }
    }

    if (!*got_frame) {
        av_log(avctx, AV_LOG_ERROR, "image data not found\n");
        return AVERROR_INVALIDDATA;
    }

    return avpkt->size;
}