51 #define FRAGMENT_PIXELS 8
60 #define SB_NOT_CODED 0
61 #define SB_PARTIALLY_CODED 1
62 #define SB_FULLY_CODED 2
67 #define MAXIMUM_LONG_BIT_RUN 4129
69 #define MODE_INTER_NO_MV 0
71 #define MODE_INTER_PLUS_MV 2
72 #define MODE_INTER_LAST_MV 3
73 #define MODE_INTER_PRIOR_LAST 4
74 #define MODE_USING_GOLDEN 5
75 #define MODE_GOLDEN_MV 6
76 #define MODE_INTER_FOURMV 7
77 #define CODING_MODE_COUNT 8
126 { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
127 { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
128 { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
129 { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
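For orientation, a minimal standalone sketch (illustrative only, not part of vp3.c) of how this hilbert_offset table orders the 16 fragments of a 4x4 superblock, mirroring the 4 * sb_x/sb_y + hilbert_offset[i] indexing used around line 396:

    #include <stdio.h>

    /* copy of the 4x4 Hilbert traversal table above */
    static const unsigned char hilbert_offset[16][2] = {
        { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
        { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
        { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
        { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
    };

    int main(void)
    {
        int sb_x = 0, sb_y = 0;   /* superblock (0, 0) for the demo */
        for (int i = 0; i < 16; i++) {
            int x = 4 * sb_x + hilbert_offset[i][0];
            int y = 4 * sb_y + hilbert_offset[i][1];
            printf("step %2d -> fragment (%d, %d)\n", i, x, y);
        }
        return 0;
    }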
156 #define MIN_DEQUANT_VAL 2
202 int fragment_width[2];
203 int fragment_height[2];
206 int fragment_start[3];
212 int8_t (*motion_val[2])[2];
215 uint16_t coded_dc_scale_factor[2][64];
216 uint32_t coded_ac_scale_factor[64];
220 uint16_t qr_base[2][3][64];
239 int16_t *dct_tokens[3][64];
241 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
242 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
243 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
249 int num_coded_frags[3][64];
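A small self-contained sketch (illustrative only) of how these three TOKEN_* layouts pack into an int16_t and are told apart again by their low two bits, matching the switch (token & 3) dispatch and the (token >> 2) & 0x7f / token >> 9 / token >> 2 extractions that appear further down the listing (around lines 1855-1873); arithmetic right shift of negative values is assumed, as in the decoder itself:

    #include <stdio.h>
    #include <stdint.h>

    #define TOKEN_EOB(eob_run)              ((eob_run) << 2)
    #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
    #define TOKEN_COEFF(coeff)              (((coeff) * 4) + 2)

    int main(void)
    {
        int16_t tokens[3] = {
            TOKEN_EOB(5),          /* end-of-block run covering 5 blocks */
            TOKEN_ZERO_RUN(-3, 7), /* coefficient -3 preceded by 7 zeros */
            TOKEN_COEFF(12),       /* plain coefficient value 12         */
        };

        for (int i = 0; i < 3; i++) {
            int16_t token = tokens[i];
            switch (token & 3) {
            case 0: printf("EOB run %d\n", token >> 2);                                 break;
            case 1: printf("zero run %d, coeff %d\n", (token >> 2) & 0x7f, token >> 9); break;
            case 2: printf("coeff %d\n", token >> 2);                                   break;
            }
        }
        return 0;
    }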
254 int *coded_fragment_list[3];
258 int num_kf_coded_fragment[3];
350 for (i = 0; i < 16; i++) {
363 for (j = 0; j < 2; j++)
364 for (i = 0; i < 7; i++)
367 for (i = 0; i < 2; i++)
381 int sb_x, sb_y, plane;
384 for (plane = 0; plane < 3; plane++) {
392 for (sb_y = 0; sb_y < sb_height; sb_y++)
393 for (sb_x = 0; sb_x < sb_width; sb_x++)
394 for (i = 0; i < 16; i++) {
396 y = 4 * sb_y + hilbert_offset[i][1];
398 if (x < frag_width && y < frag_height)
416 int i, plane, inter, qri, bmi, bmj, qistart;
418 for (inter = 0; inter < 2; inter++) {
419 for (plane = 0; plane < 3; plane++) {
422 for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
423 sum += s->qr_size[inter][plane][qri];
424 if (s->qps[qpi] <= sum)
427 qistart = sum - s->qr_size[inter][plane][qri];
428 bmi = s->qr_base[inter][plane][qri];
429 bmj = s->qr_base[inter][plane][qri + 1];
430 for (i = 0; i < 64; i++) {
433 s->qr_size[inter][plane][qri]) /
434 (2 * s->qr_size[inter][plane][qri]);
436 int qmin = 8 << (inter + !i);
437 int qscale = i ? ac_scale_factor : dc_scale_factor;
438 int qbias = (1 + inter) * 3;
440 (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
441 : (qscale * (coeff - qbias) / 100 + qbias) * 4;
445 s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
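From the arithmetic visible above: qmin = 8 << (inter + !i) works out to 8 for intra AC, 16 for intra DC and inter AC, and 32 for inter DC, and qbias = (1 + inter) * 3 is 3 for intra, 6 for inter. The first branch at line 440 (DC, or any coefficient in pre-VP4 streams) clamps (qscale * coeff) / 100 * 4 into [qmin, 4096], the VP4 AC branch at line 441 applies the qbias offset instead, and line 445 pins the DC dequantizer of every additional qpi to the qpi = 0 value.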
467 int superblock_starts[3] = {
471 int current_superblock = 0;
473 int num_partial_superblocks = 0;
476 int current_fragment;
478 int plane0_num_coded_frags = 0;
487 while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
495 if (current_run == 34)
500 "Invalid partially coded superblock run length\n");
506 current_superblock += current_run;
508 num_partial_superblocks += current_run;
513 if (num_partial_superblocks < s->superblock_count) {
514 int superblocks_decoded = 0;
516 current_superblock = 0;
520 while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
529 if (current_run == 34)
532 for (j = 0; j < current_run; current_superblock++) {
535 "Invalid fully coded superblock run length\n");
545 superblocks_decoded += current_run;
551 if (num_partial_superblocks) {
568 for (plane = 0; plane < 3; plane++) {
569 int sb_start = superblock_starts[plane];
572 int num_coded_frags = 0;
576 for (i = sb_start; i < sb_end; i++) {
578 for (j = 0; j < 16; j++) {
581 if (current_fragment != -1) {
591 for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
596 for (j = 0; j < 16; j++) {
599 if (current_fragment != -1) {
605 if (current_run-- == 0) {
629 plane0_num_coded_frags = num_coded_frags;
631 for (i = 0; i < 64; i++)
640 #define BLOCK_X (2 * mb_x + (k & 1))
641 #define BLOCK_Y (2 * mb_y + (k >> 1))
643 #if CONFIG_VP4_DECODER
652 while ((bits = show_bits(gb, 9)) == 0x1ff) {
661 skip_bits(gb, 2 + n); \
662 v += (1 << n) + get_bits(gb, n); }
663 #define thresh(n) (0x200 - (0x80 >> n))
664 #define else_if(n) else if (bits < thresh(n)) body(n)
667 } else if (bits < thresh(0)) {
689 *next_block_pattern_table = 0;
699 int next_block_pattern_table;
700 int bit, current_run, has_partial;
712 current_run = vp4_get_mb_count(s, gb);
724 current_run = vp4_get_mb_count(s, gb);
729 current_run = vp4_get_mb_count(s, gb);
739 next_block_pattern_table = 0;
741 for (plane = 0; plane < 3; plane++) {
750 for (sb_y = 0; sb_y < sb_height; sb_y++) {
751 for (sb_x = 0; sb_x < sb_width; sb_x++) {
752 for (j = 0; j < 4; j++) {
753 int mb_x = 2 * sb_x + (j >> 1);
754 int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
755 int mb_coded, pattern, coded;
757 if (mb_x >= mb_width || mb_y >= mb_height)
765 pattern = vp4_get_block_pattern(s, gb, &next_block_pattern_table);
769 for (k = 0; k < 4; k++) {
773 coded = pattern & (8 >> k);
792 int i, j, k, sb_x, sb_y;
794 int current_macroblock;
795 int current_fragment;
810 for (i = 0; i < 8; i++)
812 for (i = 0; i < 8; i++)
813 custom_mode_alphabet[get_bits(gb, 3)] = i;
814 alphabet = custom_mode_alphabet;
825 for (j = 0; j < 4; j++) {
826 int mb_x = 2 * sb_x + (j >> 1);
827 int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
837 for (k = 0; k < 4; k++) {
855 for (k = 0; k < 4; k++) {
861 #define SET_CHROMA_MODES \
862 if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
863 frag[s->fragment_start[1]].coding_method = coding_mode; \
864 if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
865 frag[s->fragment_start[2]].coding_method = coding_mode;
874 for (k = 0; k < 2; k++) {
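The SET_CHROMA_MODES macro above appears to propagate the just-decoded luma coding mode to the co-located chroma fragments: fragment_start[1] and fragment_start[2] look like the offsets of the first U and V fragment, and the mode is only written where the chroma fragment is not marked MODE_COPY (i.e. where it is actually coded in this frame).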
879 for (k = 0; k < 4; k++) {
896 return last_motion < 0 ? -v : v;
905 int j, k, sb_x, sb_y;
909 int last_motion_x = 0;
910 int last_motion_y = 0;
911 int prior_last_motion_x = 0;
912 int prior_last_motion_y = 0;
913 int last_gold_motion_x = 0;
914 int last_gold_motion_y = 0;
915 int current_macroblock;
916 int current_fragment;
932 for (j = 0; j < 4; j++) {
933 int mb_x = 2 * sb_x + (j >> 1);
934 int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
944 if (coding_mode == 2) {
945 last_gold_motion_x = motion_x[0] = vp4_get_mv(s, gb, 0, last_gold_motion_x);
946 last_gold_motion_y = motion_y[0] = vp4_get_mv(s, gb, 1, last_gold_motion_y);
951 if (coding_mode == 0) {
954 } else if (coding_mode == 1) {
958 motion_x[0] = vp4_get_mv(s, gb, 0, last_motion_x);
959 motion_y[0] = vp4_get_mv(s, gb, 1, last_motion_y);
964 prior_last_motion_x = last_motion_x;
965 prior_last_motion_y = last_motion_y;
966 last_motion_x = motion_x[0];
967 last_motion_y = motion_y[0];
973 prior_last_motion_x = last_motion_x;
974 prior_last_motion_y = last_motion_y;
978 for (k = 0; k < 4; k++) {
981 if (coding_mode == 0) {
984 } else if (coding_mode == 1) {
988 motion_x[k] = vp4_get_mv(s, gb, 0, prior_last_motion_x);
989 motion_y[k] = vp4_get_mv(s, gb, 1, prior_last_motion_y);
991 last_motion_x = motion_x[k];
992 last_motion_y = motion_y[k];
1002 motion_x[0] = last_motion_x;
1003 motion_y[0] = last_motion_y;
1012 motion_x[0] = prior_last_motion_x;
1013 motion_y[0] = prior_last_motion_y;
1016 prior_last_motion_x = last_motion_x;
1017 prior_last_motion_y = last_motion_y;
1018 last_motion_x = motion_x[0];
1019 last_motion_y = motion_y[0];
1032 for (k = 0; k < 4; k++) {
1036 s->motion_val[0][current_fragment][0] = motion_x[k];
1037 s->motion_val[0][current_fragment][1] = motion_y[k];
1039 s->motion_val[0][current_fragment][0] = motion_x[0];
1040 s->motion_val[0][current_fragment][1] = motion_y[0];
1046 motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1047 motion_x[2] + motion_x[3], 2);
1048 motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1049 motion_y[2] + motion_y[3], 2);
1052 motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1053 motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
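The (v >> 1) | (v & 1) step at lines 1052-1053 appears to derive the chroma-plane motion vector from the averaged luma vector: it halves the value while forcing the low (half-pel) bit back on whenever the input had it set, so the fractional part is never rounded away, e.g. 4 -> 2, 5 -> 3, -5 -> -3.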
1060 motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1061 motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1062 motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1063 motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1065 motion_x[1] = motion_x[0];
1066 motion_y[1] = motion_y[0];
1069 motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1070 motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1073 for (k = 0; k < 2; k++) {
1079 for (k = 0; k < 4; k++) {
1099 int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
1102 for (qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
1103 i = blocks_decoded = num_blocks_at_qpi = 0;
1115 if (run_length == 34)
1117 blocks_decoded += run_length;
1120 num_blocks_at_qpi += run_length;
1122 for (j = 0; j < run_length; i++) {
1131 } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
1133 num_blocks -= num_blocks_at_qpi;
1149 int bits_to_get, zero_run;
1153 bits_to_get = get_bits(gb, bits_to_get);
1187 int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];
1194 if (num_coeffs < 0) {
1196 "Invalid number of coefficients at level %d\n", coeff_index);
1200 if (eob_run > num_coeffs) {
1202 blocks_ended = num_coeffs;
1203 eob_run -= num_coeffs;
1206 blocks_ended = eob_run;
1212 dct_tokens[j++] = blocks_ended << 2;
1216 token = get_vlc2(gb, vlc_table, 11, 3);
1218 if ((unsigned) token <= 6U) {
1225 if (eob_run > num_coeffs - coeff_i) {
1226 dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
1227 blocks_ended += num_coeffs - coeff_i;
1228 eob_run -= num_coeffs - coeff_i;
1229 coeff_i = num_coeffs;
1232 blocks_ended += eob_run;
1236 } else if (token >= 0) {
1237 zero_run = get_coeff(gb, token, &coeff);
1247 all_fragments[coded_fragment_list[coeff_i]].dc = coeff;
1252 if (coeff_index + zero_run > 64) {
1254 "Invalid zero run of %d with %d coeffs left\n",
1255 zero_run, 64 - coeff_index);
1256 zero_run = 64 - coeff_index;
1261 for (i = coeff_index + 1; i <= coeff_index + zero_run; i++)
1276 for (i = coeff_index + 1; i < 64; i++)
1281 s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
1282 else if (coeff_index < 63)
1283 s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;
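Lines 1281-1283 show how the shared token array is threaded through the 64 coefficient levels: once level coeff_index of one plane has been unpacked, the current write position dct_tokens + j becomes the base pointer for the same level of the next plane, or for level coeff_index + 1 once the last plane is done, so each s->dct_tokens[plane][coeff_index] ends up pointing at a contiguous, bitstream-ordered run of tokens.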
1291 int fragment_height);
1303 int residual_eob_run = 0;
1318 0, residual_eob_run);
1319 if (residual_eob_run < 0)
1320 return residual_eob_run;
1329 1, residual_eob_run);
1330 if (residual_eob_run < 0)
1331 return residual_eob_run;
1333 2, residual_eob_run);
1334 if (residual_eob_run < 0)
1335 return residual_eob_run;
1352 for (i = 1; i <= 5; i++) {
1356 for (i = 6; i <= 14; i++) {
1360 for (i = 15; i <= 27; i++) {
1364 for (i = 28; i <= 63; i++) {
1370 for (i = 1; i <= 63; i++) {
1371 residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
1372 0, residual_eob_run);
1373 if (residual_eob_run < 0)
1374 return residual_eob_run;
1376 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1377 1, residual_eob_run);
1378 if (residual_eob_run < 0)
1379 return residual_eob_run;
1380 residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
1381 2, residual_eob_run);
1382 if (residual_eob_run < 0)
1383 return residual_eob_run;
1389 #if CONFIG_VP4_DECODER
1398 int plane, int eob_tracker[64], int fragment)
1406 while (!eob_tracker[coeff_i]) {
1413 if ((unsigned) token <= 6U) {
1416 eob_tracker[coeff_i] = eob_run - 1;
1418 } else if (token >= 0) {
1419 zero_run = get_coeff(gb, token, &coeff);
1422 if (coeff_i + zero_run > 64) {
1424 "Invalid zero run of %d with %d coeffs left\n",
1425 zero_run, 64 - coeff_i);
1426 zero_run = 64 - coeff_i;
1429 coeff_i += zero_run;
1445 eob_tracker[coeff_i]--;
1459 for (i = 0; i < 4; i++)
1462 for (j = 1; j < 5; j++)
1463 for (i = 0; i < 4; i++)
1464 vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1471 for (i = 0; i < 4; i++)
1474 for (i = 1; i < 5; i++)
1475 dc_pred[i][0] = dc_pred[i][4];
1485 dc += dc_pred[-6].dc;
1490 dc += dc_pred[6].dc;
1494 if (count != 2 && dc_pred[-1].type == type) {
1495 dc += dc_pred[-1].dc;
1499 if (count != 2 && dc_pred[1].type == type) {
1500 dc += dc_pred[1].dc;
1505 return count == 2 ? dc / 2 : last_dc[type];
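vp4_dc_pred appears to build its prediction from up to two neighbours whose block type matches the current one: the entries at offsets -6 and +6 (one row up/down in the 6-wide dc_pred array) are tried first, the left/right entries at -1 and +1 are only added while fewer than two have matched, and the result is the mean of exactly two matching DC values, falling back to last_dc[type] otherwise.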
1512 for (plane = 0; plane < 3; plane++) {
1513 for (i = 0; i < 64; i++) {
1528 int plane, sb_y, sb_x;
1529 int eob_tracker[64];
1545 tables[0][0] = &s->dc_vlc[dc_y_table];
1546 tables[1][0] = &s->dc_vlc[dc_c_table];
1547 for (i = 1; i <= 5; i++) {
1548 tables[0][i] = &s->ac_vlc_1[ac_y_table];
1549 tables[1][i] = &s->ac_vlc_1[ac_c_table];
1551 for (i = 6; i <= 14; i++) {
1552 tables[0][i] = &s->ac_vlc_2[ac_y_table];
1553 tables[1][i] = &s->ac_vlc_2[ac_c_table];
1555 for (i = 15; i <= 27; i++) {
1556 tables[0][i] = &s->ac_vlc_3[ac_y_table];
1557 tables[1][i] = &s->ac_vlc_3[ac_c_table];
1559 for (i = 28; i <= 63; i++) {
1560 tables[0][i] = &s->ac_vlc_4[ac_y_table];
1561 tables[1][i] = &s->ac_vlc_4[ac_c_table];
1564 vp4_set_tokens_base(s);
1566 memset(last_dc, 0, sizeof(last_dc));
1569 memset(eob_tracker, 0, sizeof(eob_tracker));
1575 for (j = 0; j < 6; j++)
1576 for (i = 0; i < 6; i++)
1577 vp4_dc_predictor_reset(&dc_pred[j][i]);
1581 vp4_dc_pred_before(s, dc_pred, sb_x);
1582 for (j = 0; j < 16; j++) {
1585 int x = 4 * sb_x + hx;
1586 int y = 4 * sb_y + hy;
1598 if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
1604 vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);
1606 this_dc_pred->type = dc_block_type,
1609 vp4_dc_pred_after(s, dc_pred, sb_x);
1614 vp4_set_tokens_base(s);
1625 #define COMPATIBLE_FRAME(x) \
1626 (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1627 #define DC_COEFF(u) s->all_fragments[u].dc
1632 int fragment_height)
1640 int i = first_fragment;
1645 int vl, vul, vu, vur;
1657 static const int predictor_transform[16][4] = {
1671 { -104, 116, 0, 116 },
1673 { -104, 116, 0, 116 }
1682 static const unsigned char compatible_frame[9] = {
1693 int current_frame_type;
1709 for (y = 0; y < fragment_height; y++) {
1711 for (x = 0; x < fragment_width; x++, i++) {
1715 current_frame_type =
1726 u = i - fragment_width;
1731 ul = i - fragment_width - 1;
1736 if (x + 1 < fragment_width) {
1737 ur = i - fragment_width + 1;
1744 if (transform == 0) {
1747 predicted_dc = last_dc[current_frame_type];
1751 (predictor_transform[transform][0] * vul) +
1752 (predictor_transform[transform][1] * vu) +
1753 (predictor_transform[transform][2] * vur) +
1754 (predictor_transform[transform][3] * vl);
1756 predicted_dc /= 128;
1760 if ((transform == 15) || (transform == 13)) {
1761 if (FFABS(predicted_dc - vu) > 128)
1763 else if (FFABS(predicted_dc - vl) > 128)
1765 else if (FFABS(predicted_dc - vul) > 128)
1773 last_dc[current_frame_type] = DC_COEFF(i);
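In the predictor_transform rows visible here the four weights sum to 128 (-104 + 116 + 0 + 116), which the division by 128 at line 1756 normalizes: the predicted DC is a weighted combination of the up-left (vul), up (vu), up-right (vur) and left (vl) neighbour DCs, and transforms 13 and 15 additionally guard against the prediction straying more than 128 from vu, vl or vul.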
1780 int ystart, int yend)
1794 for (y = ystart; y < yend; y++) {
1795 for (x = 0; x < width; x++) {
1805 stride, bounding_values);
1812 stride, bounding_values);
1818 if ((x < width - 1) &&
1821 plane_data + 8 * x + 8,
1822 stride, bounding_values);
1828 if ((y < height - 1) &&
1831 plane_data + 8 * x + 8 * stride,
1832 stride, bounding_values);
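From the guards in this loop, each coded 8x8 fragment appears to be filtered on its left edge (plane_data + 8 * x) and top edge, and additionally on its right edge (+ 8) and bottom edge (+ 8 * stride) when x < width - 1 / y < height - 1 and the neighbour in that direction is not itself coded in this frame, so every shared block boundary gets filtered exactly once.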
1838 plane_data += 8 * stride;
1847 int plane, int inter, int16_t block[64])
1849 int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
1855 switch (token & 3) {
1864 i += (token >> 2) & 0x7f;
1869 block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
1873 block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
1884 block[0] = frag->dc * s->qmat[0][inter][plane][0];
1903 y_flipped == s->height ? INT_MAX
1934 int motion_y, int y)
1938 int border = motion_y & 1;
1946 ref_row = y + (motion_y >> 1);
1947 ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
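Line 1947 appears to compute the last reference-frame row this fragment's motion compensation can read: ref_row + 8 + border covers the 8-pixel block plus one extra line when the vertical vector has a half-pel component, and FFABS handles vectors that point above the frame, presumably so that the frame-threading code can wait until the reference frame has been decoded at least that far.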
1952 #if CONFIG_VP4_DECODER
1956 static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
1960 int subpel_mask = plane ? 3 : 1;
1966 int x_subpel, y_subpel;
1967 int x_offset, y_offset;
1969 int block_width = plane ? 8 : 16;
1973 #define loop_stride 12
1977 x = 8 * bx + motion_x / motion_shift;
1978 y = 8 * by + motion_y / motion_shift;
1980 x_subpel = motion_x & subpel_mask;
1981 y_subpel = motion_y & subpel_mask;
1983 if (x_subpel || y_subpel) {
1993 x2 = x + block_width;
1994 y2 = y + block_width;
1996 if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
1999 x_offset = (-(x + 2) & 7) + 2;
2000 y_offset = (-(y + 2) & 7) + 2;
2002 if (x_offset > 8 + x_subpel && y_offset > 8 + y_subpel)
2007 12, 12, src_x - 1, src_y - 1,
2011 if (x_offset <= 8 + x_subpel)
2014 if (y_offset <= 8 + y_subpel)
2022 if (!x_offset && !y_offset)
2027 12, 12, src_x - 1, src_y - 1,
2031 #define safe_loop_filter(name, ptr, stride, bounding_values) \
2032 if ((uintptr_t)(ptr) & 7) \
2033 s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
2035 s->vp3dsp.name(ptr, stride, bounding_values);
2038 safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);
2041 safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
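The safe_loop_filter wrapper above selects the _unaligned variant of the VP3 DSP loop filter whenever the target pointer is not 8-byte aligned; the two invocations at lines 2038 and 2041 then run the horizontal and vertical filters inside the 12-pixel-wide loop[] staging buffer (loop_stride is defined as 12 at line 1973) rather than directly on the reference plane.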
2044 for (i = 0; i < 9; i++)
2045 memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);
2059 int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2060 int motion_halfpel_index;
2062 int plane, first_pixel;
2067 for (plane = 0; plane < 3; plane++) {
2077 int8_t (*motion_val)[2] = s->motion_val[!!plane];
2097 for (; sb_y < slice_height; sb_y++) {
2099 for (sb_x = 0; sb_x < slice_width; sb_x++) {
2101 for (j = 0; j < 16; j++) {
2103 y = 4 * sb_y + hilbert_offset[j][1];
2104 fragment = y * fragment_width + x;
2106 i = fragment_start + fragment;
2109 if (x >= fragment_width || y >= fragment_height)
2112 first_pixel = 8 * y * stride + 8 * x;
2117 motion_val[fragment][1],
2124 motion_source = golden_plane;
2126 motion_source = last_plane;
2128 motion_source += first_pixel;
2129 motion_halfpel_index = 0;
2136 int standard_mc = 1;
2137 motion_x = motion_val[fragment][0];
2138 motion_y = motion_val[fragment][1];
2139 #if CONFIG_VP4_DECODER
2140 if (plane && s->version >= 2) {
2141 motion_x = (motion_x >> 1) | (motion_x & 1);
2142 motion_y = (motion_y >> 1) | (motion_y & 1);
2146 src_x = (motion_x >> 1) + 8 * x;
2147 src_y = (motion_y >> 1) + 8 * y;
2149 motion_halfpel_index = motion_x & 0x01;
2150 motion_source += (motion_x >> 1);
2152 motion_halfpel_index |= (motion_y & 0x01) << 1;
2153 motion_source += ((motion_y >> 1) * stride);
2155 #if CONFIG_VP4_DECODER
2160 if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2161 motion_source = temp;
2167 if (standard_mc && (
2168 src_x < 0 || src_y < 0 ||
2169 src_x + 9 >= plane_width ||
2170 src_y + 9 >= plane_height)) {
2180 motion_source = temp;
2191 if (motion_halfpel_index != 3) {
2193 output_plane + first_pixel,
2194 motion_source, stride, 8);
2198 int d = (motion_x ^ motion_y) >> 31;
2201 motion_source + stride + 1 + d,
2228 output_plane + first_pixel,
2229 last_plane + first_pixel,
2238 FFMIN(4 * sb_y + 3, fragment_height - 1));
2258 int y_fragment_count, c_fragment_count;
2271 memset(s->num_kf_coded_fragment, -1, sizeof(s->num_kf_coded_fragment));
2318 int i, inter, plane, ret;
2321 int y_fragment_count, c_fragment_count;
2322 #if CONFIG_VP4_DECODER
2347 for (i = 0; i < 64; i++) {
2348 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2356 for (i = 0; i < 3; i++)
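TRANSPOSE(x) swaps the row and column of a 0..63 scan position (((x) >> 3) | (((x) & 7) << 3)); it is presumably applied here, inside the 64-entry loop at line 2347, to transpose the IDCT scan/permutation tables for the VP4 path.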
2399 for (i = 0; i < 64; i++) {
2409 for (inter = 0; inter < 2; inter++) {
2410 for (plane = 0; plane < 3; plane++) {
2412 s->qr_size[inter][plane][0] = 63;
2414 s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2420 for (i = 0; i < 16; i++) {
2446 #if CONFIG_VP4_DECODER
2448 for (i = 0; i < 16; i++) {
2477 for (i = 0; i < 16; i++) {
2526 #if CONFIG_VP4_DECODER
2527 for (j = 0; j < 2; j++)
2528 for (i = 0; i < 7; i++)
2534 for (i = 0; i < 2; i++)
2573 if (src->f->data[0])
2591 int qps_changed = 0, i, err;
2593 if (!s1->current_frame.f->data[0] ||
2602 if ((err = ref_frames(s, s1)) < 0)
2608 for (i = 0; i < 3; i++) {
2609 if (s->qps[i] != s1->qps[i]) {
2615 if (s->qps[0] != s1->qps[0])
2620 memcpy(s->qps, s1->qps, sizeof(s->qps));
2631 void *data, int *got_frame,
2636 int buf_size = avpkt->size;
2644 #if CONFIG_THEORA_DECODER
2650 av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2664 } else if (type == 2) {
2677 "Header packet passed to frame decoder, skipping\n");
2689 for (i = 0; i < 3; i++)
2701 s->keyframe ? "key" : "", avctx->frame_number + 1, s->qps[0]);
2734 if (avctx->frame_number == 0)
2736 "VP version: %d\n", s->
version);
2742 "Warning, unsupported keyframe coding type?!\n");
2745 #if CONFIG_VP4_DECODER
2747 int mb_height, mb_width;
2748 int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2760 if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2771 "vp3: first frame not a keyframe\n");
2793 #if CONFIG_VP4_DECODER
2795 if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2819 #if CONFIG_VP4_DECODER
2821 if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2828 for (i = 0; i < 3; i++) {
2842 for (i = 0; i < 3; i++) {
2887 ff_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
2910 #if CONFIG_THEORA_DECODER
2918 int visible_width, visible_height, colorspace;
2919 uint8_t offset_x = 0, offset_y = 0;
2933 if (s->theora < 0x030200) {
2936 "Old (<alpha3) Theora bitstream, flipped image\n");
2944 if (s->theora >= 0x030200) {
2954 visible_width + offset_x > s->width ||
2955 visible_height + offset_y > s->height) {
2957 "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2958 visible_width, visible_height, offset_x, offset_y,
2965 if (fps.num && fps.den) {
2966 if (fps.num < 0 || fps.den < 0) {
2971 fps.den, fps.num, 1 << 30);
2976 if (aspect.num && aspect.den) {
2979 aspect.num, aspect.den, 1 << 30);
2983 if (s->theora < 0x030200)
2990 if (s->theora >= 0x030200) {
3005 avctx->width = visible_width;
3006 avctx->height = visible_height;
3013 if (colorspace == 1)
3015 else if (colorspace == 2)
3018 if (colorspace == 1 || colorspace == 2) {
3030 int i, n, matrices, inter, plane;
3035 if (s->theora >= 0x030200) {
3039 for (i = 0; i < 64; i++)
3043 if (s->theora >= 0x030200)
3048 for (i = 0; i < 64; i++)
3051 if (s->theora >= 0x030200)
3056 for (i = 0; i < 64; i++)
3060 if (s->theora >= 0x030200)
3065 if (matrices > 384) {
3070 for (n = 0; n < matrices; n++)
3071 for (i = 0; i < 64; i++)
3074 for (inter = 0; inter <= 1; inter++) {
3075 for (plane = 0; plane <= 2; plane++) {
3077 if (inter || plane > 0)
3085 qtj = (3 * inter + plane - 1) / 3;
3086 plj = (plane + 2) % 3;
3099 if (i >= matrices) {
3101 "invalid base matrix index\n");
3108 s->qr_size[inter][plane][qri++] = i;
3122 for (s->hti = 0; s->hti < 80; s->hti++) {
3145 const uint8_t *header_start[3];
3160 42, header_start, header_len) < 0) {
3165 for (i = 0; i < 3; i++) {
3166 if (header_len[i] <= 0)
3174 if (!(ptype & 0x80)) {
3197 "Unknown Theora config packet: %d\n", ptype & ~0x80);
3202 "%d bits left in packet %X\n",
3204 if (s->theora < 0x030200)
3217 .init = theora_decode_init,
3244 #if CONFIG_VP4_DECODER