40 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
41 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
42 #elif CONFIG_VP7_DECODER
43 #define VPX(vp7, f) vp7_ ## f
44 #else // CONFIG_VP8_DECODER
45 #define VPX(vp7, f) vp8_ ## f
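/* Note: VPX(vp7, f) selects between the vp7_- and vp8_-prefixed helpers. With both
 * decoders compiled in, the choice is made at run time on the vp7 flag; with only
 * one decoder configured it collapses to that decoder's function at compile time. */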
87 #if CONFIG_VP8_DECODER
130 for (i = 0; i < 5; i++)
224 for (i = 0; i < 4; i++)
227 for (i = 0; i < 4; i++)
231 for (i = 0; i < 3; i++)
240 for (i = 0; i < 4; i++) {
273 if (buf_size - size < 0)
316 for (i = 0; i < 4; i++) {
369 for (i = 0; i < 4; i++)
370 for (j = 0; j < 16; j++)
380 for (i = 0; i < 4; i++)
381 for (j = 0; j < 8; j++)
382 for (k = 0; k < 3; k++)
391 #define VP7_MVC_SIZE 17
392 #define VP8_MVC_SIZE 19
401 for (i = 0; i < 4; i++)
404 for (i = 0; i < 3; i++)
408 for (i = 0; i < 2; i++)
409 for (j = 0; j < mvc_size; j++)
429 for (j = 1; j < 3; j++) {
430 for (i = 0; i < height / 2; i++)
437 const uint8_t *src, int src_linesize,
442 for (j = 0; j < height; j++) {
443 for (i = 0; i < width; i++) {
444 uint8_t y = src[j * src_linesize + i];
445 dst[j * dst_linesize + i] = av_clip_uint8(y + ((y * beta) >> 8) + alpha);
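/* Note: fade() applies the VP7 fade feature to a plane: each sample is scaled by
 * roughly (256 + beta) / 256 and offset by alpha, then clipped to 8 bits.
 * Illustrative per-pixel helper (not part of vp8.c):
 *
 *     static inline uint8_t fade_pixel(uint8_t y, int alpha, int beta)
 *     {
 *         return av_clip_uint8(y + ((y * beta) >> 8) + alpha);
 *     }
 */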
456 if (!s->keyframe && (alpha || beta)) {
483 width, height, alpha, beta);
492 int part1_size, hscale, vscale, i, j, ret;
496 s->profile = (buf[0] >> 1) & 7;
504 part1_size = AV_RL24(buf) >> 4;
506 if (buf_size < 4 - s->profile + part1_size) {
518 buf_size -= part1_size;
526 if (hscale || vscale)
535 for (i = 0; i < 2; i++)
547 for (i = 0; i < 4; i++) {
552 for (j = 0; j < 3; j++)
557 for (j = 0; j < 4; j++)
611 for (i = 1; i < 16; i++)
638 int header_size, hscale, vscale, ret;
650 header_size = AV_RL24(buf) >> 5;
664 if (header_size > buf_size - 7 * s->keyframe) {
670 if (AV_RL24(buf) != 0x2a019d) {
672 "Invalid start code 0x%x\n", AV_RL24(buf));
675 width = AV_RL16(buf + 3) & 0x3fff;
676 height = AV_RL16(buf + 5) & 0x3fff;
677 hscale = buf[4] >> 6;
678 vscale = buf[6] >> 6;
682 if (hscale || vscale)
699 buf_size -= header_size;
765 dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
766 av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
767 dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
768 av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
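/* Note: clamp_mv() clips a predicted vector into the per-row [mv_min, mv_max]
 * range (itself clipped to int16) so that motion compensation cannot point far
 * outside the edge-extended frame. */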
781 for (i = 0; i < 3; i++)
783 for (i = (vp7 ? 7 : 9); i > 3; i--)
838 const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
848 top_mv = top_mb->bmv;
864 for (n = 0; n < num; n++) {
866 uint32_t left, above;
870 left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
872 left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
874 above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
876 above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
913 int xoffset, int yoffset, int boundary,
914 int *edge_x, int *edge_y)
916 int vwidth = mb_width + 1;
917 int new = (mb_y + yoffset) * vwidth + mb_x + xoffset;
918 if (new < boundary || new % vwidth == vwidth - 1)
920 *edge_y = new / vwidth;
921 *edge_x = new % vwidth;
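/* Note: vp7_calculate_mb_offset() maps an (xoffset, yoffset) neighbour to an index
 * in the macroblock array, which is vwidth = mb_width + 1 entries wide because the
 * VP7 reference decoder keeps a padding macroblock column at the right edge of the
 * frame; candidates before 'boundary' or inside that padding column are rejected. */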
932 int mb_x, int mb_y, int layout)
935 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
936 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
960 if (AV_RN32A(&near_mv[CNT_NEAREST])) {
961 if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
963 } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
964 if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
972 AV_WN32A(&near_mv[CNT_NEAREST], mv);
993 if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
994 AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1004 mb->bmv[0] = mb->mv;
1007 mb->mv = near_mv[CNT_NEAR];
1008 mb->bmv[0] = mb->mv;
1011 mb->mv = near_mv[CNT_NEAREST];
1012 mb->bmv[0] = mb->mv;
1017 mb->bmv[0] = mb->mv;
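/* Note: in vp7_decode_mvs() the candidate vectors found above are tallied into
 * cnt[CNT_ZERO/CNT_NEAREST/CNT_NEAR]; for the non-split modes mb->mv is then taken
 * from near_mv[] (or left at zero) and mb->bmv[0] simply mirrors mb->mv. */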
1023 int mb_x, int mb_y, int layout)
1028 enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1029 enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1038 mb_edge[0] = mb + 2;
1039 mb_edge[2] = mb + 1;
1050 #define MV_EDGE_CHECK(n) \
1052 VP8Macroblock *edge = mb_edge[n]; \
1053 int edge_ref = edge->ref_frame; \
1054 if (edge_ref != VP56_FRAME_CURRENT) { \
1055 uint32_t mv = AV_RN32A(&edge->mv); \
1057 if (cur_sign_bias != sign_bias[edge_ref]) { \
1060 mv = ((mv & 0x7fff7fff) + \
1061 0x00010001) ^ (mv & 0x80008000); \
1063 if (!n || mv != AV_RN32A(&near_mv[idx])) \
1064 AV_WN32A(&near_mv[++idx], mv); \
1065 cnt[idx] += 1 + (n != 2); \
1067 cnt[CNT_ZERO] += 1 + (n != 2); \
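/* Note: MV_EDGE_CHECK(n) examines one edge macroblock (top, left, top-left) in the
 * VP8 path: intra-coded edges are skipped, a zero vector adds to cnt[CNT_ZERO], and
 * a non-zero vector is sign-flipped when the edge's reference frame has a different
 * sign bias, deduplicated into near_mv[], and counted with weight 2 for top/left
 * and 1 for the top-left edge. */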
1080 if (cnt[CNT_SPLITMV] &&
1081 AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1082 cnt[CNT_NEAREST] += 1;
1085 if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1087 FFSWAP(VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1093 clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1104 mb->bmv[0] = mb->mv;
1108 mb->bmv[0] = mb->mv;
1112 mb->bmv[0] = mb->mv;
1117 mb->bmv[0] = mb->mv;
1123 int mb_x, int keyframe, int layout)
1139 for (y = 0; y < 4; y++) {
1140 for (x = 0; x < 4; x++) {
1144 left[y] = top[x] = *intra4x4;
1150 for (i = 0; i < 16; i++)
1161 const char *vp7_feature_name[] = { "q-index",
1163 "partial-golden-update",
1168 for (i = 0; i < 4; i++) {
1174 "Feature %s present in macroblock (value 0x%x)\n",
1183 *segment = ref ? *ref : *segment;
1250 int i, uint8_t *token_prob, int16_t qmul[2],
1251 const uint8_t scan[16], int vp7)
1265 token_prob = probs[i][0];
1273 token_prob = probs[i + 1][1];
1293 int cat = (a << 1) + b;
1294 coeff = 3 + (8 << cat);
1298 token_prob = probs[i + 1][2];
1310 int16_t dc = block[0];
1319 block[0] = pred[0] = dc;
1324 block[0] = pred[0] = dc;
1338 token_prob, qmul, scan, IS_VP7);
1341 #ifndef vp8_decode_block_coeffs_internal
1369 int i, int zero_nhood, int16_t qmul[2],
1370 const uint8_t scan[16], int vp7)
1372 uint8_t *token_prob = probs[i][zero_nhood];
1376 token_prob, qmul, scan)
1386 int i, x, y, luma_start = 0, luma_ctx = 3;
1387 int nnz_pred, nnz, nnz_total = 0;
1392 nnz_pred = t_nnz[8] + l_nnz[8];
1398 l_nnz[8] = t_nnz[8] = !!nnz;
1418 for (y = 0; y < 4; y++)
1419 for (x = 0; x < 4; x++) {
1420 nnz_pred = l_nnz[y] + t_nnz[x];
1423 luma_start, nnz_pred,
1429 t_nnz[x] = l_nnz[y] = !!nnz;
1436 for (i = 4; i < 6; i++)
1437 for (y = 0; y < 2; y++)
1438 for (x = 0; x < 2; x++) {
1439 nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1445 t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1459 int linesize, int uvlinesize, int simple)
1461 AV_COPY128(top_border, src_y + 15 * linesize);
1463 AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1464 AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1470 uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
1471 int mb_y, int mb_width, int simple, int xchg)
1473 uint8_t *top_border_m1 = top_border - 32;
1475 src_cb -= uvlinesize;
1476 src_cr -= uvlinesize;
1478 #define XCHG(a, b, xchg) \
1486 XCHG(top_border_m1 + 8, src_y - 8, xchg);
1487 XCHG(top_border, src_y, xchg);
1488 XCHG(top_border + 8, src_y + 8, 1);
1489 if (mb_x < mb_width - 1)
1490 XCHG(top_border + 32, src_y + 16, 1);
1494 if (!simple || !mb_y) {
1495 XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1496 XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1497 XCHG(top_border + 16, src_cb, 1);
1498 XCHG(top_border + 24, src_cr, 1);
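/* Note: XCHG() either copies or swaps 8/16 bytes between the row above the
 * macroblock and the saved top_border scratch line; xchg_mb_border() appears to use
 * this so intra prediction sees the unfiltered reconstruction of its neighbours
 * even though the loop filter later rewrites those samples. */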
1548 int *copy_buf, int vp7)
1552 if (!mb_x && mb_y) {
1586 int x, y, mode, nnz;
1602 const uint8_t lo = is_vp7 ? 128 : 127;
1603 const uint8_t hi = is_vp7 ? 128 : 129;
1604 uint8_t tr_top[4] = { lo, lo, lo, lo };
1612 if (mb_y && mb_x == s->mb_width - 1) {
1613 tr = tr_right[-1] * 0x01010101u;
1620 for (y = 0; y < 4; y++) {
1622 for (x = 0; x < 4; x++) {
1627 if ((y == 0 || x == 3) && mb_y == 0) {
1630 topright = tr_right;
1633 mb_y + y, &copy, is_vp7);
1635 dst = copy_dst + 12;
1639 AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1645 copy_dst[3] = ptr[4 * x - s->linesize - 1];
1654 copy_dst[11] = ptr[4 * x - 1];
1655 copy_dst[19] = ptr[4 * x + s->linesize - 1];
1656 copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1657 copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1686 mb_x, mb_y, is_vp7);
1697 { 0, 1, 2, 1, 2, 1, 2, 1 },
1699 { 0, 3, 5, 3, 5, 3, 5, 3 },
1700 { 0, 2, 3, 2, 3, 2, 3, 2 },
1722 int x_off, int y_off, int block_w, int block_h,
1729 int src_linesize = linesize;
1731 int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1732 int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1734 x_off += mv->x >> 2;
1735 y_off += mv->y >> 2;
1739 src += y_off * linesize + x_off;
1740 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1741 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1743 src - my_idx * linesize - mx_idx,
1745 block_w + subpel_idx[1][mx],
1746 block_h + subpel_idx[1][my],
1747 x_off - mx_idx, y_off - my_idx,
1752 mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1755 mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1756 linesize, block_h, 0, 0);
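/* Note: luma motion vectors are in quarter-pel units (integer part mv >> 2, the
 * fractional part selects the subpel MC function). When the filter footprint,
 * block size plus the margins listed in subpel_idx[], would read outside the
 * frame, the source patch is first copied via emulated_edge_mc() into an
 * edge-emulation buffer and MC reads from that copy instead. */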
1780 int x_off, int y_off, int block_w, int block_h,
1787 int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1788 int my = mv->y & 7, my_idx = subpel_idx[0][my];
1790 x_off += mv->x >> 3;
1791 y_off += mv->y >> 3;
1794 src1 += y_off * linesize + x_off;
1795 src2 += y_off * linesize + x_off;
1797 if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1798 y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1800 src1 - my_idx * linesize - mx_idx,
1802 block_w + subpel_idx[1][mx],
1803 block_h + subpel_idx[1][my],
1804 x_off - mx_idx, y_off - my_idx, width, height);
1806 mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1809 src2 - my_idx * linesize - mx_idx,
1810 EDGE_EMU_LINESIZE, linesize,
1811 block_w + subpel_idx[1][mx],
1812 block_h + subpel_idx[1][my],
1813 x_off - mx_idx, y_off - my_idx, width, height);
1815 mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1817 mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1818 mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1822 mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1823 mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
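/* Note: vp8_mc_chroma() handles both chroma planes in one call; chroma vectors are
 * in eighth-pel units (integer part mv >> 3, fraction mv & 7), otherwise the edge
 * emulation follows the same pattern as the luma path above. */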
1830 int bx_off, int by_off, int block_w, int block_h,
1837 ref_frame, mv, x_off + bx_off, y_off + by_off,
1838 block_w, block_h, width, height, s->linesize,
1857 dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1858 &uvmv, x_off + bx_off, y_off + by_off,
1859 block_w, block_h, width, height, s->uvlinesize,
1870 if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1871 int x_off = mb_x << 4, y_off = mb_y << 4;
1872 int mx = (mb->mv.x >> 2) + x_off + 8;
1873 int my = (mb->mv.y >> 2) + y_off;
1875 int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1880 off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1892 int x_off = mb_x << 4, y_off = mb_y << 4;
1900 0, 0, 16, 16, width, height, &mb->mv);
1907 for (y = 0; y < 4; y++) {
1908 for (x = 0; x < 4; x++) {
1910 ref, &bmv[4 * y + x],
1911 4 * x + x_off, 4 * y + y_off, 4, 4,
1922 for (y = 0; y < 2; y++) {
1923 for (x = 0; x < 2; x++) {
1924 uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1925 mb->bmv[2 * y * 4 + 2 * x + 1].x +
1926 mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1927 mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1928 uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1929 mb->bmv[2 * y * 4 + 2 * x + 1].y +
1930 mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1931 mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
1940 &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1949 0, 0, 16, 8, width, height, &bmv[0]);
1951 0, 8, 16, 8, width, height, &bmv[1]);
1955 0, 0, 8, 16, width, height, &bmv[0]);
1957 8, 0, 8, 16, width, height, &bmv[1]);
1961 0, 0, 8, 8, width, height, &bmv[0]);
1963 8, 0, 8, 8, width, height, &bmv[1]);
1965 0, 8, 8, 8, width, height, &bmv[2]);
1967 8, 8, 8, 8, width, height, &bmv[3]);
1979 for (y = 0; y < 4; y++) {
1982 if (nnz4 & ~0x01010101) {
1983 for (x = 0; x < 4; x++) {
2004 for (ch = 0; ch < 2; ch++) {
2007 uint8_t *ch_dst = dst[1 + ch];
2008 if (nnz4 & ~0x01010101) {
2009 for (y = 0; y < 2; y++) {
2010 for (x = 0; x < 2; x++) {
2013 td->block[4 + ch][(y << 1) + x],
2017 td->block[4 + ch][(y << 1) + x],
2021 goto chroma_idct_end;
2038 int interior_limit, filter_level;
2052 filter_level = av_clip_uintp2(filter_level, 6);
2054 interior_limit = filter_level;
2059 interior_limit = FFMAX(interior_limit, 1);
2069 int mb_x, int mb_y, int is_vp7)
2071 int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2077 static const uint8_t hev_thresh_lut[2][64] = {
2078 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2079 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2080 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2082 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2083 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2084 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2092 bedge_lim_y = filter_level;
2093 bedge_lim_uv = filter_level * 2;
2094 mbedge_lim = filter_level + 2;
2097 bedge_lim_uv = filter_level * 2 + inner_limit;
2098 mbedge_lim = bedge_lim_y + 4;
2101 hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2105 mbedge_lim, inner_limit, hev_thresh);
2107 mbedge_lim, inner_limit, hev_thresh);
2110 #define H_LOOP_FILTER_16Y_INNER(cond) \
2111 if (cond && inner_filter) { \
2112 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2113 bedge_lim_y, inner_limit, \
2115 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2116 bedge_lim_y, inner_limit, \
2118 s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2119 bedge_lim_y, inner_limit, \
2121 s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2122 uvlinesize, bedge_lim_uv, \
2123 inner_limit, hev_thresh); \
2130 mbedge_lim, inner_limit, hev_thresh);
2132 mbedge_lim, inner_limit, hev_thresh);
2137 linesize, bedge_lim_y,
2138 inner_limit, hev_thresh);
2140 linesize, bedge_lim_y,
2141 inner_limit, hev_thresh);
2143 linesize, bedge_lim_y,
2144 inner_limit, hev_thresh);
2146 dst[2] + 4 * uvlinesize,
2147 uvlinesize, bedge_lim_uv,
2148 inner_limit, hev_thresh);
2158 int mbedge_lim, bedge_lim;
2167 bedge_lim = 2 * filter_level + inner_limit;
2168 mbedge_lim = bedge_lim + 4;
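/* Note: the simple loop filter only touches luma; its inner block-edge limit is
 * 2 * filter_level + inner_limit and the macroblock-edge limit adds 4. The full
 * filter_mb() additionally filters chroma and derives the high-edge-variance
 * threshold from hev_thresh_lut[s->keyframe][filter_level]. */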
2187 #define MARGIN (16 << 2)
2197 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2199 ((s->mb_width + 1) * (mb_y + 1) + 1);
2206 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2211 prev_frame && prev_frame->seg_map ?
2234 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2236 int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2237 if (otd->thread_mb_pos < tmp) { \
2238 pthread_mutex_lock(&otd->lock); \
2239 td->wait_mb_pos = tmp; \
2241 if (otd->thread_mb_pos >= tmp) \
2243 pthread_cond_wait(&otd->cond, &otd->lock); \
2245 td->wait_mb_pos = INT_MAX; \
2246 pthread_mutex_unlock(&otd->lock); \
2250 #define update_pos(td, mb_y, mb_x) \
2252 int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2253 int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2255 int is_null = !next_td || !prev_td; \
2256 int pos_check = (is_null) ? 1 \
2257 : (next_td != td && \
2258 pos >= next_td->wait_mb_pos) || \
2260 pos >= prev_td->wait_mb_pos); \
2261 td->thread_mb_pos = pos; \
2262 if (sliced_threading && pos_check) { \
2263 pthread_mutex_lock(&td->lock); \
2264 pthread_cond_broadcast(&td->cond); \
2265 pthread_mutex_unlock(&td->lock); \
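/* Note: with slice threading each row decoder publishes its progress as a packed
 * position ((mb_y << 16) | mb_x) in td->thread_mb_pos. check_thread_pos() sleeps on
 * the other thread's condition variable until that thread has reached the requested
 * macroblock, and update_pos() stores the new position and broadcasts td->cond when
 * a neighbouring row thread may be waiting on it. */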
2269 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2270 #define update_pos(td, mb_y, mb_x) while(0)
2274 int jobnr, int threadnr, int is_vp7)
2279 int mb_x, mb_xy = mb_y * s->mb_width;
2292 prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2296 next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2306 memset(mb - 1, 0, sizeof(*mb));
2310 if (!is_vp7 || mb_y == 0)
2316 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2318 if (prev_td != td) {
2319 if (threadnr != 0) {
2321 mb_x + (is_vp7 ? 2 : 1),
2322 mb_y - (is_vp7 ? 2 : 1));
2325 mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2326 mb_y - (is_vp7 ? 2 : 1));
2333 dst[2] - dst[1], 2);
2337 prev_frame && prev_frame->seg_map ?
2338 prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2369 if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2395 int jobnr, int threadnr)
2401 int jobnr, int threadnr)
2407 int jobnr, int threadnr, int is_vp7)
2429 prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2433 next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2435 for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2439 (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2444 if (num_jobs == 1) {
2456 filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2466 int jobnr, int threadnr)
2472 int jobnr, int threadnr)
2479 int threadnr, int is_vp7)
2488 for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2508 int jobnr, int threadnr)
2514 int jobnr, int threadnr)
2525 int ret, i, referenced, num_jobs;
2554 for (i = 0; i < 5; i++)
2556 &s->frames[i] != prev_frame &&
2579 "Discarding interframe without a prior keyframe!\n");
2584 curframe->tf.f->key_frame = s->keyframe;
2611 s->linesize = curframe->tf.f->linesize[0];
2684 #if CONFIG_VP7_DECODER
2755 #if CONFIG_VP7_DECODER
2767 #if CONFIG_VP8_DECODER
2783 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2798 s->prob[0] = s_src->prob[!s_src->update_probabilities];
2804 if (s_src->frames[i].tf.f->data[0]) {
2805 int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2811 s->framep[0] = REBASE(s_src->next_framep[0]);
2812 s->framep[1] = REBASE(s_src->next_framep[1]);
2813 s->framep[2] = REBASE(s_src->next_framep[2]);
2814 s->framep[3] = REBASE(s_src->next_framep[3]);
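/* Note: for frame threading, update_thread_context() copies the active probability
 * set and re-references the source context's frames; REBASE() translates a frame
 * pointer from s_src->frames[] into the matching slot of this context's frames[]. */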
2820 #if CONFIG_VP7_DECODER
2827 .init = vp7_decode_init,
2829 .decode = vp7_decode_frame,
2835 #if CONFIG_VP8_DECODER