47 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
92 int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
93 ((height >> log2_min_cb_size) + 1);
147 uint8_t luma_weight_l0_flag[16];
148 uint8_t chroma_weight_l0_flag[16];
149 uint8_t luma_weight_l1_flag[16];
150 uint8_t chroma_weight_l1_flag[16];
151 int luma_log2_weight_denom;
154 if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
160 int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
161 if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
170 if (!luma_weight_l0_flag[i]) {
177 chroma_weight_l0_flag[i] = get_bits1(gb);
180 chroma_weight_l0_flag[i] = 0;
183 if (luma_weight_l0_flag[i]) {
185 if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
190 if (chroma_weight_l0_flag[i]) {
191 for (j = 0; j < 2; j++) {
195 if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
196 || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
214 if (!luma_weight_l1_flag[i]) {
221 chroma_weight_l1_flag[i] = get_bits1(gb);
224 chroma_weight_l1_flag[i] = 0;
227 if (luma_weight_l1_flag[i]) {
229 if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
234 if (chroma_weight_l1_flag[i]) {
235 for (j = 0; j < 2; j++) {
239 if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
240 || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
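/* The "(int8_t)delta != delta" comparisons above are compact range checks: a
 * delta weight is valid only if it fits in a signed 8-bit value, i.e. lies in
 * [-128, 127], matching the spec range for delta_luma/chroma_weight_lX.  The
 * +/-(1 << 17) bound on the chroma offset delta looks like an overflow guard
 * for the later offset derivation rather than the exact spec limit.
 * Illustrative idiom (not part of hevcdec.c):
 *     static int fits_in_int8(int v) { return (int8_t)v == v; }
 *     // fits_in_int8(127) -> 1, fits_in_int8(128) -> 0, fits_in_int8(-129) -> 0
 */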
263 int prev_delta_msb = 0;
264 unsigned int nb_sps = 0, nb_sh;
282 for (i = 0; i < rps->nb_refs; i++) {
302 if (i && i != nb_sps)
303 delta += prev_delta_msb;
309 prev_delta_msb = delta;
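/* In decode_lt_rps the POC MSB cycle of each long-term picture appears to be
 * coded differentially: except for the first entry of the SPS-signalled group
 * and the first slice-header-signalled entry (the "i && i != nb_sps" test
 * above), the parsed value is added to the running prev_delta_msb before being
 * stored back into it.  E.g. raw deltas 1, 0, 2 for consecutive slice-header
 * entries accumulate to cycles 1, 1, 3. */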
322 unsigned int num = 0, den = 0;
327 avctx->width  = sps->width  - ow->left_offset - ow->right_offset;
328 avctx->height = sps->height - ow->top_offset  - ow->bottom_offset;
359 if (num != 0 && den != 0)
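/* Lines 327/328 export the coded SPS dimensions minus the output (display)
 * window to avctx.  The num/den validated at line 359 are presumably
 * num_units_in_tick and time_scale taken from VPS or VUI timing info; the
 * frame rate is their ratio time_scale / num_units_in_tick, e.g.
 * time_scale = 50000 with num_units_in_tick = 1000 gives 50 fps.
 * Minimal sketch of the reduction step (assumed, not quoted from hevcdec.c):
 *     AVRational fr;
 *     av_reduce(&fr.num, &fr.den, time_scale, num_units_in_tick, 1 << 30);
 */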
372 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
373                      CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
374                      CONFIG_HEVC_NVDEC_HWACCEL + \
375                      CONFIG_HEVC_VAAPI_HWACCEL + \
376                      CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
377                      CONFIG_HEVC_VDPAU_HWACCEL)
383 #if CONFIG_HEVC_DXVA2_HWACCEL
386 #if CONFIG_HEVC_D3D11VA_HWACCEL
390 #if CONFIG_HEVC_VAAPI_HWACCEL
393 #if CONFIG_HEVC_VDPAU_HWACCEL
396 #if CONFIG_HEVC_NVDEC_HWACCEL
399 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
404 #if CONFIG_HEVC_DXVA2_HWACCEL
407 #if CONFIG_HEVC_D3D11VA_HWACCEL
411 #if CONFIG_HEVC_VAAPI_HWACCEL
414 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
417 #if CONFIG_HEVC_NVDEC_HWACCEL
422 #if CONFIG_HEVC_VDPAU_HWACCEL
425 #if CONFIG_HEVC_NVDEC_HWACCEL
431 #if CONFIG_HEVC_VAAPI_HWACCEL
438 #if CONFIG_HEVC_NVDEC_HWACCEL
474 for (i = 0; i < 3; i++) {
483 for(c_idx = 0; c_idx < c_count; c_idx++) {
549 if (sps->width != last_sps->width || sps->height != last_sps->height ||
551 last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
571 int slice_address_length;
581 "Invalid slice segment address: %u.\n",
630 "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
646 int numbits, rps_idx;
654 rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
760 "Invalid collocated_ref_idx: %d.\n",
777 "Invalid number of merging MVP candidates: %d.\n",
799 int deblocking_filter_override_flag = 0;
802 deblocking_filter_override_flag = get_bits1(gb);
804 if (deblocking_filter_override_flag) {
809 if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
810 tc_offset_div2 < -6 || tc_offset_div2 > 6) {
812 "Invalid deblock filter offsets: %d, %d\n",
813 beta_offset_div2, tc_offset_div2);
856 if (offset_len < 1 || offset_len > 32) {
892 for (i = 0; i < length; i++)
901 "The slice_qp %d is outside the valid range "
933 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
935 #define SET_SAO(elem, value) \
937     if (!sao_merge_up_flag && !sao_merge_left_flag) \
939     else if (sao_merge_left_flag) \
940         sao->elem = CTB(s->sao, rx-1, ry).elem; \
941     else if (sao_merge_up_flag) \
942         sao->elem = CTB(s->sao, rx, ry-1).elem; \
950 int sao_merge_left_flag = 0;
951 int sao_merge_up_flag = 0;
961 if (ry > 0 && !sao_merge_left_flag) {
986 for (i = 0; i < 4; i++)
990 for (i = 0; i < 4; i++) {
999 } else if (c_idx != 2) {
1005 for (i = 0; i < 4; i++) {
1013 sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
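/* SAO merge: the SET_SAO macro above copies every SAO parameter wholesale from
 * the left or upper CTB when the corresponding merge flag is set, so a merged
 * CTB re-reads nothing from the bitstream.  When offsets are parsed, line 1013
 * scales them by log2_sao_offset_scale (a range-extension high-bit-depth tool):
 * e.g. a parsed offset of 3 with a scale of 2 is applied as 12. */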
1025 if (log2_res_scale_abs_plus1 != 0) {
1028 (1 - 2 * res_scale_sign_flag);
1038                               int xBase, int yBase, int cb_xBase, int cb_yBase,
1039                               int log2_cb_size, int log2_trafo_size,
1040                               int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1043 const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1047 int trafo_size = 1 << log2_trafo_size;
1053 if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1057 int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1059 (cbf_cb[1] || cbf_cr[1]));
1071 "The cu_qp_delta %d is outside the valid range "
1085 if (cu_chroma_qp_offset_flag) {
1086 int cu_chroma_qp_offset_idx = 0;
1090 "cu_chroma_qp_offset_idx not yet tested.\n");
1124 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1125 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1136 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1140 log2_trafo_size_c, scan_idx_c, 1);
1148 int size = 1 << log2_trafo_size_c;
1152 for (i = 0; i < (size * size); i++) {
1165 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1169 log2_trafo_size_c, scan_idx_c, 2);
1177 int size = 1 << log2_trafo_size_c;
1181 for (i = 0; i < (size * size); i++) {
1188 int trafo_size_h = 1 << (log2_trafo_size + 1);
1189 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1193 trafo_size_h, trafo_size_v);
1194 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1198 log2_trafo_size, scan_idx_c, 1);
1203 trafo_size_h, trafo_size_v);
1204 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1208 log2_trafo_size, scan_idx_c, 2);
1213 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1214 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1220 trafo_size_h, trafo_size_v);
1221 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1222 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1224 } else if (blk_idx == 3) {
1225 int trafo_size_h = 1 << (log2_trafo_size + 1);
1226 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1228 trafo_size_h, trafo_size_v);
1233 trafo_size_h, trafo_size_v);
1234 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1235 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
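/* Chroma handling in hls_transform_unit: when the luma TU reaches the minimum
 * 4x4 size, the chroma block covering all four luma sub-blocks is predicted
 * and coded only once, at blk_idx == 3, using the parent's xBase/yBase and
 * log2_trafo_size (the branch above).  The i-loops with a
 * 1 << (log2_trafo_size_c + vshift) vertical stride appear to cover the 4:2:2
 * case, where each chroma CB holds two vertically stacked transform blocks. */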
1245 int cb_size = 1 << log2_cb_size;
1253 for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1254 for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1255 s->is_pcm[i + j * min_pu_width] = 2;
1259                               int xBase, int yBase, int cb_xBase, int cb_yBase,
1260                               int log2_cb_size, int log2_trafo_size,
1261                               int trafo_depth, int blk_idx,
1262                               const int *base_cbf_cb, const int *base_cbf_cr)
1270 cbf_cb[0] = base_cbf_cb[0];
1271 cbf_cb[1] = base_cbf_cb[1];
1272 cbf_cr[0] = base_cbf_cr[0];
1273 cbf_cr[1] = base_cbf_cr[1];
1276 if (trafo_depth == 1) {
1292 if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1294 trafo_depth < lc->cu.max_trafo_depth &&
1309 if (trafo_depth == 0 || cbf_cb[0]) {
1316 if (trafo_depth == 0 || cbf_cr[0]) {
1324 if (split_transform_flag) {
1325 const int trafo_size_split = 1 << (log2_trafo_size - 1);
1326 const int x1 = x0 + trafo_size_split;
1327 const int y1 = y0 + trafo_size_split;
1329 #define SUBDIVIDE(x, y, idx) \
1331     ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1332                              log2_trafo_size - 1, trafo_depth + 1, idx, \
1351 cbf_cb[0] || cbf_cr[0] ||
1357 log2_cb_size, log2_trafo_size,
1358 blk_idx, cbf_luma, cbf_cb, cbf_cr);
1364 for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1365 for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1366 int x_tu = (x0 + j) >> log2_min_tu_size;
1367 int y_tu = (y0 + i) >> log2_min_tu_size;
1368 s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1385 int cb_size = 1 << log2_cb_size;
1440                         int block_w, int block_h, int luma_weight, int luma_offset)
1444 ptrdiff_t srcstride = ref->linesize[0];
1453 x_off += mv->x >> 2;
1454 y_off += mv->y >> 2;
1465 edge_emu_stride, srcstride,
1469 pic_width, pic_height);
1471 srcstride = edge_emu_stride;
1476 block_h, mx, my, block_w);
1480 luma_weight, luma_offset, mx, my, block_w);
1500                        AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1501                        int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1504 ptrdiff_t src0stride = ref0->linesize[0];
1505 ptrdiff_t src1stride = ref1->linesize[0];
1508 int mx0 = mv0->x & 3;
1509 int my0 = mv0->y & 3;
1510 int mx1 = mv1->x & 3;
1511 int my1 = mv1->y & 3;
1514 int x_off0 = x_off + (mv0->x >> 2);
1515 int y_off0 = y_off + (mv0->y >> 2);
1516 int x_off1 = x_off + (mv1->x >> 2);
1517 int y_off1 = y_off + (mv1->y >> 2);
1531 edge_emu_stride, src0stride,
1535 pic_width, pic_height);
1537 src0stride = edge_emu_stride;
1548 edge_emu_stride, src1stride,
1552 pic_width, pic_height);
1554 src1stride = edge_emu_stride;
1558 block_h, mx0, my0, block_w);
1561 block_h, mx1, my1, block_w);
1591                           ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1592                           int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1597 const Mv *mv = &current_mv->mv[reflist];
1603 intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1604 intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1605 intptr_t _mx = mx << (1 - hshift);
1606 intptr_t _my = my << (1 - vshift);
1608 x_off += mv->x >> (2 + hshift);
1609 y_off += mv->y >> (2 + vshift);
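/* Luma MVs are in quarter-pel units; on a subsampled chroma axis the effective
 * precision becomes eighth-pel.  av_mod_uintp2(mv->x, 2 + hshift) keeps the low
 * (2 + hshift) bits as the fractional part, mv->x >> (2 + hshift) is the whole
 * chroma-sample offset, and the "<< (1 - hshift)" rescales 4:4:4 fractions to
 * eighth-pel.  Standalone sketch of the same mapping (illustrative, not part
 * of hevcdec.c):
 *
 *     static void luma_mv_to_chroma(int mv_comp, int chroma_shift,
 *                                   int *whole, int *frac8)
 *     {
 *         *whole = mv_comp >> (2 + chroma_shift);
 *         *frac8 = (mv_comp & ((1 << (2 + chroma_shift)) - 1)) << (1 - chroma_shift);
 *     }
 *     // 4:2:0 (chroma_shift = 1), mv_comp = 13  ->  whole = 1, frac8 = 5 (5/8 pel)
 */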
1620 edge_emu_stride, srcstride,
1624 pic_width, pic_height);
1627 srcstride = edge_emu_stride;
1631 block_h, _mx, _my, block_w);
1635 chroma_weight, chroma_offset, _mx, _my, block_w);
1656                          int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1661 ptrdiff_t src1stride = ref0->linesize[cidx+1];
1662 ptrdiff_t src2stride = ref1->linesize[cidx+1];
1667 Mv *mv0 = &current_mv->mv[0];
1668 Mv *mv1 = &current_mv->mv[1];
1672 intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1673 intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1674 intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1675 intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1676 intptr_t _mx0 = mx0 << (1 - hshift);
1677 intptr_t _my0 = my0 << (1 - vshift);
1678 intptr_t _mx1 = mx1 << (1 - hshift);
1679 intptr_t _my1 = my1 << (1 - vshift);
1681 int x_off0 = x_off + (mv0->x >> (2 + hshift));
1682 int y_off0 = y_off + (mv0->y >> (2 + vshift));
1683 int x_off1 = x_off + (mv1->x >> (2 + hshift));
1684 int y_off1 = y_off + (mv1->y >> (2 + vshift));
1698 edge_emu_stride, src1stride,
1702 pic_width, pic_height);
1705 src1stride = edge_emu_stride;
1717 edge_emu_stride, src2stride,
1721 pic_width, pic_height);
1724 src2stride = edge_emu_stride;
1728 block_h, _mx0, _my0, block_w);
1731 src2, src2stride, lc->tmp,
1732 block_h, _mx1, _my1, block_w);
1735 src2, src2stride, lc->tmp,
1742 _mx1, _my1, block_w);
1749 int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1756                                 int nPbH, int log2_cb_size, int part_idx,
1768 if (inter_pred_idc != PRED_L1) {
1776 part_idx, merge_idx, mv, mvp_flag, 0);
1781 if (inter_pred_idc != PRED_L0) {
1794 part_idx, merge_idx, mv, mvp_flag, 1);
1802                                 int log2_cb_size, int partIdx, int idx)
1804 #define POS(c_idx, x, y) \
1805     &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1806                            (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1809 struct MvField current_mv = {{{ 0 }}};
1821 int x_cb = x0 >> log2_min_cb_size;
1822 int y_cb = y0 >> log2_min_cb_size;
1838 partIdx, merge_idx, &current_mv);
1841 partIdx, merge_idx, &current_mv);
1849 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1852 ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1858 ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1871 &current_mv.mv[0], x0, y0, nPbW, nPbH,
1877 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1880 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1890 &current_mv.mv[1], x0, y0, nPbW, nPbH,
1896 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1900 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1910 &current_mv.mv[0], x0, y0, nPbW, nPbH,
1911 ref1->frame, &current_mv.mv[1], &current_mv);
1915 x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1918 x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1927 int prev_intra_luma_pred_flag)
1945 int intra_pred_mode;
1950 if ((y0 - 1) < y_ctb)
1953 if (cand_left == cand_up) {
1954 if (cand_left < 2) {
1959 candidate[0] = cand_left;
1960 candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
1961 candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
1964 candidate[0] = cand_left;
1965 candidate[1] = cand_up;
1975 if (prev_intra_luma_pred_flag) {
1976 intra_pred_mode = candidate[lc->pu.mpm_idx];
1978 if (candidate[0] > candidate[1])
1980 if (candidate[0] > candidate[2])
1982 if (candidate[1] > candidate[2])
1986 for (i = 0; i < 3; i++)
1987 if (intra_pred_mode >= candidate[i])
1994 for (i = 0; i < size_in_pus; i++) {
1995 memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
1996 intra_pred_mode, size_in_pus);
1998 for (j = 0; j < size_in_pus; j++) {
2003 return intra_pred_mode;
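/* Most-probable-mode derivation (spec 8.4.2): three MPM candidates come from
 * the left and above neighbours, with the above candidate forced to a default
 * when y0 - 1 crosses the CTB boundary (the y_ctb check above).  If both
 * neighbours are the same angular mode m, the candidates are m and its two
 * angular neighbours, wrapped by the "& 31" arithmetic; e.g. m = 10 yields
 * {10, 9, 11}.  When prev_intra_luma_pred_flag is 0, the remaining mode is
 * remapped past the sorted candidates, which is what the three compares after
 * the sort do.  Illustrative sketch (not part of hevcdec.c):
 *
 *     static int remap_rem_mode(int rem_mode, const int sorted_cand[3])
 *     {
 *         int i;
 *         for (i = 0; i < 3; i++)
 *             if (rem_mode >= sorted_cand[i])
 *                 rem_mode++;
 *         return rem_mode;
 *     }
 *     // candidates {0, 1, 26}: rem_mode 24 maps to 27
 */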
2007                                            int log2_cb_size, int ct_depth)
2014 for (y = 0; y < length; y++)
2020 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2021 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2027 static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2028 uint8_t prev_intra_luma_pred_flag[4];
2030 int pb_size = (1 << log2_cb_size) >> split;
2031 int side = split + 1;
2035 for (i = 0; i < side; i++)
2036 for (j = 0; j < side; j++)
2039 for (i = 0; i < side; i++) {
2040 for (j = 0; j < side; j++) {
2041 if (prev_intra_luma_pred_flag[2 * i + j])
2048 prev_intra_luma_pred_flag[2 * i + j]);
2053 for (i = 0; i < side; i++) {
2054 for (j = 0; j < side; j++) {
2056 if (chroma_mode != 4) {
2069 if (chroma_mode != 4) {
2073 mode_idx = intra_chroma_table[chroma_mode];
2080 if (chroma_mode != 4) {
2096 int pb_size = 1 << log2_cb_size;
2104 if (size_in_pus == 0)
2106 for (j = 0; j < size_in_pus; j++)
2107 memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2109 for (j = 0; j < size_in_pus; j++)
2110 for (k = 0; k < size_in_pus; k++)
2116 int cb_size = 1 << log2_cb_size;
2119 int length = cb_size >> log2_min_cb_size;
2121 int x_cb = x0 >> log2_min_cb_size;
2122 int y_cb = y0 >> log2_min_cb_size;
2123 int idx = log2_cb_size - 2;
2134 for (x = 0; x < 4; x++)
2146 x = y_cb * min_cb_width + x_cb;
2147 for (y = 0; y < length; y++) {
2148 memset(&s->skip_flag[x], skip_flag, length);
2153 x = y_cb * min_cb_width + x_cb;
2154 for (y = 0; y < length; y++) {
2181 log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2207 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2211 hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2215 hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2219 hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2223 hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2227 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2228 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2229 hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
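/* Inter partition geometry: PART_2NxN / PART_Nx2N halve the CB in one
 * direction, the cb_size/4 and cb_size*3/4 variants above correspond to the
 * asymmetric partitions (2NxnU, 2NxnD, nLx2N, nRx2N), and the four quarter
 * calls at 2227-2229 are PART_NxN, only allowed at the minimum CB size.  The
 * trailing idx / idx - 1 / idx - 2 argument appears to select the
 * block-width-dependent entry of the DSP function tables. */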
2235 int rqt_root_cbf = 1;
2242 const static int cbf[2] = { 0 };
2248 log2_cb_size, 0, 0, cbf, cbf);
2261 x = y_cb * min_cb_width + x_cb;
2262 for (y = 0; y < length; y++) {
2267 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2268 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2278                               int log2_cb_size, int cb_depth)
2281 const int cb_size = 1 << log2_cb_size;
2286 if (x0 + cb_size <= s->ps.sps->width &&
2287 y0 + cb_size <= s->ps.sps->height &&
2306 const int cb_size_split = cb_size >> 1;
2307 const int x1 = x0 + cb_size_split;
2308 const int y1 = y0 + cb_size_split;
2316 if (more_data && x1 < s->ps.sps->width) {
2321 if (more_data && y1 < s->ps.sps->height) {
2326 if (more_data && x1 < s->ps.sps->width &&
2327 y1 < s->ps.sps->height) {
2333 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2334 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2338 return ((x1 + cb_size_split) < s->ps.sps->width ||
2346 if ((!((x0 + cb_size) %
2353 return !end_of_slice_flag;
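/* hls_coding_quadtree recurses while split_cu_flag is set: the CB is divided
 * at x1/y1 into four cb_size_split quadrants, and quadrants starting outside
 * the picture (the x1/y1 bounds checks above) are simply skipped, which is how
 * CTUs straddling the right/bottom edge are handled.  The qp_block_mask tests
 * at 2267/2268 and 2333/2334 appear to refresh the luma QP predictor once the
 * position has advanced past a complete quantization group. */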
2368 int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2373 if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2399 if (ctb_addr_in_slice <= 0)
2401 if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2434 while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2454 if (more_data < 0) {
2489 int *ctb_row_p = input_ctb_row;
2490 int ctb_row = ctb_row_p[job];
2500 ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2506 while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2507 int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2508 int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2522 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2525 if (more_data < 0) {
2536 if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2542 if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height) {
2547 ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2550 if (x_ctb >= s->ps.sps->width) {
2558 s->tab_slice_address[ctb_addr_rs] = -1;
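/* Wavefront parallel processing: each job in hls_decode_entry_wpp decodes one
 * CTB row.  The row's bitstream segment is located via sh.offset[]/sh.size[]
 * (line 2500), its CABAC state is presumably inherited from the saved state of
 * the row above, and the thread progress calls keep every row a couple of CTBs
 * behind its upper neighbour.  A row whose data runs out before the picture
 * edge (the check at 2536) is treated as corrupt, and tab_slice_address[] is
 * set to -1 at 2558 to mark the affected CTBs. */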
2567 int length = nal->size;
2572 int64_t startheader, cmpt = 0;
2603 for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2612 for (j = 0, cmpt = 0, startheader = offset
2625 if (length < offset) {
2725 const int mapping[3] = {2, 0, 1};
2726 const int chroma_den = 50000;
2727 const int luma_den = 10000;
2734 for (i = 0; i < 3; i++) {
2735 const int j = mapping[i];
2755 "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2764 "min_luminance=%f, max_luminance=%f\n",
2863 int ctb_addr_ts, ret;
2957 if (s->max_ra == INT_MAX) {
2979 } else if (!s->ref) {
2986 "Non-matching NAL types of the VCL NALUs: %d %d\n",
2996 "Error constructing the reference lists for the current slice.\n");
3020 if (ctb_addr_ts < 0) {
3049 int eos_at_start = 1;
3062 "Error splitting the input into NAL units.\n");
3089 if (ret >= 0 && s->overlap > 2)
3093 "Error parsing NAL unit #%d.\n", i);
3108 for (i = 0; i < 16; i++)
3109 av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3138 for (i = 0; frame->data[i]; i++) {
3146 for (j = 0; j < h; j++) {
3151 (const uint16_t *) src, w);
3204 int new_extradata_size;
3218 &new_extradata_size);
3219 if (new_extradata && new_extradata_size > 0) {
3233 "hardware accelerator failed to decode picture\n");
3314 for (i = 0; i < 3; i++) {
3535 #define OFFSET(x) offsetof(HEVCContext, x)
3536 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3539     { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3541     { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3559 .priv_class = &hevc_decoder_class,
3571 #if CONFIG_HEVC_DXVA2_HWACCEL
3574 #if CONFIG_HEVC_D3D11VA_HWACCEL
3577 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3580 #if CONFIG_HEVC_NVDEC_HWACCEL
3583 #if CONFIG_HEVC_VAAPI_HWACCEL
3586 #if CONFIG_HEVC_VDPAU_HWACCEL
3589 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL