53 int16_t *block, int n, int qscale)
55 int i, level, nCoeffs;
56 const uint16_t *quant_matrix;
63 for(i=1;i<=nCoeffs;i++) {
69 level = (int)(level * qscale * quant_matrix[j]) >> 3;
70 level = (level - 1) | 1;
73 level = (int)(level * qscale * quant_matrix[j]) >> 3;
74 level = (level - 1) | 1;
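The excerpt above (lines 53-74) shows the MPEG-1 intra dequantization step: each coefficient is scaled by qscale and the intra quantization matrix, right-shifted by 3, and then forced to an odd value. A minimal standalone sketch of that per-coefficient arithmetic with the sign handling made explicit (illustrative only, not the libavcodec routine itself):

#include <stdlib.h>

/* Hedged sketch: MPEG-1 style intra dequantization of one coefficient,
 * mirroring the formula in the excerpt above. `quant` stands in for
 * quant_matrix[j]; both inputs are assumed to be small positive ints. */
static int dequant_mpeg1_intra_coeff(int level, int qscale, int quant)
{
    int negative = level < 0;
    int v = abs(level);

    v = (v * qscale * quant) >> 3;  /* scale and drop 3 fractional bits   */
    v = (v - 1) | 1;                /* force the reconstruction to be odd */
    return negative ? -v : v;
}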
82 int16_t *block, int n, int qscale)
84 int i, level, nCoeffs;
85 const uint16_t *quant_matrix;
90 for(i=0; i<=nCoeffs; i++) {
96 level = (((level << 1) + 1) * qscale *
97 ((int) (quant_matrix[j]))) >> 4;
98 level = (level - 1) | 1;
101 level = (((level << 1) + 1) * qscale *
102 ((int) (quant_matrix[j]))) >> 4;
103 level = (level - 1) | 1;
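Lines 82-103 above are the matching inter path, where the reconstruction uses (2*|level| + 1) before scaling and shifts by 4 instead of 3. A comparable sketch (again illustrative, not the actual function body):

#include <stdlib.h>

/* Hedged sketch of the inter formula shown above; `quant` stands in for
 * quant_matrix[j]. */
static int dequant_mpeg1_inter_coeff(int level, int qscale, int quant)
{
    int negative = level < 0;
    int v = abs(level);

    v = (((v << 1) + 1) * qscale * quant) >> 4;  /* (2*|level| + 1) scaling */
    v = (v - 1) | 1;                             /* oddification, as above  */
    return negative ? -v : v;
}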
111 int16_t *block, int n, int qscale)
113 int i, level, nCoeffs;
114 const uint16_t *quant_matrix;
121 for(i=1;i<=nCoeffs;i++) {
127 level = (int)(level * qscale * quant_matrix[j]) >> 3;
130 level = (int)(level * qscale * quant_matrix[j]) >> 3;
138 int16_t *block, int n, int qscale)
140 int i, level, nCoeffs;
141 const uint16_t *quant_matrix;
150 for(i=1;i<=nCoeffs;i++) {
156 level = (int)(level * qscale * quant_matrix[j]) >> 3;
159 level = (int)(level * qscale * quant_matrix[j]) >> 3;
169 int16_t *block, int n, int qscale)
171 int i, level, nCoeffs;
172 const uint16_t *quant_matrix;
179 for(i=0; i<=nCoeffs; i++) {
185 level = (((level << 1) + 1) * qscale *
186 ((int) (quant_matrix[j]))) >> 4;
189 level = (((level << 1) + 1) * qscale *
190 ((int) (quant_matrix[j]))) >> 4;
200 int16_t *block, int n, int qscale)
202 int i, level, qmul, qadd;
211 qadd = (qscale - 1) | 1;
220 for(i=1; i<=nCoeffs; i++) {
224 level = level * qmul - qadd;
226 level = level * qmul + qadd;
234 int16_t *block, int n, int qscale)
236 int i, level, qmul, qadd;
241 qadd = (qscale - 1) | 1;
246 for(i=0; i<=nCoeffs; i++) {
250 level = level * qmul - qadd;
252 level = level * qmul + qadd;
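The two excerpts around lines 200-252 use the H.263-style reconstruction instead of a quantization matrix: qadd is taken from the code above as (qscale - 1) | 1, while qmul = qscale << 1 is assumed here from the usual H.263 formula (it is not visible in the excerpt). A small sketch of the per-coefficient rule:

/* Hedged sketch of H.263-style dequantization; qmul = qscale << 1 is an
 * assumption, only qadd appears in the listing above. */
static int dequant_h263_coeff(int level, int qscale)
{
    const int qmul = qscale << 1;
    const int qadd = (qscale - 1) | 1;

    if (level == 0)
        return 0;
    return level > 0 ? level * qmul + qadd
                     : level * qmul - qadd;
}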
261 int mb_x, int mb_y, int mb_intra, int mb_skipped)
271 memcpy(s->mv, mv, sizeof(*mv));
284 "Interlaced error concealment is not fully implemented\n");
291 memset(dst + h*linesize, 128, 16);
297 memset(dst + h*linesize, 128, 8);
311 for (i=0; i<4; i++) {
404 int chroma_x_shift, int chroma_y_shift,
405 int linesize, int uvlinesize)
428 if (r < 0 || !pic->f->buf[0]) {
436 for (i = 0; pic->f->data[i]; i++) {
451 av_log(avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
458 if (linesize && (linesize != pic->f->linesize[0] ||
461 "get_buffer() failed (stride changed)\n");
468 "get_buffer() failed (uv stride mismatch)\n");
477 "get_buffer() failed to allocate context scratch buffers.\n");
499 for (i = 0; i < 2; i++) {
506 int mb_stride, int mb_width, int mb_height, int b8_stride)
508 const int big_mb_num = mb_stride * (mb_height + 1) + 1;
509 const int mb_array_size = mb_stride * mb_height;
510 const int b8_array_size = b8_stride * mb_height * 2;
531 int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
532 int ref_index_size = 4 * mb_array_size;
534 for (i = 0; mv_size && i < 2; i++) {
551 #define MAKE_WRITABLE(table) \
554 (ret = av_buffer_make_writable(&pic->table)) < 0)\
565 for (i = 0; i < 2; i++) {
587 int chroma_x_shift, int chroma_y_shift, int out_format,
588 int mb_stride, int mb_width, int mb_height, int b8_stride,
589 ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
604 chroma_x_shift, chroma_y_shift,
605 *linesize, *uvlinesize) < 0)
614 mb_stride, mb_width, mb_height, b8_stride);
631 for (i = 0; i < 2; i++) {
667 memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
674 #define UPDATE_TABLE(table)\
677 (!dst->table || dst->table->buffer != src->table->buffer)) {\
678 av_buffer_unref(&dst->table);\
679 dst->table = av_buffer_ref(src->table);\
681 ff_free_picture_tables(dst);\
682 return AVERROR(ENOMEM);\
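The UPDATE_TABLE() macro above re-references a per-picture AVBufferRef from the source picture whenever the destination either has no reference or points at a different underlying buffer. Expanded for a single field it behaves roughly like the hypothetical helper below (a sketch, not part of libavcodec; the real macro additionally frees all picture tables on failure):

#include <libavutil/buffer.h>
#include <libavutil/error.h>

static int update_table_ref(AVBufferRef **dst, AVBufferRef *src)
{
    if (src && (!*dst || (*dst)->buffer != src->buffer)) {
        av_buffer_unref(dst);        /* drop the stale reference           */
        *dst = av_buffer_ref(src);   /* take a new reference to src's data */
        if (!*dst)
            return AVERROR(ENOMEM);
    }
    return 0;
}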
693 for (i = 0; i < 2; i++) {
704 for (i = 0; i < 2; i++) {
757 int yc_size = y_size + 2 * c_size;
777 2 * 64 * sizeof(int), fail)
783 for (i = 0; i < 12; i++) {
794 yc_size * sizeof(int16_t) * 16, fail);
827 #define COPY(a) bak->a = src->a
828 COPY(sc.edge_emu_buffer);
831 COPY(sc.rd_scratchpad);
832 COPY(sc.b_scratchpad);
833 COPY(sc.obmc_scratchpad);
861 for (i = 0; i < 12; i++) {
872 "scratch buffers.\n");
901 if (s1->context_initialized){
933 if (s1->picture[i].f->buf[0] &&
938 #define UPDATE_PICTURE(pic)\
940 ff_mpeg_unref_picture(s->avctx, &s->pic);\
941 if (s1->pic.f && s1->pic.f->buf[0])\
942 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
944 ret = update_picture_tables(&s->pic, &s1->pic);\
953 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
954 ((pic && pic >= old_ctx->picture && \
955 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
956 &new_ctx->picture[pic - old_ctx->picture] : NULL)
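REBASE_PICTURE() above translates a Picture pointer that points into the old context's picture[] array into the pointer at the same index in the new context, and yields NULL for anything outside that array. The same pointer arithmetic in a generic, hypothetical form:

#include <stddef.h>

/* Illustrative only: rebase `p` from old_array to new_array if it points
 * inside old_array, otherwise return NULL. */
static void *rebase_ptr(void *p, void *old_array, void *new_array,
                        size_t elem_size, size_t count)
{
    char *c = p, *old = old_array;

    if (c && c >= old && c < old + elem_size * count)
        return (char *)new_array + (c - old);
    return NULL;
}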
969 (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
970 (char *) &s1->last_time_base);
980 if (s1->bitstream_buffer) {
981 if (s1->bitstream_buffer_size +
985 s1->allocated_bitstream_buffer_size);
993 s1->bitstream_buffer_size);
1002 &s->sc, s1->linesize) < 0) {
1004 "scratch buffers.\n");
1009 "be allocated due to unknown size.\n");
1014 (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
1016 if (!s1->first_field) {
1018 if (s1->current_picture_ptr)
1112 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
1136 yc_size = y_size + 2 * c_size;
1169 mb_array_size * sizeof(float), fail);
1171 mb_array_size * sizeof(float), fail);
1178 for (i = 0; i < 2; i++) {
1180 for (j = 0; j < 2; j++) {
1181 for (k = 0; k < 2; k++) {
1184 mv_table_size * 2 * sizeof(int16_t),
1213 for (i = 0; i < yc_size; i++)
1276 for (i = 0; i < 2; i++) {
1277 for (j = 0; j < 2; j++) {
1278 for (k = 0; k < 2; k++) {
1329 "decoding to AV_PIX_FMT_NONE is not supported.\n");
1340 " reducing to %d\n", nb_slices, max_slices);
1341 nb_slices = max_slices;
1386 if (nb_slices > 1) {
1387 for (i = 0; i < nb_slices; i++) {
1396 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1398 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1437 for (i = 0; i < 2; i++) {
1438 for (j = 0; j < 2; j++) {
1439 for (k = 0; k < 2; k++) {
1515 if (nb_slices > 1) {
1516 for (i = 0; i < nb_slices; i++) {
1527 (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1529 (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1604 if (!picture[i].reference)
1611 if (!pic->f->buf[0])
1624 if (!picture[i].f->buf[0])
1635 "Internal error, picture buffer overflow\n");
1656 if (picture[ret].needs_realloc) {
1667 int i, h_chroma_shift, v_chroma_shift;
1671 for(i=0; i<frame->height; i++)
1675 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1677 0x80, FF_CEIL_RSHIFT(frame->width, h_chroma_shift));
1711 "releasing zombie picture\n");
1771 ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1781 int h_chroma_shift, v_chroma_shift;
1783 &h_chroma_shift, &v_chroma_shift);
1786 "allocating dummy last picture for B frame\n");
1789 "warning: first frame is no keyframe\n");
1792 "allocate dummy last picture for field based first keyframe\n");
1812 for(i=0; i<avctx->height; i++)
1814 0x80, avctx->width);
1818 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1820 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
1825 for(i=0; i<avctx->height; i++)
1855 #if 0 // BUFREF-FIXME
1879 for (i = 0; i < 4; i++) {
1922 static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
1930 *sy = *ey + (*sy - *ey) * (int64_t)*ex / (*ex - *sx);
1937 *ey = *sy + (*ey - *sy) * (int64_t)(maxx - *sx) / (*ex - *sx);
1956 if (clip_line(&sx, &sy, &ex, &ey, w - 1))
1958 if (clip_line(&sy, &sx, &ey, &ex, h - 1))
1961 sx = av_clip(sx, 0, w - 1);
1962 sy = av_clip(sy, 0, h - 1);
1963 ex = av_clip(ex, 0, w - 1);
1964 ey = av_clip(ey, 0, h - 1);
1966 buf[sy * stride + sx] += color;
1975 f = ((ey - sy) << 16) / ex;
1976 for (x = 0; x <= ex; x++) {
1978 fr = (x * f) & 0xFFFF;
1979 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1980 if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1990 f = ((ex - sx) << 16) / ey;
1993 for(y= 0; y <= ey; y++){
1995 fr = (y*f) & 0xFFFF;
1996 buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1997 if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
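draw_line() above rasterizes with 16.16 fixed point: f is the slope scaled by 65536, fr is the fractional part of the current position, and the intensity `color` is split between the two neighbouring pixels in proportion to coverage. The weighting step in isolation (a sketch, assuming the small color values used in the listing so the products stay within int range):

#include <stdint.h>

static void split_coverage(uint8_t *near_px, uint8_t *far_px,
                           int color, unsigned fr /* 0..0xFFFF */)
{
    *near_px += (uint8_t)((color * (0x10000 - fr)) >> 16);
    if (fr)
        *far_px += (uint8_t)((color * fr) >> 16);
}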
2010 int ey, int w, int h, int stride, int color, int tail, int direction)
2019 sx = av_clip(sx, -100, w + 100);
2020 sy = av_clip(sy, -100, h + 100);
2021 ex = av_clip(ex, -100, w + 100);
2022 ey = av_clip(ey, -100, h + 100);
2027 if (dx * dx + dy * dy > 3 * 3) {
2041 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
2042 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
2044 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
2049 int dst_x, int dst_y,
2050 int src_x, int src_y,
2059 mb->source = direction ? 1 : -1;
2068 uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
2070 int mb_width, int mb_height, int mb_stride, int quarter_sample)
2073 const int shift = 1 + quarter_sample;
2075 const int mv_stride = (mb_width << mv_sample_log2) +
2077 int mb_x, mb_y, mbcount = 0;
2085 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2086 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2087 int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
2088 for (direction = 0; direction < 2; direction++) {
2092 for (i = 0; i < 4; i++) {
2093 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2094 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2095 int xy = (mb_x * 2 + (i & 1) +
2096 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2097 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2098 int my = (motion_val[direction][xy][1] >> shift) + sy;
2099 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
2101 } else if (IS_16X8(mb_type)) {
2102 for (i = 0; i < 2; i++) {
2103 int sx = mb_x * 16 + 8;
2104 int sy = mb_y * 16 + 4 + 8 * i;
2105 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2106 int mx = (motion_val[direction][xy][0] >> shift);
2107 int my = (motion_val[direction][xy][1] >> shift);
2112 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2114 } else if (IS_8X16(mb_type)) {
2115 for (i = 0; i < 2; i++) {
2116 int sx = mb_x * 16 + 4 + 8 * i;
2117 int sy = mb_y * 16 + 8;
2118 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2119 int mx = motion_val[direction][xy][0] >> shift;
2120 int my = motion_val[direction][xy][1] >> shift;
2125 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
2128 int sx = mb_x * 16 + 8;
2129 int sy = mb_y * 16 + 8;
2130 int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
2131 int mx = (motion_val[direction][xy][0]>>shift) + sx;
2132 int my = (motion_val[direction][xy][1]>>shift) + sy;
2133 mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
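In the export loop above, the start point of each vector is the centre of the partition it belongs to; for the 8x8 case the centre of sub-block i inside a 16x16 macroblock is derived from the low bits of i. The arithmetic, factored out as an illustrative helper:

static void block8x8_centre(int mb_x, int mb_y, int i, int *sx, int *sy)
{
    *sx = mb_x * 16 + 4 + 8 * (i & 1);   /* i & 1  selects the column */
    *sy = mb_y * 16 + 4 + 8 * (i >> 1);  /* i >> 1 selects the row    */
}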
2155 if (avctx->hwaccel || !mbtype_table
2165 for (y = 0; y < mb_height; y++) {
2166 for (x = 0; x < mb_width; x++) {
2168 int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
2175 qscale_table[x + y * mb_stride]);
2178 int mb_type = mbtype_table[x + y * mb_stride];
2194 else if (IS_GMC(mb_type))
2234 int h_chroma_shift, v_chroma_shift, block_height;
2236 const int shift = 1 + quarter_sample;
2242 const int mv_stride = (mb_width << mv_sample_log2) +
2253 ptr = pict->data[0];
2255 block_height = 16 >> v_chroma_shift;
2257 for (mb_y = 0; mb_y < mb_height; mb_y++) {
2259 for (mb_x = 0; mb_x < mb_width; mb_x++) {
2260 const int mb_index = mb_x + mb_y * mb_stride;
2262 if ((avctx->debug_mv) && motion_val[0]) {
2264 for (type = 0; type < 3; type++) {
2286 if (!USES_LIST(mbtype_table[mb_index], direction))
2289 if (IS_8X8(mbtype_table[mb_index])) {
2291 for (i = 0; i < 4; i++) {
2292 int sx = mb_x * 16 + 4 + 8 * (i & 1);
2293 int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2294 int xy = (mb_x * 2 + (i & 1) +
2295 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2296 int mx = (motion_val[direction][xy][0] >> shift) + sx;
2297 int my = (motion_val[direction][xy][1] >> shift) + sy;
2299 height, pict->linesize[0], 100, 0, direction);
2301 } else if (IS_16X8(mbtype_table[mb_index])) {
2303 for (i = 0; i < 2; i++) {
2304 int sx = mb_x * 16 + 8;
2305 int sy = mb_y * 16 + 4 + 8 * i;
2306 int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2307 int mx = (motion_val[direction][xy][0] >> shift);
2308 int my = (motion_val[direction][xy][1] >> shift);
2313 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2314 height, pict->linesize[0], 100, 0, direction);
2316 } else if (IS_8X16(mbtype_table[mb_index])) {
2318 for (i = 0; i < 2; i++) {
2319 int sx = mb_x * 16 + 4 + 8 * i;
2320 int sy = mb_y * 16 + 8;
2321 int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2322 int mx = motion_val[direction][xy][0] >> shift;
2323 int my = motion_val[direction][xy][1] >> shift;
2328 draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2329 height, pict->linesize[0], 100, 0, direction);
2332 int sx= mb_x * 16 + 8;
2333 int sy= mb_y * 16 + 8;
2334 int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2335 int mx= (motion_val[direction][xy][0]>>shift) + sx;
2336 int my= (motion_val[direction][xy][1]>>shift) + sy;
2337 draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
2343 uint64_t c = (qscale_table[mb_index] * 128 / 31) *
2344 0x0101010101010101ULL;
2346 for (y = 0; y < block_height; y++) {
2347 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2348 (block_height * mb_y + y) *
2350 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2351 (block_height * mb_y + y) *
2357 int mb_type = mbtype_table[mb_index];
2360 #define COLOR(theta, r) \
2361 u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2362 v = (int)(128 + r * sin(theta * 3.141592 / 180));
2379 } else if (IS_GMC(mb_type)) {
2381 } else if (IS_SKIP(mb_type)) {
2392 u *= 0x0101010101010101ULL;
2393 v *= 0x0101010101010101ULL;
2394 for (y = 0; y < block_height; y++) {
2395 *(uint64_t *)(pict->data[1] + 8 * mb_x +
2396 (block_height * mb_y + y) * pict->linesize[1]) = u;
2397 *(uint64_t *)(pict->data[2] + 8 * mb_x +
2398 (block_height * mb_y + y) * pict->linesize[2]) = v;
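The `* 0x0101010101010101ULL` pattern above replicates one 8-bit value into every byte of a 64-bit word so a single store paints eight horizontally adjacent chroma samples. A sketch of the same trick; memcpy is used here instead of the type-punned store to stay alignment-safe:

#include <stdint.h>
#include <string.h>

static void fill8(uint8_t *dst, uint8_t value)
{
    uint64_t v = value * 0x0101010101010101ULL;  /* byte replicated 8 times */
    memcpy(dst, &v, 8);
}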
2403 *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2404 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2405 *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2406 (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2409 for (y = 0; y < 16; y++)
2410 pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2413 if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2414 int dm = 1 << (mv_sample_log2 - 2);
2415 for (i = 0; i < 4; i++) {
2416 int sx = mb_x * 16 + 8 * (i & 1);
2417 int sy = mb_y * 16 + 8 * (i >> 1);
2418 int xy = (mb_x * 2 + (i & 1) +
2419 (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2422 if (mv[0] != mv[dm] ||
2423 mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2424 for (y = 0; y < 8; y++)
2425 pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2426 if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2427 *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2428 pict->linesize[0]) ^= 0x8080808080808080ULL;
2438 mbskip_table[mb_index] = 0;
2465 int field_based, int field_select,
2466 int src_x, int src_y,
2468 int h_edge_pos, int v_edge_pos,
2470 int motion_x, int motion_y)
2473 const int op_index = FFMIN(lowres, 3);
2474 const int s_mask = (2 << lowres) - 1;
2483 sx = motion_x & s_mask;
2484 sy = motion_y & s_mask;
2485 src_x += motion_x >> lowres + 1;
2486 src_y += motion_y >> lowres + 1;
2488 src += src_y * stride + src_x;
2490 if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2491 (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2494 w + 1, (h + 1) << field_based,
2495 src_x, src_y << field_based,
2496 h_edge_pos, v_edge_pos);
2501 sx = (sx << 2) >> lowres;
2502 sy = (sy << 2) >> lowres;
2505 pix_op[op_index](dest, src, stride, h, sx, sy);
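hpel_motion_lowres() above splits each motion component into an integer sample offset (motion >> (lowres + 1)) and a sub-pel phase (motion & s_mask), then rescales the phase with (sx << 2) >> lowres before handing it to the chroma MC routine. The split as a standalone sketch (assuming 0 <= lowres <= 3 as in libavcodec):

static void lowres_split_mv(int motion, int lowres, int *int_part, int *phase)
{
    const int s_mask = (2 << lowres) - 1;
    const int frac   = motion & s_mask;

    *int_part = motion >> (lowres + 1);  /* whole-sample offset              */
    *phase    = (frac << 2) >> lowres;   /* remapped to the 0..7 phase range
                                            expected by the MC routines      */
}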
2519 int motion_x, int motion_y,
2522 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2523 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
2524 ptrdiff_t uvlinesize, linesize;
2527 const int block_s = 8>>lowres;
2528 const int s_mask = (2 << lowres) - 1;
2541 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2544 sx = motion_x & s_mask;
2545 sy = motion_y & s_mask;
2546 src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2547 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2550 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2551 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2552 uvsrc_x = src_x >> 1;
2553 uvsrc_y = src_y >> 1;
2558 uvsx = (2 * mx) & s_mask;
2559 uvsy = (2 * my) & s_mask;
2560 uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2561 uvsrc_y = mb_y * block_s + (my >> lowres);
2568 uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2569 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2575 uvsy = motion_y & s_mask;
2577 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2580 uvsx = motion_x & s_mask;
2581 uvsy = motion_y & s_mask;
2588 ptr_y = ref_picture[0] + src_y * linesize + src_x;
2589 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2590 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2592 if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2593 (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2595 linesize >> field_based, linesize >> field_based,
2596 17, 17 + field_based,
2597 src_x, src_y << field_based, h_edge_pos,
2604 uvlinesize >> field_based, uvlinesize >> field_based,
2606 uvsrc_x, uvsrc_y << field_based,
2607 h_edge_pos >> 1, v_edge_pos >> 1);
2609 uvlinesize >> field_based,uvlinesize >> field_based,
2611 uvsrc_x, uvsrc_y << field_based,
2612 h_edge_pos >> 1, v_edge_pos >> 1);
2631 sx = (sx << 2) >> lowres;
2632 sy = (sy << 2) >> lowres;
2633 pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2637 uvsx = (uvsx << 2) >> lowres;
2638 uvsy = (uvsy << 2) >> lowres;
2640 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2641 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2654 const int op_index = FFMIN(lowres, 3);
2655 const int block_s = 8 >> lowres;
2656 const int s_mask = (2 << lowres) - 1;
2657 const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2658 const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2659 int emu = 0, src_x, src_y, sx, sy;
2675 src_x = s->mb_x * block_s + (mx >> lowres + 1);
2676 src_y = s->mb_y * block_s + (my >> lowres + 1);
2679 ptr = ref_picture[1] + offset;
2680 if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2681 (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
2685 src_x, src_y, h_edge_pos, v_edge_pos);
2689 sx = (sx << 2) >> lowres;
2690 sy = (sy << 2) >> lowres;
2691 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2693 ptr = ref_picture[2] + offset;
2698 src_x, src_y, h_edge_pos, v_edge_pos);
2701 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2718 int dir, uint8_t **ref_picture,
2724 const int block_s = 8 >> lowres;
2733 ref_picture, pix_op,
2734 s->mv[dir][0][0], s->mv[dir][0][1],
2740 for (i = 0; i < 4; i++) {
2743 ref_picture[0], 0, 0,
2744 (2 * mb_x + (i & 1)) * block_s,
2745 (2 * mb_y + (i >> 1)) * block_s,
2748 block_s, block_s, pix_op,
2749 s->mv[dir][i][0], s->mv[dir][i][1]);
2751 mx += s->mv[dir][i][0];
2752 my += s->mv[dir][i][1];
2764 ref_picture, pix_op,
2765 s->mv[dir][0][0], s->mv[dir][0][1],
2770 ref_picture, pix_op,
2771 s->mv[dir][1][0], s->mv[dir][1][1],
2781 ref_picture, pix_op,
2783 s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2787 for (i = 0; i < 2; i++) {
2792 ref2picture = ref_picture;
2799 ref2picture, pix_op,
2800 s->mv[dir][i][0], s->mv[dir][i][1] +
2801 2 * block_s * i, block_s, mb_y >> 1);
2803 dest_y += 2 * block_s * s->linesize;
2810 for (i = 0; i < 2; i++) {
2812 for (j = 0; j < 2; j++) {
2815 ref_picture, pix_op,
2816 s->mv[dir][2 * i + j][0],
2817 s->mv[dir][2 * i + j][1],
2823 for (i = 0; i < 2; i++) {
2826 ref_picture, pix_op,
2827 s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2828 2 * block_s, mb_y >> 1);
2851 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2852 int my, off, i, mvs;
2871 for (i = 0; i < mvs; i++) {
2872 my = s->mv[dir][i][1];
2873 my_max = FFMAX(my_max, my);
2874 my_min = FFMIN(my_min, my);
2877 off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
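ff_mpv_lowest_referenced_row() above takes the largest vertical displacement among the macroblock's vectors and converts it to a count of 16-pixel macroblock rows, rounding up; qpel_shift compensates for half-pel versus quarter-pel vector units. A hypothetical standalone version of that computation:

#include <limits.h>
#include <stdint.h>

static int mv_row_offset(const int16_t (*mv)[2], int nb_mvs, int qpel_shift)
{
    int my_max = INT_MIN, my_min = INT_MAX, worst, i;

    for (i = 0; i < nb_mvs; i++) {
        int my = mv[i][1];
        if (my > my_max) my_max = my;
        if (my < my_min) my_min = my;
    }
    worst = -my_min > my_max ? -my_min : my_max;
    /* after the shift the value is in quarter-pel units;
     * >> 6 converts to whole 16-pixel rows, +63 rounds up */
    return ((worst << qpel_shift) + 63) >> 6;
}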
2886 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2902 int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2924 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2925 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2938 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2939 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2956 int lowres_flag, int is_mpeg12)
2971 for(j=0; j<64; j++){
2998 uint8_t *dest_y, *dest_cb, *dest_cr;
2999 int dct_linesize, dct_offset;
3005 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
3024 dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
3028 dest_cb= s->dest[1];
3029 dest_cr= s->dest[2];
3112 add_dct(s, block[0], 0, dest_y , dct_linesize);
3113 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
3114 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
3115 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
3119 add_dct(s, block[4], 4, dest_cb, uvlinesize);
3120 add_dct(s, block[5], 5, dest_cr, uvlinesize);
3124 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3126 add_dct(s, block[4], 4, dest_cb, dct_linesize);
3127 add_dct(s, block[5], 5, dest_cr, dct_linesize);
3128 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
3129 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
3131 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
3132 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
3133 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
3134 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
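The add_dct() calls above place the four luma blocks of a macroblock in a 2x2 arrangement: odd block numbers move right by block_size, block numbers 2 and 3 move down by dct_offset (which already accounts for interlaced DCT, see line 3024). The indexing rule as a small illustrative helper:

#include <stdint.h>
#include <stddef.h>

static uint8_t *luma_block_dest(uint8_t *dest_y, int i,
                                int block_size, ptrdiff_t dct_offset)
{
    return dest_y + (i & 1) * block_size    /* column within the macroblock */
                  + (i >> 1) * dct_offset;  /* row (or field) offset        */
}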
3146 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
3147 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
3148 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
3165 s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
3166 s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
3167 s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
3176 dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
3180 s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
3181 s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
3183 s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
3184 s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
3185 s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
3186 s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
3242 s->dest[0] += s->mb_y * linesize << mb_size;
3246 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3270 for(i=0; i<=last; i++){
3271 const int j= scantable[i];
3276 for(i=0; i<=last; i++){
3277 const int j= scantable[i];
3278 const int perm_j= permutation[j];
3279 block[perm_j]= temp[j];
3318 else if (qscale > 31)
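The fragment at line 3318 belongs to ff_set_qscale() ("set qscale and update qscale dependent variables", documented further down); MPEG-style quantizer scales are limited to 1..31. A sketch of the clamping implied by the excerpt:

static int clamp_qscale(int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;
    return qscale;
}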
int bitstream_buffer_size
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
#define FF_DEBUG_DCT_COEFF
static int init_duplicate_context(MpegEncContext *s)
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
int16_t(* b_bidir_back_mv_table_base)[2]
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
#define MAX_PICTURE_COUNT
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
ScanTable intra_v_scantable
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
static int shift(int a, int b)
#define CONFIG_WMV2_ENCODER
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
This structure describes decoded (raw) audio or video data.
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
int16_t(* p_mv_table)[2]
MV table (1MV per MB) p-frame encoding.
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
#define MV_TYPE_FIELD
2 vectors, one per field
#define MAKE_WRITABLE(table)
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
uint8_t * mb_mean
Table for MB luminance.
uint8_t * edge_emu_buffer
temporary buffer used when MVs point to out-of-frame data
int coded_width
Bitstream width / height, may be different from width/height e.g.
static int clip_line(int *sx, int *sy, int *ex, int *ey, int maxx)
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define AV_LOG_WARNING
Something somehow does not look correct.
int16_t src_x
Absolute source position.
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
uint8_t * coded_block_base
static int update_picture_tables(Picture *dst, Picture *src)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
uint16_t * mb_var
Table for MB variances.
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block.
int16_t(*[3] ac_val)[16]
used for mpeg4 AC prediction, all 3 arrays must be continuous
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
void * opaque
for some private data of the user
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
static void gray_frame(AVFrame *frame)
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
#define HAVE_INTRINSICS_NEON
uint8_t * bitstream_buffer
static int find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
void(* clear_blocks)(int16_t *blocks)
int field_picture
whether or not the picture was encoded in separate fields
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced p-frame encoding.
int16_t(* p_mv_table_base)[2]
static int make_tables_writable(Picture *pic)
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
uint32_t * score_map
map to store the scores
#define FF_ARRAY_ELEMS(a)
#define FF_DEBUG_VIS_MV_B_BACK
static void free_duplicate_context(MpegEncContext *s)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
int padding_bug_score
used to detect the VERY common padding bug in MPEG4
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int mb_num
number of MBs of a picture
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
int h263_aic
Advanced INTRA Coding (AIC)
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode b-frame encoding.
int encoding
true if we are encoding (vs decoding)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Macro definitions for various function/variable attributes.
int16_t(* b_back_mv_table_base)[2]
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int av_codec_is_encoder(const AVCodec *codec)
int alloc_mb_width
mb_width used to allocate tables
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
struct AVHWAccel * hwaccel
Hardware accelerator in use.
#define USES_LIST(a, list)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
static int alloc_frame_buffer(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int chroma_x_shift, int chroma_y_shift, int linesize, int uvlinesize)
Allocate a frame buffer.
const uint8_t ff_mpeg1_dc_scale_table[128]
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
enum OutputFormat out_format
output format
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
int ff_mpv_common_frame_size_change(MpegEncContext *s)
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Multithreading support functions.
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Motion estimation context.
qpel_mc_func(* qpel_put)[16]
int16_t dst_x
Absolute destination position.
int no_rounding
apply no rounding to motion compensation (MPEG4, msmpeg4, ...) for b-frames rounding mode is always 0...
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Picture current_picture
copy of the current picture structure.
void ff_mpv_common_init_ppc(MpegEncContext *s)
Structure to hold side data for an AVFrame.
#define PICT_BOTTOM_FIELD
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
int32_t source
Where the current macroblock comes from; negative value when it comes from the past, positive value when it comes from the future.
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
static int alloc_picture_tables(AVCodecContext *avctx, Picture *pic, int encoding, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride)
void(* decode_mb)(struct MpegEncContext *s)
Called for every Macroblock in a slice.
uint16_t pp_time
time distance between the last 2 p,s,i frames
AVBufferRef * mb_type_buf
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
int interlaced_frame
The content of the picture is interlaced.
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
av_cold void ff_mpv_idct_init(MpegEncContext *s)
int mb_height
number of MBs horizontally & vertically
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
static av_always_inline void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
high precision timer, useful to profile code
int16_t(*[2][2] p_field_mv_table_base)[2]
static void ff_update_block_index(MpegEncContext *s)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
#define ROUNDED_DIV(a, b)
AVBufferRef * mb_mean_buf
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
int intra_only
if true, only intra pictures are generated
int16_t * dc_val[3]
used for mpeg4 DC prediction, all 3 arrays must be continuous
int h263_plus
h263 plus headers
int slice_context_count
number of used thread_contexts
int width
width and height of the video frame
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int last_dc[3]
last DC values for MPEG1
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
int mb_skipped
MUST BE SET only during DECODING.
int partitioned_frame
is current frame partitioned
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
int frame_skip_threshold
frame skip threshold
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define CODEC_FLAG2_EXPORT_MVS
Export motion vectors through frame side data.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
uint8_t w
Width and height of the block.
#define FF_DEBUG_VIS_MV_B_FOR
int capabilities
Codec capabilities.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int quarter_sample
1->qpel, 0->half pel ME/MC
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color)
Draw a line from (ex, ey) -> (sx, sy).
int low_delay
no reordering needed / has no b-frames
uint8_t *[2][2] b_field_select_table
static const uint8_t offset[127][2]
void ff_mpv_common_end(MpegEncContext *s)
Libavcodec external API header.
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
void ff_mpeg_flush(AVCodecContext *avctx)
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
uint8_t * error_status_table
const uint8_t ff_alternate_horizontal_scan[64]
AVBufferRef * hwaccel_priv_buf
common internal API header
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color, int tail, int direction)
Draw an arrow from (ex, ey) -> (sx, sy).
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
AVBufferRef * motion_val_buf[2]
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
enum AVPictureType pict_type
Picture type of the frame.
#define UPDATE_PICTURE(pic)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
int overread
the number of bytes which were irreversibly read from the next frame
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed b frames
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture new_picture
copy of the source picture structure for encoding.
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
int width
picture width / height.
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for b-frame encodin...
int16_t(*[2] motion_val)[2]
Picture * current_picture_ptr
pointer to the current picture
#define FF_CEIL_RSHIFT(a, b)
unsigned int allocated_bitstream_buffer_size
void * hwaccel_picture_private
hardware accelerator private data
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
int16_t(*[2][2][2] b_field_mv_table_base)[2]
int16_t(* b_forw_mv_table_base)[2]
int16_t(*[12] pblocks)[64]
int block_last_index[12]
last non zero coefficient in block
uint8_t idct_permutation[64]
IDCT input permutation.
int mb_decision
macroblock decision mode
void(* idct_add)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
preferred ID for MPEG-1/2 video decoding
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
int first_field
is 1 for the first field of a field picture, 0 otherwise
void * av_memdup(const void *p, size_t size)
Duplicate the buffer p.
int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static const int8_t mv[256][2]
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames...
void(* idct_put)(uint8_t *dest, int line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
#define MV_TYPE_16X16
1 vector for the whole mb
int frame_skip_factor
frame skip factor
static void clear_context(MpegEncContext *s)
uint16_t * mc_mb_var
Table for motion compensated MB variances.
AVBufferRef * qscale_table_buf
int16_t(* b_bidir_forw_mv_table_base)[2]
int coded_picture_number
picture number in bitstream order
uint16_t inter_matrix[64]
int alloc_mb_height
mb_height used to allocate tables
struct MpegEncContext * thread_context[MAX_THREADS]
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
void ff_free_picture_tables(Picture *pic)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
ptrdiff_t linesize
line size, in bytes, may be different from width
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
ScanTable intra_scantable
uint8_t * data
The data buffer.
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
int height
picture size. must be a multiple of 16
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
uint32_t state
contains the last few bytes in MSB order
Picture * picture
main picture buffer
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
#define FF_THREAD_FRAME
Decode more than one frame at once.
ScanTable intra_h_scantable
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced b-frame encoding.
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
int closed_gop
MPEG1/2 GOP is closed.
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
#define UPDATE_TABLE(table)
unsigned int avpriv_toupper4(unsigned int x)
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
const uint8_t ff_zigzag_direct[64]
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
static int ff_h263_round_chroma(int x)
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
static int add_mb(AVMotionVector *mb, uint32_t mb_type, int dst_x, int dst_y, int src_x, int src_y, int direction)
int f_code
forward MV resolution
int max_b_frames
max number of b-frames for encoding
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int size
Size of data in bytes.
int h263_pred
use mpeg4/h263 ac/dc predictions
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode b-frame encoding.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
static int init_context_frame(MpegEncContext *s)
Initializes and allocates MpegEncContext fields dependent on the resolution.
static int pic_is_unused(Picture *pic)
uint8_t *[2] p_field_select_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode b-frame encoding.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
qpel_mc_func(* qpel_avg)[16]
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode b-frame encoding.
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
int noise_reduction
noise reduction strength
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y)
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
A reference to a data buffer.
discard all non reference
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
uint64_t flags
Extra flag information.
#define FF_MB_DECISION_RD
rate distortion
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
AVBufferRef * mbskip_table_buf
const uint8_t ff_default_chroma_qscale_table[32]
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
static av_cold int dct_init(MpegEncContext *s)
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture last_picture
copy of the previous picture structure.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Picture * last_picture_ptr
pointer to the previous picture.
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (h263)
const uint8_t ff_alternate_vertical_scan[64]
static void release_unused_pictures(AVCodecContext *avctx, Picture *picture)
uint32_t * map
map to avoid duplicate evaluations
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
H264ChromaContext h264chroma
int16_t(* blocks)[12][64]
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
int slices
Number of slices.
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
int top_field_first
If the content is interlaced, is top field displayed first.
void ff_mpv_frame_end(MpegEncContext *s)
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
uint8_t * obmc_scratchpad
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
int16_t(* block)[64]
points to one of the following blocks
ParseContext parse_context
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Picture next_picture
copy of the next picture structure.
AVBufferRef * mc_mb_var_buf
int key_frame
1 -> keyframe, 0-> not
#define CONFIG_WMV2_DECODER
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
static int init_er(MpegEncContext *s)
int chroma_qscale
chroma QP
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int frame_number
Frame counter, set by libavcodec.
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
uint32_t * mb_type
types and macros are defined in mpegutils.h
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
int debug_mv
debug. Code outside libavcodec should access this field using AVOptions
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
#define FF_DEBUG_VIS_MV_P_FOR
int16_t(* b_direct_mv_table_base)[2]
int b_code
backward MV resolution for B Frames (mpeg4)
int64_t mb_var_sum
sum of MB variance for current frame
void ff_mpv_report_decode_progress(MpegEncContext *s)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
AVBufferRef * ref_index_buf[2]