static int build_vlc(VLC *vlc, const uint8_t *bits_table,
                     const uint8_t *val_table, int nb_codes,
                     int use_static, int is_ac)
{
    uint8_t  huff_size[256] = { 0 };
    uint16_t huff_code[256];
    uint16_t huff_sym[256];
    int i;

    ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);

    for (i = 0; i < 256; i++)
        huff_sym[i] = i + 16 * is_ac;

    if (is_ac)
        huff_sym[0] = 16 * 256;

    return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
                              huff_code, 2, 2, huff_sym, 2, 2, use_static);
}
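For reference, a minimal, self-contained sketch (not the FFmpeg routine) of the canonical code construction that ff_mjpeg_build_huffman_codes performs on a DHT table before the codes are handed to ff_init_vlc_sparse above; the helper name jpeg_build_codes is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Expand a JPEG Huffman table (count of codes per length 1..16 plus the
 * symbol list) into canonical codes: codes of the same length are
 * consecutive, and moving to the next length doubles the running code. */
static int jpeg_build_codes(const uint8_t bits[17], const uint8_t *values,
                            uint8_t out_len[256], uint16_t out_code[256])
{
    int n = 0;
    uint16_t code = 0;

    for (int len = 1; len <= 16; len++) {
        for (int i = 0; i < bits[len]; i++) {
            out_len[n]  = len;
            out_code[n] = code++;
            n++;
        }
        code <<= 1;            /* next code length */
    }
    (void)values;              /* values[i] is the symbol for the i-th code */
    return n;                  /* total number of codes */
}

int main(void)
{
    /* toy table: two 2-bit codes, one 3-bit code */
    const uint8_t bits[17]  = { 0, 0, 2, 1 };
    const uint8_t values[3] = { 0x00, 0x01, 0x11 };
    uint8_t  len[256];
    uint16_t code[256];
    int n = jpeg_build_codes(bits, values, len, code);

    for (int i = 0; i < n; i++)
        printf("symbol %02X -> length %d, code 0x%X\n", values[i], len[i], code[i]);
    return 0;
}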
/* init_default_huffman_tables(): build the standard DC/AC VLC tables */
        ret = build_vlc(&s->vlcs[ht[i].class][ht[i].index],
                        ht[i].bits, ht[i].values, ht[i].codes,
                        0, ht[i].class == 1);
        if (ret < 0)
            return ret;

        if (ht[i].class < 2) {
            ...
            memcpy(s->raw_huffman_values[ht[i].class][ht[i].index],
                   ht[i].values, ht[i].length);
        }
/* parse_avid(): byte 12 of the AVID COM payload marks the field order */
    if (len > 14 && buf[12] == 1) /* 1 - NTSC */
        s->interlace_polarity = 1;
    if (len > 14 && buf[12] == 2) /* 2 - PAL */
        s->interlace_polarity = 0;

/* ff_mjpeg_decode_init(): fall back to the built-in tables if the
 * extradata Huffman tables cannot be used */
        av_log(avctx, AV_LOG_ERROR,
               "error using external huffman table, switching back to internal\n");
/* ff_mjpeg_decode_dqt(): read one 64-entry quantization matrix per iteration */
        for (i = 0; i < 64; i++) {
            ...
        }
        ...
        len -= 1 + 64 * (1 + pr);
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
{
    int len, index, i, class, n, v, code_max;
    ...
        for (i = 1; i <= 16; i++) {
            ...
        }
        if (len < n || n > 256)
            return AVERROR_INVALIDDATA;

        for (i = 0; i < n; i++) {
            ...
        }
        ...
               class, index, code_max + 1);

        /* build the VLC for this (class, index) pair */
        if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
                             code_max + 1, 0, class > 0)) < 0)
            return ret;

        /* and a second copy used by the progressive decoder */
        if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
                             code_max + 1, 0, 0)) < 0)
            return ret;

        /* keep the raw table around for hardware accelerators */
        for (i = 0; i < 16; i++)
            s->raw_huffman_lengths[class][index][i] = bits_table[i + 1];
        for (i = 0; i < 256; i++)
            s->raw_huffman_values[class][index][i] = val_table[i];
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
{
    ...
    if (bits > 16 || bits < 1) {
        ...
        return AVERROR_INVALIDDATA;
    }
    ...
    if (s->buf_size && (width + 7) / 8 * ((height + 7) / 8) > s->buf_size * 4LL)
        return AVERROR_INVALIDDATA;
    ...
    if (nb_components <= 0 ||
        nb_components > MAX_COMPONENTS)
        return AVERROR_INVALIDDATA;
    ...
        av_log(s->avctx, AV_LOG_ERROR,
               "nb_components changing in interlaced picture\n");
    ...
    if (s->ls && !(bits <= 8 || nb_components == 1)) {
        avpriv_report_missing_feature(s->avctx,
                                      "JPEG-LS that is not <= 8 "
                                      "bits/component or 16-bit gray");
        return AVERROR_PATCHWELCOME;
    }
    if (len != 8 + 3 * nb_components) {
        ...
    }
    for (i = 0; i < nb_components; i++) {
        ...
        if (h_count[i] > s->h_max)
            s->h_max = h_count[i];
        if (v_count[i] > s->v_max)
            s->v_max = v_count[i];
        ...
        if (!h_count[i] || !v_count[i]) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Invalid sampling factor in component %d %d:%d\n",
                   i, h_count[i], v_count[i]);
            return AVERROR_INVALIDDATA;
        }
        ...
               i, h_count[i], v_count[i],
        ...
    }
    if (   nb_components == 4
        ...
    if (nb_components == 2) {
        ...
    }
    if (memcmp(s->h_count, h_count, sizeof(h_count)) ||
        memcmp(s->v_count, v_count, sizeof(v_count))) {
        ...
        memcpy(s->h_count, h_count, sizeof(h_count));
        memcpy(s->v_count, v_count, sizeof(v_count));
        ...
    }
    ...
    if (s->v_max == 1 && s->h_max == 1 && s->lossless == 1 &&
        (nb_components == 3 || nb_components == 4))
        s->rgb = 1;

    /* pack the per-component sampling factors into one 32-bit id */
    pix_fmt_id = ((unsigned)s->h_count[0] << 28) | (s->v_count[0] << 24) |
                 (s->h_count[1] << 20)           | (s->v_count[1] << 16) |
                 (s->h_count[2] << 12)           | (s->v_count[2] <<  8) |
                 (s->h_count[3] <<  4)           |  s->v_count[3];
    ...
    /* if every horizontal (resp. vertical) factor is 0 or 2, halve them */
    if (!(pix_fmt_id & 0xD0D0D0D0))
        pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
    if (!(pix_fmt_id & 0x0D0D0D0D))
        pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;

    for (i = 0; i < 8; i++) {
        int j  = 6 + (i & 1) - (i & 6);
        int is = (pix_fmt_id >> (4 * i)) & 0xF;
        int js = (pix_fmt_id >> (4 * j)) & 0xF;

        if (is == 1 && js != 2 && (i < 2 || i > 5))
            js = (pix_fmt_id >> ( 8 + 4 * (i & 1))) & 0xF;
        if (is == 1 && js != 2 && (i < 2 || i > 5))
            js = (pix_fmt_id >> (16 + 4 * (i & 1))) & 0xF;

        if (is == 1 && js == 2) {
            ...
        }
    }

    switch (pix_fmt_id) {
    ...
        if (pix_fmt_id == 0x14111100)
            ...
    ...
    }
    ...
    if (pix_fmt_id == 0x42111100) {
        ...
    } else if (pix_fmt_id == 0x24111100) {
        ...
    } else if (pix_fmt_id == 0x23111100) {
        ...
    }
    ...
    else if (s->bits <= 8)
        ...
#if CONFIG_MJPEG_NVDEC_HWACCEL
    ...
#endif
#if CONFIG_MJPEG_VAAPI_HWACCEL
    ...
#endif
    ...
    for (i = 0; i < 4; i++)
        ...
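As a side note, a small stand-alone sketch (hypothetical helper name, not part of mjpegdec.c) of how the 32-bit pix_fmt_id above is assembled and which values the common chroma subsamplings produce:

#include <stdint.h>
#include <stdio.h>

/* Pack up to four components' sampling factors the way the switch above
 * expects: component i occupies byte (3 - i), high nibble = h, low = v. */
static uint32_t make_pix_fmt_id(const int *h, const int *v, int ncomp)
{
    uint32_t id = 0;
    for (int i = 0; i < ncomp; i++)
        id |= (uint32_t)((h[i] << 4) | v[i]) << (24 - 8 * i);
    return id;
}

int main(void)
{
    int h420[] = { 2, 1, 1 }, v420[] = { 2, 1, 1 };   /* YUV 4:2:0 */
    int h422[] = { 2, 1, 1 }, v422[] = { 1, 1, 1 };   /* YUV 4:2:2 */
    int h444[] = { 1, 1, 1 }, v444[] = { 1, 1, 1 };   /* YUV 4:4:4 */

    printf("4:2:0 -> 0x%08X\n", make_pix_fmt_id(h420, v420, 3)); /* 0x22111100 */
    printf("4:2:2 -> 0x%08X\n", make_pix_fmt_id(h422, v422, 3)); /* 0x21111100 */
    printf("4:4:4 -> 0x%08X\n", make_pix_fmt_id(h444, v444, 3)); /* 0x11111100 */
    return 0;
}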
/* still in ff_mjpeg_decode_sof(): block grid at maximum sampling */
        int bw = (width  + s->h_max * 8 - 1) / (s->h_max * 8);
        int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
        ...

static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
{
    int code;
    code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
    if (code < 0 || code > 16) {
        av_log(s->avctx, AV_LOG_WARNING,
               "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
               0, dc_index, &s->vlcs[0][dc_index]);
        return 0xfffff;
    }
    ...
}
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
                        int dc_index, int ac_index, uint16_t *quant_matrix)
{
    ...
    /* DC coefficient, predicted from the previous block of this component */
    val = mjpeg_decode_dc(s, dc_index);
    if (val == 0xfffff) {
        ...
        return AVERROR_INVALIDDATA;
    }
    val = val * (unsigned)quant_matrix[0] + s->last_dc[component];
    val = av_clip_int16(val);
    s->last_dc[component] = val;
    block[0] = val;

    /* AC coefficients: (run, size) symbols followed by `size` magnitude bits */
    ...
        i += ((unsigned)code) >> 4;
        ...
        {
            int sign = (~cache) >> 31;
            level = (NEG_USR32(sign ^ cache, code) ^ sign) - sign;
        }
        ...
        block[j] = level * quant_matrix[i];
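The branchless sign trick above (level = (NEG_USR32(sign ^ cache, code) ^ sign) - sign) is the JPEG "extend" step in disguise. A plain, hedged equivalent for reference:

#include <stdio.h>

/* JPEG EXTEND (ITU-T T.81, F.2.2.1): turn an s-bit field read from the
 * bitstream into a signed coefficient.  If the top bit is set the value is
 * positive and used as-is; otherwise it maps to the negative range. */
static int jpeg_extend(int v, int s)
{
    if (s == 0)
        return 0;
    if (v < (1 << (s - 1)))          /* top bit clear -> negative */
        v -= (1 << s) - 1;
    return v;
}

int main(void)
{
    /* size 3 covers magnitudes -7..-4 and 4..7 */
    for (int v = 0; v < 8; v++)
        printf("bits %d (size 3) -> %d\n", v, jpeg_extend(v, 3));
    return 0;
}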
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
                                 int component, int dc_index,
                                 uint16_t *quant_matrix, int Al)
{
    ...
    if (val == 0xfffff) {
        ...
    }
    val = (val * (quant_matrix[0] << Al)) + s->last_dc[component];
    ...
}
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
                                    uint8_t *last_nnz, int ac_index,
                                    uint16_t *quant_matrix,
                                    int ss, int se, int Al, int *EOBRUN)
{
    ...
    for (i = ss; ; i++) {
        ...
        run = ((unsigned) code) >> 4;
        ...
        {
            int sign = (~cache) >> 31;
            level = (NEG_USR32(sign ^ cache, code) ^ sign) - sign;
        }
        ...
            block[j] = level * (quant_matrix[se] << Al);
        ...
            block[j] = level * (quant_matrix[i] << Al);
        ...
    }
#define REFINE_BIT(j) {                                   \
    UPDATE_CACHE(re, &s->gb);                             \
    sign = block[j] >> 15;                                \
    block[j] += SHOW_UBITS(re, &s->gb, 1) *               \
                ((quant_matrix[i] ^ sign) - sign) << Al;  \
    LAST_SKIP_BITS(re, &s->gb, 1);                        \
}

/* zero-run helper (body abridged): */
    ...
            av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
    ...
    j = s->scantable.permutated[i]; \
    ...
    else if (run-- == 0) \
    ...

static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
                                   uint8_t *last_nnz, int ac_index,
                                   uint16_t *quant_matrix,
                                   int ss, int se, int Al, int *EOBRUN)
{
    int last = FFMIN(se, *last_nnz);
    ...
        run = ((unsigned) code) >> 4;
        ...
            block[j] = ((quant_matrix[i] << Al) ^ val) - val;
        ...
        run = ((unsigned) code) >> 4;
    ...
    for (; i <= last; i++) {
        ...
    }
/* handle_rstn(): reset DC prediction at restart markers */
    for (i = 0; i < nb_components; i++)
        ...
    ...
    for (i = 0; i < nb_components; i++)
        ...
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components,
                                 int predictor, int point_transform)
{
    int left[4], top[4], topleft[4];
    const int linesize = s->linesize[0];
    const int mask = ((1 << s->bits) - 1) << point_transform;
    int resync_mb_y = 0;
    int resync_mb_x = 0;
    ...
    width = s->mb_width / nb_components;
    ...
    for (i = 0; i < 4; i++)
        ...
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        ...
            ptr += linesize >> 1;
        ...
        for (i = 0; i < 4; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];
        ...
        for (i = 0; i < 6; i++)
            vpred[i] = 1 << (s->bits - 1);

        for (mb_x = 0; mb_x < width; mb_x++) {
            int modified_predictor = predictor;
            ...
                top[i] = left[i] = topleft[i] = 1 << (s->bits - 1);
            ...
            if (mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x || !mb_x)
                modified_predictor = 1;

            for (i = 0; i < nb_components; i++) {
                ...
                topleft[i] = top[i];
                ...
                if (!s->bayer || mb_x) {
                    ...
                    pred = vpred[i] - dc;
                    ...
                }
                ...
                PREDICT(pred, topleft[i], top[i], pred, modified_predictor);
                ...
                    mask & (pred + (unsigned)(dc * (1 << point_transform)));
            }
        }

        if (...) {                          /* 4-component RCT output */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[4*mb_x + 2] = buffer[mb_x][0] -
                    ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[4*mb_x + 1] = buffer[mb_x][1] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 3] = buffer[mb_x][2] + ptr[4*mb_x + 2];
                ptr[4*mb_x + 0] = buffer[mb_x][3];
            }
        } else if (...) {                   /* planar output, per component */
            for (i = 0; i < nb_components; i++) {
                ...
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ...
                    }
                } else if (s->bits == 9) {
                    ...
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t *)ptr)[4*mb_x + c] = buffer[mb_x][i];
                    ...
            }
        } else if (s->rct) {
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ptr[3*mb_x + 1] = buffer[mb_x][0] -
                    ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (...) {                   /* Pegasus RCT */
            for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                ...
                ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
                ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
            }
        } else if (s->bayer) {
            if (nb_components == 1) {
                for (mb_x = 0; mb_x < width; mb_x++)
                    ((uint16_t *)ptr)[mb_x] = buffer[mb_x][0];
            } else if (nb_components == 2) {
                for (mb_x = 0; mb_x < width; mb_x++) {
                    ((uint16_t *)ptr)[2*mb_x + 0] = buffer[mb_x][0];
                    ((uint16_t *)ptr)[2*mb_x + 1] = buffer[mb_x][1];
                }
            }
        } else {
            for (i = 0; i < nb_components; i++) {
                ...
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ...
                    }
                } else if (s->bits == 9) {
                    ...
                    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                        ((uint16_t *)ptr)[3*mb_x + 2 - c] = buffer[mb_x][i];
                    ...
            }
        }
    }
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
                                 int point_transform, int nb_components)
{
    int i, mb_x, mb_y, mask;
    ...
    int resync_mb_y = 0;
    int resync_mb_x = 0;

    point_transform += bits - s->bits;
    mask = ((1 << s->bits) - 1) << point_transform;

    av_assert0(nb_components >= 1 && nb_components <= 4);

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            ...
            if (!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x || s->interlaced) {
                int toprow  = mb_y == resync_mb_y || mb_y == resync_mb_y + 1 && mb_x < resync_mb_x;
                int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;

                for (i = 0; i < nb_components; i++) {
                    ...
                    int n, h, v, x, y, c, j, linesize;
                    ...
                    if (bits > 8)
                        linesize /= 2;

                    for (j = 0; j < n; j++) {
                        ...
                        if (   h * mb_x + x >= s->width
                            || v * mb_y + y >= s->height) {
                            ...
                        } else if (bits <= 8) {
                            ptr = s->picture_ptr->data[c] +
                                  (linesize * (v * mb_y + y)) + (h * mb_x + x);
                            ...
                                if (x == 0 && leftcol) {
                                    pred = 1 << (bits - 1);
                            ...
                                if (x == 0 && leftcol) {
                                    pred = ptr[-linesize];
                            ...
                                    PREDICT(pred, ptr[-linesize - 1], ptr[-linesize],
                                            ptr[-1], predictor);
                            ...
                                ptr += linesize >> 1;
                            ...
                            *ptr = pred + ((unsigned)dc << point_transform);
                        } else {
                            ptr16 = (uint16_t *)(s->picture_ptr->data[c] +
                                    2 * (linesize * (v * mb_y + y)) + 2 * (h * mb_x + x));
                            ...
                                if (x == 0 && leftcol) {
                                    pred = 1 << (bits - 1);
                            ...
                                if (x == 0 && leftcol) {
                                    pred = ptr16[-linesize];
                            ...
                                    PREDICT(pred, ptr16[-linesize - 1], ptr16[-linesize],
                                            ptr16[-1], predictor);
                            ...
                                ptr16 += linesize >> 1;
                            ...
                            *ptr16 = pred + ((unsigned)dc << point_transform);
                        }
                    }
                }
            } else {
                for (i = 0; i < nb_components; i++) {
                    ...
                    int n, h, v, x, y, c, j, linesize, dc;
                    ...
                    if (bits > 8)
                        linesize /= 2;

                    for (j = 0; j < n; j++) {
                        ...
                        if (   h * mb_x + x >= s->width
                            || v * mb_y + y >= s->height) {
                            ...
                        } else if (bits <= 8) {
                            ptr = s->picture_ptr->data[c] +
                                  (linesize * (v * mb_y + y)) +
                                  (h * mb_x + x);
                            PREDICT(pred, ptr[-linesize - 1], ptr[-linesize],
                                    ptr[-1], predictor);
                            ...
                            *ptr = pred + ((unsigned)dc << point_transform);
                        } else {
                            ptr16 = (uint16_t *)(s->picture_ptr->data[c] +
                                    2 * (linesize * (v * mb_y + y)) + 2 * (h * mb_x + x));
                            PREDICT(pred, ptr16[-linesize - 1], ptr16[-linesize],
                                    ptr16[-1], predictor);
                            ...
                            *ptr16 = pred + ((unsigned)dc << point_transform);
                        }
                    }
                }
            }
        }
    }
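Both lossless scan loops above call PREDICT(pred, topleft, top, left, predictor) with the reconstructed neighbours. A sketch of the seven standard JPEG lossless predictors (ITU-T T.81, Table H.1) that such a macro selects between; the exact FFmpeg macro may differ in detail:

#include <stdio.h>

/* Standard lossless-JPEG predictors: a = left, b = top, c = top-left. */
static int jpeg_predict(int predictor, int a, int b, int c)
{
    switch (predictor) {
    case 1:  return a;
    case 2:  return b;
    case 3:  return c;
    case 4:  return a + b - c;
    case 5:  return a + ((b - c) >> 1);
    case 6:  return b + ((a - c) >> 1);
    case 7:  return (a + b) >> 1;
    default: return a;    /* predictor 0 is only meaningful in differential
                             modes; fall back to the left neighbour here */
    }
}

int main(void)
{
    int a = 100, b = 110, c = 90;
    for (int p = 1; p <= 7; p++)
        printf("predictor %d -> %d\n", p, jpeg_predict(p, a, b, c));
    return 0;
}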
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
                                              uint8_t *dst, const uint8_t *src,
                                              int linesize, int lowres)
{
    switch (lowres) {
    case 0: ...
        break;
    case 1: copy_block4(dst, src, linesize, linesize, 4);
        break;
    case 2: copy_block2(dst, src, linesize, linesize, 2);
        break;
    case 3: *dst = *src;
        break;
    }
}
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
{
    int block_x, block_y;
    int size = 8 >> s->avctx->lowres;

    if (s->bits > 8) {
        for (block_y = 0; block_y < size; block_y++)
            for (block_x = 0; block_x < size; block_x++)
                *(uint16_t *)(ptr + 2 * block_x + block_y * linesize) <<= 16 - s->bits;
    } else {
        for (block_y = 0; block_y < size; block_y++)
            for (block_x = 0; block_x < size; block_x++)
                *(ptr + block_x + block_y * linesize) <<= 8 - s->bits;
    }
}
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
                             int Al, const uint8_t *mb_bitmask,
                             int mb_bitmask_size, const AVFrame *reference)
{
    int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
    ...
    int bytes_per_pixel = 1 + (s->bits > 8);
    ...
    for (i = 0; i < nb_components; i++) {
        ...
        reference_data[c] = reference ? reference->data[c] : NULL;
        ...
    }

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            ...
            for (i = 0; i < nb_components; i++) {
                ...
                int n, h, v, x, y, c, j;
                ...
                for (j = 0; j < n; j++) {
                    ...
                    block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
                                     (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
                    ...
                        block_offset += linesize[c] >> 1;
                    if (   8 * (h * mb_x + x) < ((c == 1) || (c == 2) ? chroma_width  : s->width)
                        && 8 * (v * mb_y + y) < ((c == 1) || (c == 2) ? chroma_height : s->height)) {
                        ptr = data[c] + block_offset;
                        ...
                    }
                    ...
                        av_log(s->avctx, AV_LOG_ERROR,
                               "error y=%d x=%d\n", mb_y, mb_x);
                    ...
                        av_log(s->avctx, AV_LOG_ERROR,
                               "error y=%d x=%d\n", mb_y, mb_x);
                    ...
                }
            }
            ff_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
            ...
                      (v * mb_y + y) * 8, (h * mb_x + x) * 8);
            ...
        }
    }
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
                                            int se, int Ah, int Al)
{
    ...
    if (se < ss || se > 63) {
        ...
        return AVERROR_INVALIDDATA;
    }
    ...
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        ...
        for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
            ...
            /* Ah == 0 is the first pass for this band, otherwise refinement */
            if (Ah)
                ret = decode_block_refinement(s, block, last_nnz, s->ac_index[0],
                                              quant_matrix, ss, se, Al, &EOBRUN);
            else
                ret = decode_block_progressive(s, block, last_nnz, s->ac_index[0],
                                               quant_matrix, ss, se, Al, &EOBRUN);
            if (ret < 0) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "error y=%d x=%d\n", mb_y, mb_x);
                return AVERROR_INVALIDDATA;
            }
            ...
        }
    }
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
{
    ...
    const int bytes_per_pixel = 1 + (s->bits > 8);
    const int block_size = s->lossless ? 1 : 8;
    ...
        int mb_width  = (s->width  + h * block_size - 1) / (h * block_size);
        int mb_height = (s->height + v * block_size - 1) / (v * block_size);
        ...
            data += linesize >> 1;
        ...
        for (mb_y = 0; mb_y < mb_height; mb_y++) {
            ...
            for (mb_x = 0; mb_x < mb_width; mb_x++, block++) {
                ...
            }
        }
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
                        int mb_bitmask_size, const AVFrame *reference)
{
    int len, nb_components, i, h, v, predictor, point_transform;
    ...
    const int block_size = s->lossless ? 1 : 8;
    int ilv, prev_shift;

    if (!s->got_picture) {
        av_log(s->avctx, AV_LOG_WARNING,
               "Can not process SOS before SOF, skipping\n");
        return -1;
    }
    ...
        av_log(s->avctx, AV_LOG_ERROR,
               "decode_sos: nb_components (%d)",
               nb_components);
    ...
    if (len != 6 + 2 * nb_components) {
        ...
    }
    for (i = 0; i < nb_components; i++) {
        ...
            av_log(s->avctx, AV_LOG_ERROR,
                   "decode_sos: index(%d) out of components\n", index);
        ...
            index = (index + 2) % 3;
        ...
    }
    ...
    prev_shift = point_transform = 0;

    if (nb_components > 1) {
        /* interleaved stream */
        ...
    } else if (!s->ls) { /* skip this for JPEG-LS */
        ...
        s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
        ...
    }
    ...
           s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
    ...
    for (i = 0; i < nb_components; i++)
        ...
    ...
        if ((ret = ff_jpegls_decode_picture(s, predictor,
                                            point_transform, ilv)) < 0)
            return ret;
        ...
        if ((ret = ljpeg_decode_yuv_scan(s, predictor, point_transform,
                                         nb_components)) < 0)
            return ret;
        ...
        if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor, ilv, prev_shift,
                                                    point_transform)) < 0)
            return ret;
        ...
        if ((ret = mjpeg_decode_scan(s, nb_components,
                                     prev_shift, point_transform,
                                     mb_bitmask, mb_bitmask_size, reference)) < 0)
            return ret;
/* mjpeg_decode_app(): APP0 "JFIF" header */
        int t_w, t_h, v1, v2;
        ...
            av_log(s->avctx, AV_LOG_INFO,
                   "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
                   v1, v2,
                   s->avctx->sample_aspect_ratio.num,
                   s->avctx->sample_aspect_ratio.den);
        ...
        /* skip an embedded RGB24 thumbnail, if any */
        if (len - 10 - (t_w * t_h * 3) > 0)
            len -= t_w * t_h * 3;
    ...
        av_log(s->avctx, AV_LOG_INFO,
               "Pegasus lossless jpeg header found\n");
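For orientation, a hedged, stand-alone sketch of the fixed JFIF APP0 payload layout the code above is walking; parse_jfif and its field names are hypothetical and not part of mjpegdec.c:

#include <stdint.h>
#include <stdio.h>

struct jfif { int ver_hi, ver_lo, units, xdens, ydens, t_w, t_h; };

/* Parse the body of a JFIF APP0 segment (the bytes after "JFIF\0").
 * Layout per the JFIF spec: version (2 bytes), density units (1),
 * X density (2, big-endian), Y density (2), thumbnail width/height (1 each). */
static int parse_jfif(const uint8_t *p, int len, struct jfif *out)
{
    if (len < 9)
        return -1;
    out->ver_hi = p[0];
    out->ver_lo = p[1];
    out->units  = p[2];
    out->xdens  = (p[3] << 8) | p[4];
    out->ydens  = (p[5] << 8) | p[6];
    out->t_w    = p[7];
    out->t_h    = p[8];
    return 0;
}

int main(void)
{
    const uint8_t body[] = { 1, 2, 0, 0, 72, 0, 72, 0, 0 };
    struct jfif j;
    if (!parse_jfif(body, sizeof(body), &j))
        printf("JFIF %d.%02d, density %d:%d, thumbnail %dx%d\n",
               j.ver_hi, j.ver_lo, j.xdens, j.ydens, j.t_w, j.t_h);
    return 0;
}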
/* mjpeg_decode_app(), continued */
    if (id == AV_RL32("colr") && len > 0) {
        ...
    }
    if (id == AV_RL32("xfrm") && len > 0) {
        ...
    }
    ...
        } else if (type == 1) {
            ...
        }
    ...
        if (!(flags & 0x04)) {
            ...
        }
    ...
    /* APP1: EXIF metadata parsed with the TIFF/EXIF helpers */
    {
        int ret, le, ifd_offset, bytes_read;
        ...
    }
    /* APP2: ICC profile, possibly split across several markers */
    ...
        unsigned nummarkers;
        ...
        if (nummarkers == 0) {
            ...
        } else if (s->iccnum != 0 && nummarkers != s->iccnum) {
            ...
        } else if (seqno > nummarkers) {
            ...
        }
    ...
        av_log(s->avctx, AV_LOG_ERROR,
               "mjpeg: error, decode_app parser read over the end\n");
static int mjpeg_decode_com(MJpegDecodeContext *s)
{
    ...
        for (i = 0; i < len - 2; i++)
            cbuf[i] = get_bits(&s->gb, 8);
        if (i > 0 && cbuf[i - 1] == '\n')
            cbuf[i - 1] = 0;
        ...
        if (!strncmp(cbuf, "AVID", 4)) {
            parse_avid(s, cbuf, len);
        } else if (!strcmp(cbuf, "CS=ITU601"))
            s->cs_itu601 = 1;
        else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32) && s->avctx->codec_tag) ||
                 (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
            s->flipped = 1;
        else if (!strcmp(cbuf, "MULTISCOPE II")) {
            s->avctx->sample_aspect_ratio = (AVRational) { 1, 2 };
            ...
        }
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
{
    const uint8_t *buf_ptr;
    unsigned int v, v2;
    int val;
    int skipped = 0;

    buf_ptr = *pbuf_ptr;
    while (buf_end - buf_ptr > 1) {
        v  = *buf_ptr++;
        v2 = *buf_ptr;
        if ((v == 0xff) && (v2 >= SOF0) && (v2 <= COM) && buf_ptr < buf_end) {
            val = *buf_ptr++;
            goto found;
        }
        skipped++;
    }
    buf_ptr = buf_end;
    val = -1;
found:
    ff_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
    *pbuf_ptr = buf_ptr;
    return val;
}
int ff_mjpeg_find_marker(MJpegDecodeContext *s,
                         const uint8_t **buf_ptr, const uint8_t *buf_end,
                         const uint8_t **unescaped_buf_ptr,
                         int *unescaped_buf_size)
{
    ...
    /* unescape the buffer of an SOS segment; JPEG-LS needs special handling */
    if (start_code == SOS && !s->ls) {
        ...
#define copy_data_segment(skip) do {                 \
        ptrdiff_t length = (ptr - src) - (skip);     \
        if (length > 0) {                            \
            memcpy(dst, src, length);                \
            dst += length;                           \
            src  = ptr;                              \
        }                                            \
    } while (0)

        while (ptr < buf_end) {
            ...
            while (ptr < buf_end && x == 0xff) {
                ...
            }
            ...
            if (x < RST0 || x > RST7) {
                ...
            }
            ...
        }
#undef copy_data_segment

        *unescaped_buf_ptr  = s->buffer;
        *unescaped_buf_size = dst - s->buffer;
        memset(s->buffer + *unescaped_buf_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
        ...
               (buf_end - *buf_ptr) - (dst - s->buffer));
    } else if (start_code == SOS && s->ls) {
        ...
        while (src + t < buf_end) {
            ...
            while ((src + t < buf_end) && x == 0xff)
                ...
            ...
            if (x == 0xFF && b < t) {
                ...
            }
            ...
        }
        *unescaped_buf_ptr  = dst;
        *unescaped_buf_size = (bit_count + 7) >> 3;
        memset(s->buffer + *unescaped_buf_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
    } else {
        *unescaped_buf_ptr  = *buf_ptr;
        *unescaped_buf_size = buf_end - *buf_ptr;
    }
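The unescaping loop above copies entropy-coded data into s->buffer while dropping the 0x00 stuffing byte that follows every literal 0xFF and stopping at real markers. A minimal stand-alone version of the same idea (hypothetical names, not the FFmpeg routine):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Copy a JPEG entropy-coded segment, removing 0xFF 0x00 byte stuffing.
 * Stops when a real marker (0xFF followed by anything other than 0x00 or a
 * fill byte 0xFF) is reached.  Returns the unescaped length. */
static size_t unstuff(const uint8_t *src, size_t n, uint8_t *dst)
{
    size_t i = 0, o = 0;
    while (i < n) {
        uint8_t x = src[i++];
        if (x == 0xFF) {
            while (i < n && src[i] == 0xFF)   /* skip fill bytes */
                i++;
            if (i < n && src[i] == 0x00)
                i++;                          /* stuffed zero: keep the 0xFF */
            else
                break;                        /* real marker: stop */
        }
        dst[o++] = x;
    }
    return o;
}

int main(void)
{
    const uint8_t in[] = { 0x12, 0xFF, 0x00, 0x34, 0xFF, 0xD9 }; /* ends in EOI */
    uint8_t out[sizeof(in)];
    size_t n = unstuff(in, sizeof(in), out);

    for (size_t i = 0; i < n; i++)
        printf("%02X ", out[i]);              /* prints: 12 FF 34 */
    printf("\n");
    return 0;
}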
static void reset_icc_profile(MJpegDecodeContext *s)
{
    int i;
    ...
    for (i = 0; i < s->iccnum; i++)
        ...
}
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                          AVPacket *avpkt)
{
    ...
    int buf_size = avpkt->size;
    ...
    const uint8_t *buf_end, *buf_ptr;
    const uint8_t *unescaped_buf_ptr;
    ...
    int unescaped_buf_size;
    ...
    buf_end = buf + buf_size;
    while (buf_ptr < buf_end) {
        /* find start marker and unescape the segment that follows it */
        start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
                                          &unescaped_buf_ptr,
                                          &unescaped_buf_size);
        /* EOF */
        if (start_code < 0) {
            break;
        } else if (unescaped_buf_size > INT_MAX / 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
                   start_code, unescaped_buf_size, buf_size);
            return AVERROR_INVALIDDATA;
        }
        ...
                start_code, buf_end - buf_ptr);
        ...
        if (start_code >= RST0 && start_code <= RST7) {
            av_log(avctx, AV_LOG_DEBUG,
                   "restart marker: %d\n", start_code & 0x0f);
            /* APP fields */
        } else if (start_code >= APP0 && start_code <= APP15) {
            ...
            /* Comment */
        } else if (start_code == COM) {
            ...
        } else if (start_code == DQT) {
            ...
        }
        ...
            (start_code == SOF48 || start_code == LSE)) {
        ...
        switch (start_code) {
        ...
        }
        ...
        switch (start_code) {
        ...
            if (start_code == SOF0)
                ...
        ...
                av_log(avctx, AV_LOG_WARNING,
                       "Found EOI before any SOF, ignoring\n");
        ...
                goto the_end_no_picture;
        ...
                int qpw = (s->width + 15) / 16;
                ...
                memset(qp_table_buf->data, qp, qpw);
        ...
        default:
            av_log(avctx, AV_LOG_ERROR,
                   "mjpeg: unsupported coding type (%x)\n", start_code);
        }
        ...
        av_log(avctx, AV_LOG_DEBUG,
               "marker parser used %d bytes (%d bits)\n",
               ...);
    }
/* the_end: horizontally upscale chroma planes that the chosen output
 * pixel format stores at full resolution */
        for (i = 0; i < h; i++) {
            ...
            /* 2x upscale, done in place from right to left */
            if (is16bit) ((uint16_t *)line)[w - 1] = ((uint16_t *)line)[(w - 1) / 2];
            else                       line[w - 1] =               line[(w - 1) / 2];
            for (index = w - 2; index > 0; index--) {
                if (is16bit)
                    ((uint16_t *)line)[index] =
                        (((uint16_t *)line)[index / 2] + ((uint16_t *)line)[(index + 1) / 2]) >> 1;
                else
                    line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
            }
            ...
            /* 3x upscale */
            if (is16bit) {
                ((uint16_t *)line)[w - 1] = ((uint16_t *)line)[(w - 1) / 3];
                ((uint16_t *)line)[w - 2] = ((uint16_t *)line)[w - 1];
            } else {
                line[w - 1] = line[(w - 1) / 3];
                line[w - 2] = line[w - 1];
            }
            for (index = w - 3; index > 0; index--) {
                line[index] = (line[index / 3] + line[(index + 1) / 3] +
                               line[(index + 2) / 3] + 1) / 3;
            }
            ...
        }
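The loop above works in place because it walks the row from right to left, so the still-valid source samples in the left half are read before they are overwritten. A small sketch of the same right-to-left 2x upsample on a single row (hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* In-place 2x horizontal upsample of one row: the left half of `line`
 * holds dst_w/2 valid samples; after the call all dst_w samples are set.
 * Walking right-to-left keeps the source half intact while it is read. */
static void upsample_row_2x(uint8_t *line, int dst_w)
{
    line[dst_w - 1] = line[(dst_w - 1) / 2];
    for (int i = dst_w - 2; i > 0; i--)
        line[i] = (line[i / 2] + line[(i + 1) / 2]) >> 1;
    /* line[0] already holds the first source sample */
}

int main(void)
{
    uint8_t row[8] = { 10, 20, 30, 40 };      /* 4 source samples, 8 wide */
    upsample_row_2x(row, 8);
    for (int i = 0; i < 8; i++)
        printf("%d ", row[i]);                /* 10 15 20 25 30 35 40 40 */
    printf("\n");
    return 0;
}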
/* the_end, continued: vertical chroma upscale, optional flip, Adobe
 * transform fix-ups and attached ICC profile */
        for (i = h - 1; i; i--) {
            ...
            if (s->upscale_v[p] != 2 && (src1 == src2 || i == h - 1)) {
                memcpy(dst, src1, w);
            } else {
                for (index = 0; index < w; index++)
                    dst[index] = (src1[index] + src2[index]) >> 1;
            }
        }
    ...
        if (index && index < 3) {
            ...
            for (i = 0; i < h / 2; i++) {
                ...
                    FFSWAP(int, dst[j], dst2[j]);
            }
        }
    ...
        for (i = 0; i < h; i++) {
            ...
            for (index = 0; index < 4; index++) {
                ...
            }
            for (j = 0; j < w; j++) {
                ...
                int r = dst[0][j] * k;
                int g = dst[1][j] * k;
                int b = dst[2][j] * k;
                dst[0][j] = g * 257 >> 16;
                dst[1][j] = b * 257 >> 16;
                dst[2][j] = r * 257 >> 16;
                ...
            }
        }
    ...
        for (i = 0; i < h; i++) {
            ...
            for (index = 0; index < 4; index++) {
                ...
            }
            for (j = 0; j < w; j++) {
                ...
                int r = (255 - dst[0][j]) * k;
                int g = (128 - dst[1][j]) * k;
                int b = (128 - dst[2][j]) * k;
                dst[0][j] = r * 257 >> 16;
                dst[1][j] = (g * 257 >> 16) + 128;
                dst[2][j] = (b * 257 >> 16) + 128;
                ...
            }
        }
    ...
    /* hand any collected ICC chunks to the frame as side data */
    for (i = 0; i < s->iccnum; i++)
        ...
    ...
    for (i = 0; i < s->iccnum; i++) {
        ...
    }
    ...
    return buf_ptr - buf;
}
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
{
    ...
    for (i = 0; i < 3; i++) {
        for (j = 0; j < 4; j++)
            ff_free_vlc(&s->vlcs[i][j]);
    }
    ...
}

#if CONFIG_MJPEG_DECODER
#define OFFSET(x) offsetof(MJpegDecodeContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "extern_huff", "Use external huffman table.",
      ...
    { NULL },
};

static const AVClass mjpegdec_class = {
    ...
};
    ...
    .priv_class     = &mjpegdec_class,
    ...
#if CONFIG_MJPEG_NVDEC_HWACCEL
    ...
#endif
#if CONFIG_MJPEG_VAAPI_HWACCEL
    ...
#endif
#if CONFIG_THP_DECODER
    ...
#endif