Go to the documentation of this file.
54 const uint8_t *val_table,
int nb_codes,
55 int use_static,
int is_ac)
58 uint16_t huff_code[256];
59 uint16_t huff_sym[256];
66 for (
i = 0;
i < 256;
i++)
67 huff_sym[
i] =
i + 16 * is_ac;
70 huff_sym[0] = 16 * 256;
73 huff_code, 2, 2, huff_sym, 2, 2, use_static);
103 ht[
i].bits, ht[
i].values, ht[
i].codes,
104 0, ht[
i].class == 1);
108 if (ht[
i].
class < 2) {
109 memcpy(
s->raw_huffman_lengths[ht[
i].class][ht[
i].index],
111 memcpy(
s->raw_huffman_values[ht[
i].class][ht[
i].index],
112 ht[
i].values, ht[
i].length);
122 if (
len > 14 && buf[12] == 1)
123 s->interlace_polarity = 1;
124 if (
len > 14 && buf[12] == 2)
125 s->interlace_polarity = 0;
144 if (!
s->picture_ptr) {
148 s->picture_ptr =
s->picture;
158 s->first_picture = 1;
168 if (
s->extern_huff) {
174 "error using external huffman table, switching back to internal\n");
179 s->interlace_polarity = 1;
183 s->interlace_polarity = 1;
222 for (
i = 0;
i < 64;
i++) {
224 if (
s->quant_matrixes[
index][
i] == 0) {
232 s->quant_matrixes[
index][8]) >> 1;
235 len -= 1 + 64 * (1+pr);
243 int len,
index,
i,
class, n, v, code_max;
265 for (
i = 1;
i <= 16;
i++) {
270 if (len < n || n > 256)
274 for (
i = 0;
i < n;
i++) {
285 class,
index, code_max + 1);
287 code_max + 1, 0,
class > 0)) < 0)
293 code_max + 1, 0, 0)) < 0)
297 for (
i = 0;
i < 16;
i++)
298 s->raw_huffman_lengths[
class][
index][
i] = bits_table[
i + 1];
300 s->raw_huffman_values[
class][
index][
i] = val_table[
i];
313 memset(
s->upscale_h, 0,
sizeof(
s->upscale_h));
314 memset(
s->upscale_v, 0,
sizeof(
s->upscale_v));
324 if (
s->avctx->bits_per_raw_sample !=
bits) {
326 s->avctx->bits_per_raw_sample =
bits;
331 if (
bits == 9 && !
s->pegasus_rct)
334 if(
s->lossless &&
s->avctx->lowres){
343 if (
s->interlaced &&
s->width ==
width &&
s->height ==
height + 1)
349 if (
s->buf_size && (
width + 7) / 8 * ((
height + 7) / 8) >
s->buf_size * 4LL)
353 if (nb_components <= 0 ||
356 if (
s->interlaced && (
s->bottom_field == !
s->interlace_polarity)) {
357 if (nb_components !=
s->nb_components) {
359 "nb_components changing in interlaced picture\n");
363 if (
s->ls && !(
bits <= 8 || nb_components == 1)) {
365 "JPEG-LS that is not <= 8 "
366 "bits/component or 16-bit gray");
369 if (
len != 8 + 3 * nb_components) {
370 av_log(
s->avctx,
AV_LOG_ERROR,
"decode_sof0: error, len(%d) mismatch %d components\n",
len, nb_components);
374 s->nb_components = nb_components;
377 for (
i = 0;
i < nb_components;
i++) {
383 if (h_count[
i] >
s->h_max)
384 s->h_max = h_count[
i];
385 if (v_count[
i] >
s->v_max)
386 s->v_max = v_count[
i];
388 if (
s->quant_index[
i] >= 4) {
392 if (!h_count[
i] || !v_count[
i]) {
394 "Invalid sampling factor in component %d %d:%d\n",
395 i, h_count[
i], v_count[
i]);
400 i, h_count[
i], v_count[
i],
401 s->component_id[
i],
s->quant_index[
i]);
403 if ( nb_components == 4
404 &&
s->component_id[0] ==
'C' - 1
405 &&
s->component_id[1] ==
'M' - 1
406 &&
s->component_id[2] ==
'Y' - 1
407 &&
s->component_id[3] ==
'K' - 1)
408 s->adobe_transform = 0;
410 if (
s->ls && (
s->h_max > 1 ||
s->v_max > 1)) {
416 if (nb_components == 2) {
430 memcmp(
s->h_count, h_count,
sizeof(h_count)) ||
431 memcmp(
s->v_count, v_count,
sizeof(v_count))) {
437 memcpy(
s->h_count, h_count,
sizeof(h_count));
438 memcpy(
s->v_count, v_count,
sizeof(v_count));
443 if (
s->first_picture &&
444 (
s->multiscope != 2 ||
s->avctx->time_base.den >= 25 *
s->avctx->time_base.num) &&
445 s->org_height != 0 &&
446 s->height < ((
s->org_height * 3) / 4)) {
448 s->bottom_field =
s->interlace_polarity;
449 s->picture_ptr->interlaced_frame = 1;
450 s->picture_ptr->top_field_first = !
s->interlace_polarity;
458 s->first_picture = 0;
463 if (
s->got_picture &&
s->interlaced && (
s->bottom_field == !
s->interlace_polarity)) {
464 if (
s->progressive) {
469 if (
s->v_max == 1 &&
s->h_max == 1 &&
s->lossless==1 && (nb_components==3 || nb_components==4))
471 else if (!
s->lossless)
474 pix_fmt_id = ((unsigned)
s->h_count[0] << 28) | (
s->v_count[0] << 24) |
475 (
s->h_count[1] << 20) | (
s->v_count[1] << 16) |
476 (
s->h_count[2] << 12) | (
s->v_count[2] << 8) |
477 (
s->h_count[3] << 4) |
s->v_count[3];
481 if (!(pix_fmt_id & 0xD0D0D0D0))
482 pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
483 if (!(pix_fmt_id & 0x0D0D0D0D))
484 pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
486 for (
i = 0;
i < 8;
i++) {
487 int j = 6 + (
i&1) - (
i&6);
488 int is = (pix_fmt_id >> (4*
i)) & 0xF;
489 int js = (pix_fmt_id >> (4*j)) & 0xF;
491 if (
is == 1 && js != 2 && (i < 2 || i > 5))
492 js = (pix_fmt_id >> ( 8 + 4*(
i&1))) & 0xF;
493 if (
is == 1 && js != 2 && (i < 2 || i > 5))
494 js = (pix_fmt_id >> (16 + 4*(
i&1))) & 0xF;
496 if (
is == 1 && js == 2) {
497 if (
i & 1)
s->upscale_h[j/2] = 1;
498 else s->upscale_v[j/2] = 1;
502 switch (pix_fmt_id) {
512 if (
s->adobe_transform == 0
513 ||
s->component_id[0] ==
'R' - 1 &&
s->component_id[1] ==
'G' - 1 &&
s->component_id[2] ==
'B' - 1) {
527 if (
s->adobe_transform == 0 &&
s->bits <= 8) {
538 if (
s->adobe_transform == 0 &&
s->bits <= 8) {
540 s->upscale_v[1] =
s->upscale_v[2] = 1;
541 s->upscale_h[1] =
s->upscale_h[2] = 1;
542 }
else if (
s->adobe_transform == 2 &&
s->bits <= 8) {
544 s->upscale_v[1] =
s->upscale_v[2] = 1;
545 s->upscale_h[1] =
s->upscale_h[2] = 1;
585 if (
s->component_id[0] ==
'Q' &&
s->component_id[1] ==
'F' &&
s->component_id[2] ==
'A') {
589 s->upscale_v[0] =
s->upscale_v[1] = 1;
591 if (pix_fmt_id == 0x14111100)
592 s->upscale_v[1] =
s->upscale_v[2] = 1;
600 if (
s->component_id[0] ==
'Q' &&
s->component_id[1] ==
'F' &&
s->component_id[2] ==
'A') {
604 s->upscale_h[0] =
s->upscale_h[1] = 1;
616 s->upscale_h[1] =
s->upscale_h[2] = 2;
632 if (pix_fmt_id == 0x42111100) {
635 s->upscale_h[1] =
s->upscale_h[2] = 1;
636 }
else if (pix_fmt_id == 0x24111100) {
639 s->upscale_v[1] =
s->upscale_v[2] = 1;
640 }
else if (pix_fmt_id == 0x23111100) {
643 s->upscale_v[1] =
s->upscale_v[2] = 2;
655 memset(
s->upscale_h, 0,
sizeof(
s->upscale_h));
656 memset(
s->upscale_v, 0,
sizeof(
s->upscale_v));
668 memset(
s->upscale_h, 0,
sizeof(
s->upscale_h));
669 memset(
s->upscale_v, 0,
sizeof(
s->upscale_v));
670 if (
s->nb_components == 3) {
672 }
else if (
s->nb_components != 1) {
675 }
else if (
s->palette_index &&
s->bits <= 8)
677 else if (
s->bits <= 8)
689 if (
s->avctx->pix_fmt ==
s->hwaccel_sw_pix_fmt && !size_change) {
690 s->avctx->pix_fmt =
s->hwaccel_pix_fmt;
693 #if CONFIG_MJPEG_NVDEC_HWACCEL
696 #if CONFIG_MJPEG_VAAPI_HWACCEL
703 if (
s->hwaccel_pix_fmt < 0)
706 s->hwaccel_sw_pix_fmt =
s->avctx->pix_fmt;
707 s->avctx->pix_fmt =
s->hwaccel_pix_fmt;
712 s->picture_ptr->key_frame = 1;
721 s->picture_ptr->key_frame = 1;
724 for (
i = 0;
i < 4;
i++)
725 s->linesize[
i] =
s->picture_ptr->linesize[
i] <<
s->interlaced;
727 ff_dlog(
s->avctx,
"%d %d %d %d %d %d\n",
728 s->width,
s->height,
s->linesize[0],
s->linesize[1],
729 s->interlaced,
s->avctx->height);
733 if ((
s->rgb && !
s->lossless && !
s->ls) ||
734 (!
s->rgb &&
s->ls &&
s->nb_components > 1) ||
742 if (
s->progressive) {
743 int bw = (
width +
s->h_max * 8 - 1) / (
s->h_max * 8);
744 int bh = (
height +
s->v_max * 8 - 1) / (
s->v_max * 8);
745 for (
i = 0;
i <
s->nb_components;
i++) {
746 int size = bw * bh *
s->h_count[
i] *
s->v_count[
i];
751 if (!
s->blocks[
i] || !
s->last_nnz[
i])
753 s->block_stride[
i] = bw *
s->h_count[
i];
755 memset(
s->coefs_finished, 0,
sizeof(
s->coefs_finished));
758 if (
s->avctx->hwaccel) {
759 s->hwaccel_picture_private =
760 av_mallocz(
s->avctx->hwaccel->frame_priv_data_size);
761 if (!
s->hwaccel_picture_private)
764 ret =
s->avctx->hwaccel->start_frame(
s->avctx,
s->raw_image_buffer,
765 s->raw_image_buffer_size);
777 if (code < 0 || code > 16) {
779 "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
780 0, dc_index, &
s->vlcs[0][dc_index]);
792 int dc_index,
int ac_index, uint16_t *quant_matrix)
798 if (
val == 0xfffff) {
802 val =
val * (unsigned)quant_matrix[0] +
s->last_dc[component];
804 s->last_dc[component] =
val;
813 i += ((unsigned)
code) >> 4;
821 int sign = (~cache) >> 31;
831 j =
s->scantable.permutated[
i];
841 int component,
int dc_index,
842 uint16_t *quant_matrix,
int Al)
845 s->bdsp.clear_block(
block);
847 if (
val == 0xfffff) {
851 val = (
val * (quant_matrix[0] << Al)) +
s->last_dc[component];
852 s->last_dc[component] =
val;
859 uint8_t *last_nnz,
int ac_index,
860 uint16_t *quant_matrix,
861 int ss,
int se,
int Al,
int *EOBRUN)
873 for (
i =
ss; ;
i++) {
886 int sign = (~cache) >> 31;
894 j =
s->scantable.permutated[
se];
901 j =
s->scantable.permutated[
i];
931 #define REFINE_BIT(j) { \
932 UPDATE_CACHE(re, &s->gb); \
933 sign = block[j] >> 15; \
934 block[j] += SHOW_UBITS(re, &s->gb, 1) * \
935 ((quant_matrix[i] ^ sign) - sign) << Al; \
936 LAST_SKIP_BITS(re, &s->gb, 1); \
944 av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
949 j = s->scantable.permutated[i]; \
952 else if (run-- == 0) \
959 int ac_index, uint16_t *quant_matrix,
960 int ss,
int se,
int Al,
int *EOBRUN)
963 int last =
FFMIN(
se, *last_nnz);
979 j =
s->scantable.permutated[
i];
1010 for (;
i <= last;
i++) {
1011 j =
s->scantable.permutated[
i];
1027 if (
s->restart_interval) {
1031 for (
i = 0;
i < nb_components;
i++)
1032 s->last_dc[
i] = (4 <<
s->bits);
1037 if (
s->restart_count == 0) {
1045 for (
i = 0;
i < nb_components;
i++)
1046 s->last_dc[
i] = (4 <<
s->bits);
1062 int left[4], top[4], topleft[4];
1063 const int linesize =
s->linesize[0];
1064 const int mask = ((1 <<
s->bits) - 1) << point_transform;
1065 int resync_mb_y = 0;
1066 int resync_mb_x = 0;
1069 if (!
s->bayer &&
s->nb_components < 3)
1071 if (
s->bayer &&
s->nb_components > 2)
1073 if (
s->nb_components <= 0 ||
s->nb_components > 4)
1075 if (
s->v_max != 1 ||
s->h_max != 1 || !
s->lossless)
1079 s->restart_count =
s->restart_interval;
1081 if (
s->restart_interval == 0)
1082 s->restart_interval = INT_MAX;
1085 width =
s->mb_width / nb_components;
1090 if (!
s->ljpeg_buffer)
1095 for (
i = 0;
i < 4;
i++)
1098 for (mb_y = 0; mb_y <
s->mb_height; mb_y++) {
1099 uint8_t *ptr =
s->picture_ptr->data[0] + (linesize * mb_y);
1101 if (
s->interlaced &&
s->bottom_field)
1102 ptr += linesize >> 1;
1104 for (
i = 0;
i < 4;
i++)
1107 if ((mb_y *
s->width) %
s->restart_interval == 0) {
1108 for (
i = 0;
i < 6;
i++)
1109 vpred[
i] = 1 << (
s->bits-1);
1112 for (mb_x = 0; mb_x <
width; mb_x++) {
1113 int modified_predictor = predictor;
1120 if (
s->restart_interval && !
s->restart_count){
1121 s->restart_count =
s->restart_interval;
1125 top[
i] =
left[
i]= topleft[
i]= 1 << (
s->bits - 1);
1127 if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
1128 modified_predictor = 1;
1130 for (
i=0;
i<nb_components;
i++) {
1133 topleft[
i] = top[
i];
1140 if (!
s->bayer || mb_x) {
1150 mask & (
pred + (unsigned)(
dc * (1 << point_transform)));
1153 if (
s->restart_interval && !--
s->restart_count) {
1158 if (
s->rct &&
s->nb_components == 4) {
1159 for (mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1160 ptr[4*mb_x + 2] =
buffer[mb_x][0] - ((
buffer[mb_x][1] +
buffer[mb_x][2] - 0x200) >> 2);
1161 ptr[4*mb_x + 1] =
buffer[mb_x][1] + ptr[4*mb_x + 2];
1162 ptr[4*mb_x + 3] =
buffer[mb_x][2] + ptr[4*mb_x + 2];
1163 ptr[4*mb_x + 0] =
buffer[mb_x][3];
1165 }
else if (
s->nb_components == 4) {
1166 for(
i=0;
i<nb_components;
i++) {
1167 int c=
s->comp_index[
i];
1169 for(mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1172 }
else if(
s->bits == 9) {
1175 for(mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1176 ((uint16_t*)ptr)[4*mb_x+
c] =
buffer[mb_x][
i];
1180 }
else if (
s->rct) {
1181 for (mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1182 ptr[3*mb_x + 1] =
buffer[mb_x][0] - ((
buffer[mb_x][1] +
buffer[mb_x][2] - 0x200) >> 2);
1183 ptr[3*mb_x + 0] =
buffer[mb_x][1] + ptr[3*mb_x + 1];
1184 ptr[3*mb_x + 2] =
buffer[mb_x][2] + ptr[3*mb_x + 1];
1186 }
else if (
s->pegasus_rct) {
1187 for (mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1189 ptr[3*mb_x + 0] =
buffer[mb_x][1] + ptr[3*mb_x + 1];
1190 ptr[3*mb_x + 2] =
buffer[mb_x][2] + ptr[3*mb_x + 1];
1192 }
else if (
s->bayer) {
1193 if (nb_components == 1) {
1195 for (mb_x = 0; mb_x <
width; mb_x++)
1196 ((uint16_t*)ptr)[mb_x] =
buffer[mb_x][0];
1197 }
else if (nb_components == 2) {
1198 for (mb_x = 0; mb_x <
width; mb_x++) {
1199 ((uint16_t*)ptr)[2*mb_x + 0] =
buffer[mb_x][0];
1200 ((uint16_t*)ptr)[2*mb_x + 1] =
buffer[mb_x][1];
1204 for(
i=0;
i<nb_components;
i++) {
1205 int c=
s->comp_index[
i];
1207 for(mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1210 }
else if(
s->bits == 9) {
1213 for(mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1214 ((uint16_t*)ptr)[3*mb_x+2-
c] =
buffer[mb_x][
i];
1224 int point_transform,
int nb_components)
1226 int i, mb_x, mb_y,
mask;
1227 int bits= (
s->bits+7)&~7;
1228 int resync_mb_y = 0;
1229 int resync_mb_x = 0;
1231 point_transform +=
bits -
s->bits;
1232 mask = ((1 <<
s->bits) - 1) << point_transform;
1234 av_assert0(nb_components>=1 && nb_components<=4);
1236 for (mb_y = 0; mb_y <
s->mb_height; mb_y++) {
1237 for (mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1242 if (
s->restart_interval && !
s->restart_count){
1243 s->restart_count =
s->restart_interval;
1248 if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->
interlaced){
1249 int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
1250 int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
1251 for (
i = 0;
i < nb_components;
i++) {
1254 int n,
h, v,
x, y,
c, j, linesize;
1255 n =
s->nb_blocks[
i];
1256 c =
s->comp_index[
i];
1261 linesize=
s->linesize[
c];
1263 if(
bits>8) linesize /= 2;
1265 for(j=0; j<n; j++) {
1271 if (
h * mb_x +
x >=
s->width
1272 || v * mb_y + y >=
s->height) {
1274 }
else if (
bits<=8) {
1275 ptr =
s->picture_ptr->data[
c] + (linesize * (v * mb_y + y)) + (
h * mb_x +
x);
1277 if(
x==0 && leftcol){
1283 if(
x==0 && leftcol){
1284 pred= ptr[-linesize];
1286 PREDICT(
pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1290 if (
s->interlaced &&
s->bottom_field)
1291 ptr += linesize >> 1;
1293 *ptr=
pred + ((unsigned)
dc << point_transform);
1295 ptr16 = (uint16_t*)(
s->picture_ptr->data[
c] + 2*(linesize * (v * mb_y + y)) + 2*(
h * mb_x +
x));
1297 if(
x==0 && leftcol){
1303 if(
x==0 && leftcol){
1304 pred= ptr16[-linesize];
1306 PREDICT(
pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1310 if (
s->interlaced &&
s->bottom_field)
1311 ptr16 += linesize >> 1;
1313 *ptr16=
pred + ((unsigned)
dc << point_transform);
1322 for (
i = 0;
i < nb_components;
i++) {
1325 int n,
h, v,
x, y,
c, j, linesize,
dc;
1326 n =
s->nb_blocks[
i];
1327 c =
s->comp_index[
i];
1332 linesize =
s->linesize[
c];
1334 if(
bits>8) linesize /= 2;
1336 for (j = 0; j < n; j++) {
1342 if (
h * mb_x +
x >=
s->width
1343 || v * mb_y + y >=
s->height) {
1345 }
else if (
bits<=8) {
1346 ptr =
s->picture_ptr->data[
c] +
1347 (linesize * (v * mb_y + y)) +
1349 PREDICT(
pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
1352 *ptr =
pred + ((unsigned)
dc << point_transform);
1354 ptr16 = (uint16_t*)(
s->picture_ptr->data[
c] + 2*(linesize * (v * mb_y + y)) + 2*(
h * mb_x +
x));
1355 PREDICT(
pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
1358 *ptr16=
pred + ((unsigned)
dc << point_transform);
1368 if (
s->restart_interval && !--
s->restart_count) {
1379 int linesize,
int lowres)
1382 case 0:
s->hdsp.put_pixels_tab[1][0](dst,
src, linesize, 8);
1388 case 3: *dst = *
src;
1395 int block_x, block_y;
1396 int size = 8 >>
s->avctx->lowres;
1398 for (block_y=0; block_y<
size; block_y++)
1399 for (block_x=0; block_x<
size; block_x++)
1400 *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 -
s->bits;
1402 for (block_y=0; block_y<
size; block_y++)
1403 for (block_x=0; block_x<
size; block_x++)
1404 *(ptr + block_x + block_y*linesize) <<= 8 -
s->bits;
1409 int Al,
const uint8_t *mb_bitmask,
1410 int mb_bitmask_size,
1413 int i, mb_x, mb_y, chroma_h_shift, chroma_v_shift, chroma_width, chroma_height;
1418 int bytes_per_pixel = 1 + (
s->bits > 8);
1421 if (mb_bitmask_size != (
s->mb_width *
s->mb_height + 7)>>3) {
1425 init_get_bits(&mb_bitmask_gb, mb_bitmask,
s->mb_width *
s->mb_height);
1428 s->restart_count = 0;
1435 for (
i = 0;
i < nb_components;
i++) {
1436 int c =
s->comp_index[
i];
1437 data[
c] =
s->picture_ptr->data[
c];
1438 reference_data[
c] = reference ? reference->
data[
c] :
NULL;
1439 linesize[
c] =
s->linesize[
c];
1440 s->coefs_finished[
c] |= 1;
1443 for (mb_y = 0; mb_y <
s->mb_height; mb_y++) {
1444 for (mb_x = 0; mb_x <
s->mb_width; mb_x++) {
1447 if (
s->restart_interval && !
s->restart_count)
1448 s->restart_count =
s->restart_interval;
1455 for (
i = 0;
i < nb_components;
i++) {
1457 int n,
h, v,
x, y,
c, j;
1459 n =
s->nb_blocks[
i];
1460 c =
s->comp_index[
i];
1465 for (j = 0; j < n; j++) {
1466 block_offset = (((linesize[
c] * (v * mb_y + y) * 8) +
1467 (
h * mb_x +
x) * 8 * bytes_per_pixel) >>
s->avctx->lowres);
1469 if (
s->interlaced &&
s->bottom_field)
1470 block_offset += linesize[
c] >> 1;
1471 if ( 8*(
h * mb_x +
x) < ((
c == 1) || (
c == 2) ? chroma_width :
s->width)
1472 && 8*(v * mb_y + y) < ((
c == 1) || (
c == 2) ? chroma_height :
s->height)) {
1473 ptr =
data[
c] + block_offset;
1476 if (!
s->progressive) {
1480 linesize[
c],
s->avctx->lowres);
1483 s->bdsp.clear_block(
s->block);
1485 s->dc_index[
i],
s->ac_index[
i],
1486 s->quant_matrixes[
s->quant_sindex[
i]]) < 0) {
1488 "error y=%d x=%d\n", mb_y, mb_x);
1492 s->idsp.idct_put(ptr, linesize[
c],
s->block);
1498 int block_idx =
s->block_stride[
c] * (v * mb_y + y) +
1500 int16_t *
block =
s->blocks[
c][block_idx];
1503 s->quant_matrixes[
s->quant_sindex[
i]][0] << Al;
1505 s->quant_matrixes[
s->quant_sindex[
i]],
1508 "error y=%d x=%d\n", mb_y, mb_x);
1512 ff_dlog(
s->avctx,
"mb: %d %d processed\n", mb_y, mb_x);
1513 ff_dlog(
s->avctx,
"%d %d %d %d %d %d %d %d \n",
1514 mb_x, mb_y,
x, y,
c,
s->bottom_field,
1515 (v * mb_y + y) * 8, (
h * mb_x +
x) * 8);
1530 int se,
int Ah,
int Al)
1534 int c =
s->comp_index[0];
1535 uint16_t *quant_matrix =
s->quant_matrixes[
s->quant_sindex[0]];
1538 if (se < ss || se > 63) {
1545 s->coefs_finished[
c] |= (2ULL <<
se) - (1ULL <<
ss);
1547 s->restart_count = 0;
1549 for (mb_y = 0; mb_y <
s->mb_height; mb_y++) {
1550 int block_idx = mb_y *
s->block_stride[
c];
1551 int16_t (*
block)[64] = &
s->blocks[
c][block_idx];
1552 uint8_t *last_nnz = &
s->last_nnz[
c][block_idx];
1554 av_log(
s->avctx,
AV_LOG_ERROR,
"bitstream truncated in mjpeg_decode_scan_progressive_ac\n");
1557 for (mb_x = 0; mb_x <
s->mb_width; mb_x++,
block++, last_nnz++) {
1559 if (
s->restart_interval && !
s->restart_count)
1560 s->restart_count =
s->restart_interval;
1564 quant_matrix,
ss,
se, Al, &EOBRUN);
1567 quant_matrix,
ss,
se, Al, &EOBRUN);
1570 "error y=%d x=%d\n", mb_y, mb_x);
1585 const int bytes_per_pixel = 1 + (
s->bits > 8);
1586 const int block_size =
s->lossless ? 1 : 8;
1588 for (
c = 0;
c <
s->nb_components;
c++) {
1590 int linesize =
s->linesize[
c];
1591 int h =
s->h_max /
s->h_count[
c];
1592 int v =
s->v_max /
s->v_count[
c];
1593 int mb_width = (
s->width +
h * block_size - 1) / (
h * block_size);
1594 int mb_height = (
s->height + v * block_size - 1) / (v * block_size);
1596 if (~
s->coefs_finished[
c])
1599 if (
s->interlaced &&
s->bottom_field)
1600 data += linesize >> 1;
1602 for (mb_y = 0; mb_y < mb_height; mb_y++) {
1603 uint8_t *ptr =
data + (mb_y * linesize * 8 >>
s->avctx->lowres);
1604 int block_idx = mb_y *
s->block_stride[
c];
1605 int16_t (*
block)[64] = &
s->blocks[
c][block_idx];
1606 for (mb_x = 0; mb_x < mb_width; mb_x++,
block++) {
1607 s->idsp.idct_put(ptr, linesize, *
block);
1610 ptr += bytes_per_pixel*8 >>
s->avctx->lowres;
1617 int mb_bitmask_size,
const AVFrame *reference)
1619 int len, nb_components,
i,
h, v, predictor, point_transform;
1621 const int block_size =
s->lossless ? 1 : 8;
1622 int ilv, prev_shift;
1624 if (!
s->got_picture) {
1626 "Can not process SOS before SOF, skipping\n");
1631 if (reference->
width !=
s->picture_ptr->width ||
1632 reference->
height !=
s->picture_ptr->height ||
1633 reference->
format !=
s->picture_ptr->format) {
1644 "decode_sos: nb_components (%d)",
1648 if (
len != 6 + 2 * nb_components) {
1652 for (
i = 0;
i < nb_components;
i++) {
1657 if (
id ==
s->component_id[
index])
1659 if (
index ==
s->nb_components) {
1661 "decode_sos: index(%d) out of components\n",
index);
1665 if (
s->avctx->codec_tag ==
MKTAG(
'M',
'T',
'S',
'J')
1666 && nb_components == 3 &&
s->nb_components == 3 &&
i)
1669 s->quant_sindex[
i] =
s->quant_index[
index];
1671 s->h_scount[
i] =
s->h_count[
index];
1672 s->v_scount[
i] =
s->v_count[
index];
1674 if((nb_components == 1 || nb_components == 3) &&
s->nb_components == 3 &&
s->avctx->pix_fmt ==
AV_PIX_FMT_GBR24P)
1682 if (
s->dc_index[
i] < 0 ||
s->ac_index[
i] < 0 ||
1683 s->dc_index[
i] >= 4 ||
s->ac_index[
i] >= 4)
1685 if (!
s->vlcs[0][
s->dc_index[
i]].table || !(
s->progressive ?
s->vlcs[2][
s->ac_index[0]].table :
s->vlcs[1][
s->ac_index[
i]].table))
1691 if(
s->avctx->codec_tag !=
AV_RL32(
"CJPG")){
1695 prev_shift = point_transform = 0;
1697 if (nb_components > 1) {
1699 s->mb_width = (
s->width +
s->h_max * block_size - 1) / (
s->h_max * block_size);
1700 s->mb_height = (
s->height +
s->v_max * block_size - 1) / (
s->v_max * block_size);
1701 }
else if (!
s->ls) {
1702 h =
s->h_max /
s->h_scount[0];
1703 v =
s->v_max /
s->v_scount[0];
1704 s->mb_width = (
s->width +
h * block_size - 1) / (
h * block_size);
1705 s->mb_height = (
s->height + v * block_size - 1) / (v * block_size);
1706 s->nb_blocks[0] = 1;
1713 s->lossless ?
"lossless" :
"sequential DCT",
s->rgb ?
"RGB" :
"",
1714 predictor, point_transform, ilv,
s->bits,
s->mjpb_skiptosod,
1715 s->pegasus_rct ?
"PRCT" : (
s->rct ?
"RCT" :
""), nb_components);
1719 for (
i =
s->mjpb_skiptosod;
i > 0;
i--)
1723 for (
i = 0;
i < nb_components;
i++)
1724 s->last_dc[
i] = (4 <<
s->bits);
1726 if (
s->avctx->hwaccel) {
1729 s->raw_scan_buffer_size >= bytes_to_start);
1731 ret =
s->avctx->hwaccel->decode_slice(
s->avctx,
1732 s->raw_scan_buffer + bytes_to_start,
1733 s->raw_scan_buffer_size - bytes_to_start);
1737 }
else if (
s->lossless) {
1744 point_transform, ilv)) < 0)
1747 if (
s->rgb ||
s->bayer) {
1753 nb_components)) < 0)
1758 if (
s->progressive && predictor) {
1762 point_transform)) < 0)
1766 prev_shift, point_transform,
1767 mb_bitmask, mb_bitmask_size, reference)) < 0)
1772 if (
s->interlaced &&
1781 s->bottom_field ^= 1;
1799 s->restart_count = 0;
1801 s->restart_interval);
1848 int t_w, t_h, v1, v2;
1856 s->avctx->sample_aspect_ratio.num =
get_bits(&
s->gb, 16);
1857 s->avctx->sample_aspect_ratio.den =
get_bits(&
s->gb, 16);
1858 if (
s->avctx->sample_aspect_ratio.num <= 0
1859 ||
s->avctx->sample_aspect_ratio.den <= 0) {
1860 s->avctx->sample_aspect_ratio.num = 0;
1861 s->avctx->sample_aspect_ratio.den = 1;
1866 "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
1868 s->avctx->sample_aspect_ratio.num,
1869 s->avctx->sample_aspect_ratio.den);
1877 if (
len -10 - (t_w * t_h * 3) > 0)
1878 len -= t_w * t_h * 3;
1895 av_log(
s->avctx,
AV_LOG_INFO,
"mjpeg: Adobe header found, transform=%d\n",
s->adobe_transform);
1902 int pegasus_rct =
s->pegasus_rct;
1905 "Pegasus lossless jpeg header found\n");
1925 if (rgb !=
s->rgb || pegasus_rct !=
s->pegasus_rct) {
1931 s->pegasus_rct = pegasus_rct;
1971 }
else if (
type == 1) {
1983 if (!(
flags & 0x04)) {
1993 int ret, le, ifd_offset, bytes_read;
2026 if ((
s->start_code ==
APP1) && (
len > (0x28 - 8))) {
2049 unsigned nummarkers;
2069 if (nummarkers == 0) {
2072 }
else if (
s->iccnum != 0 && nummarkers !=
s->iccnum) {
2075 }
else if (seqno > nummarkers) {
2081 if (
s->iccnum == 0) {
2082 s->iccdata =
av_mallocz(nummarkers *
sizeof(*(
s->iccdata)));
2083 s->iccdatalens =
av_mallocz(nummarkers *
sizeof(*(
s->iccdatalens)));
2084 if (!
s->iccdata || !
s->iccdatalens) {
2088 s->iccnum = nummarkers;
2091 if (
s->iccdata[seqno - 1]) {
2096 s->iccdatalens[seqno - 1] =
len;
2098 if (!
s->iccdata[seqno - 1]) {
2108 if (
s->iccread >
s->iccnum)
2116 "mjpeg: error, decode_app parser read over the end\n");
2132 for (
i = 0;
i <
len - 2;
i++)
2134 if (
i > 0 && cbuf[
i - 1] ==
'\n')
2143 if (!strncmp(cbuf,
"AVID", 4)) {
2145 }
else if (!strcmp(cbuf,
"CS=ITU601"))
2147 else if ((!strncmp(cbuf,
"Intel(R) JPEG Library, version 1", 32) &&
s->avctx->codec_tag) ||
2148 (!strncmp(cbuf,
"Metasoft MJPEG Codec", 20)))
2150 else if (!strcmp(cbuf,
"MULTISCOPE II")) {
2151 s->avctx->sample_aspect_ratio = (
AVRational) { 1, 2 };
2170 buf_ptr = *pbuf_ptr;
2171 while (buf_end - buf_ptr > 1) {
2174 if ((v == 0xff) && (v2 >=
SOF0) && (v2 <=
COM) && buf_ptr < buf_end) {
2183 ff_dlog(
NULL,
"find_marker skipped %d bytes\n", skipped);
2184 *pbuf_ptr = buf_ptr;
2190 const uint8_t **unescaped_buf_ptr,
2191 int *unescaped_buf_size)
2206 #define copy_data_segment(skip) do { \
2207 ptrdiff_t length = (ptr - src) - (skip); \
2209 memcpy(dst, src, length); \
2219 while (ptr < buf_end) {
2224 while (ptr < buf_end &&
x == 0xff) {
2239 if (x < RST0 || x >
RST7) {
2249 #undef copy_data_segment
2251 *unescaped_buf_ptr =
s->buffer;
2252 *unescaped_buf_size = dst -
s->buffer;
2253 memset(
s->buffer + *unescaped_buf_size, 0,
2257 (buf_end - *buf_ptr) - (dst -
s->buffer));
2266 while (
src + t < buf_end) {
2269 while ((
src + t < buf_end) &&
x == 0xff)
2284 if (
x == 0xFF &&
b < t) {
2296 *unescaped_buf_ptr = dst;
2297 *unescaped_buf_size = (bit_count + 7) >> 3;
2298 memset(
s->buffer + *unescaped_buf_size, 0,
2301 *unescaped_buf_ptr = *buf_ptr;
2302 *unescaped_buf_size = buf_end - *buf_ptr;
2313 for (
i = 0;
i <
s->iccnum;
i++)
2327 int buf_size = avpkt->
size;
2329 const uint8_t *buf_end, *buf_ptr;
2330 const uint8_t *unescaped_buf_ptr;
2332 int unescaped_buf_size;
2338 s->buf_size = buf_size;
2342 s->adobe_transform = -1;
2348 buf_end = buf + buf_size;
2349 while (buf_ptr < buf_end) {
2353 &unescaped_buf_size);
2357 }
else if (unescaped_buf_size > INT_MAX / 8) {
2359 "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
2423 s->restart_interval = 0;
2424 s->restart_count = 0;
2425 s->raw_image_buffer = buf_ptr;
2426 s->raw_image_buffer_size = buf_end - buf_ptr;
2481 s->progressive &&
s->cur_scan &&
s->got_picture)
2484 if (!
s->got_picture) {
2486 "Found EOI before any SOF, ignoring\n");
2489 if (
s->interlaced) {
2490 s->bottom_field ^= 1;
2492 if (
s->bottom_field == !
s->interlace_polarity)
2497 goto the_end_no_picture;
2499 if (
s->avctx->hwaccel) {
2500 ret =
s->avctx->hwaccel->end_frame(
s->avctx);
2515 int qpw = (
s->width + 15) / 16;
2518 memset(qp_table_buf->
data, qp, qpw);
2528 s->raw_scan_buffer = buf_ptr;
2529 s->raw_scan_buffer_size = buf_end - buf_ptr;
2556 "mjpeg: unsupported coding type (%x)\n",
start_code);
2564 "marker parser used %d bytes (%d bits)\n",
2567 if (
s->got_picture &&
s->cur_scan) {
2600 for (p = 0; p<
s->nb_components; p++) {
2604 if (!
s->upscale_h[p])
2610 if (
s->upscale_v[p] == 1)
2613 for (
i = 0;
i <
h;
i++) {
2614 if (
s->upscale_h[p] == 1) {
2615 if (is16bit) ((uint16_t*)
line)[
w - 1] = ((uint16_t*)
line)[(
w - 1) / 2];
2623 }
else if (
s->upscale_h[p] == 2) {
2625 ((uint16_t*)
line)[
w - 1] = ((uint16_t*)
line)[(
w - 1) / 3];
2627 ((uint16_t*)
line)[
w - 2] = ((uint16_t*)
line)[
w - 1];
2637 line +=
s->linesize[p];
2662 for (p = 0; p <
s->nb_components; p++) {
2666 if (!
s->upscale_v[p])
2672 dst = &((
uint8_t *)
s->picture_ptr->data[p])[(
h - 1) *
s->linesize[p]];
2674 uint8_t *
src1 = &((
uint8_t *)
s->picture_ptr->data[p])[
i *
s->upscale_v[p] / (
s->upscale_v[p] + 1) *
s->linesize[p]];
2675 uint8_t *src2 = &((
uint8_t *)
s->picture_ptr->data[p])[(
i + 1) *
s->upscale_v[p] / (
s->upscale_v[p] + 1) *
s->linesize[p]];
2676 if (
s->upscale_v[p] != 2 && (
src1 == src2 ||
i ==
h - 1)) {
2677 memcpy(dst,
src1,
w);
2682 dst -=
s->linesize[p];
2686 if (
s->flipped && !
s->rgb) {
2695 int w =
s->picture_ptr->width;
2696 int h =
s->picture_ptr->height;
2703 for (
i=0;
i<
h/2;
i++) {
2705 FFSWAP(
int, dst[j], dst2[j]);
2706 dst +=
s->picture_ptr->linesize[
index];
2707 dst2 -=
s->picture_ptr->linesize[
index];
2713 int w =
s->picture_ptr->width;
2714 int h =
s->picture_ptr->height;
2716 for (
i=0;
i<
h;
i++) {
2721 +
s->picture_ptr->linesize[
index]*
i;
2723 for (j=0; j<
w; j++) {
2725 int r = dst[0][j] * k;
2726 int g = dst[1][j] * k;
2727 int b = dst[2][j] * k;
2728 dst[0][j] =
g*257 >> 16;
2729 dst[1][j] =
b*257 >> 16;
2730 dst[2][j] =
r*257 >> 16;
2736 int w =
s->picture_ptr->width;
2737 int h =
s->picture_ptr->height;
2739 for (
i=0;
i<
h;
i++) {
2744 +
s->picture_ptr->linesize[
index]*
i;
2746 for (j=0; j<
w; j++) {
2748 int r = (255 - dst[0][j]) * k;
2749 int g = (128 - dst[1][j]) * k;
2750 int b = (128 - dst[2][j]) * k;
2751 dst[0][j] =
r*257 >> 16;
2752 dst[1][j] = (
g*257 >> 16) + 128;
2753 dst[2][j] = (
b*257 >> 16) + 128;
2762 stereo->
type =
s->stereo3d->type;
2763 stereo->
flags =
s->stereo3d->flags;
2768 if (
s->iccnum != 0 &&
s->iccnum ==
s->iccread) {
2775 for (
i = 0;
i <
s->iccnum;
i++)
2776 total_size +=
s->iccdatalens[
i];
2785 for (
i = 0;
i <
s->iccnum;
i++) {
2798 return buf_ptr - buf;
2806 if (
s->interlaced &&
s->bottom_field == !
s->interlace_polarity &&
s->got_picture && !avctx->
frame_number) {
2812 s->picture_ptr =
NULL;
2813 }
else if (
s->picture_ptr)
2819 s->ljpeg_buffer_size = 0;
2821 for (
i = 0;
i < 3;
i++) {
2822 for (j = 0; j < 4; j++)
2844 #if CONFIG_MJPEG_DECODER
2845 #define OFFSET(x) offsetof(MJpegDecodeContext, x)
2846 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2848 {
"extern_huff",
"Use external huffman table.",
2853 static const AVClass mjpegdec_class = {
2872 .priv_class = &mjpegdec_class,
2877 #if CONFIG_MJPEG_NVDEC_HWACCEL
2880 #if CONFIG_MJPEG_VAAPI_HWACCEL
2887 #if CONFIG_THP_DECODER
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
#define AV_LOG_WARNING
Something somehow does not look correct.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
AVPixelFormat
Pixel format.
static av_cold int init(AVCodecContext *avctx)
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
void ff_mjpeg_build_huffman_codes(uint8_t *huff_size, uint16_t *huff_code, const uint8_t *bits_table, const uint8_t *val_table)
static int get_bits_left(GetBitContext *gb)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
enum AVColorSpace colorspace
YUV colorspace type.
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
#define FFSWAP(type, a, b)
static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s, uint8_t *dst, const uint8_t *src, int linesize, int lowres)
The official guide to swscale for confused that is
static void decode_flush(AVCodecContext *avctx)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
uint8_t * data
The data buffer.
#define MKTAG(a, b, c, d)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
#define GET_VLC(code, name, gb, table, bits, max_depth)
If the vlc code is invalid and max_depth=1, then no bits will be removed.
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
#define se(name, range_min, range_max)
static int get_bits_count(const GetBitContext *s)
static void init_idct(AVCodecContext *avctx)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
const uint8_t avpriv_mjpeg_val_ac_luminance[]
#define AV_PIX_FMT_YUVA420P16
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
#define FF_PROFILE_MJPEG_JPEG_LS
enum AVFieldOrder field_order
Field order.
static int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
int step
Number of elements between 2 horizontally consecutive pixels.
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
static int build_vlc(VLC *vlc, const uint8_t *bits_table, const uint8_t *val_table, int nb_codes, int use_static, int is_ac)
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
#define UPDATE_CACHE(name, gb)
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define FF_DEBUG_PICT_INFO
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define GET_CACHE(name, gb)
static void skip_bits(GetBitContext *s, int n)
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor, int point_transform, int nb_components)
static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
const struct AVCodec * codec
av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
enum AVDiscard skip_frame
Skip decoding for selected frames.
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
#define AV_PIX_FMT_YUVA444P16
#define FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
static int mjpeg_decode_com(MJpegDecodeContext *s)
static int init_default_huffman_tables(MJpegDecodeContext *s)
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
#define AV_PIX_FMT_GRAY16
#define ss(width, name, subs,...)
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
@ AV_PIX_FMT_YUVJ411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
const AVProfile ff_mjpeg_profiles[]
int ff_exif_decode_ifd(void *logctx, GetByteContext *gbytes, int le, int depth, AVDictionary **metadata)
static int aligned(int val)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, uint16_t *quant_matrix, int Al)
#define AV_PIX_FMT_YUV422P16
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
#define FF_CODEC_PROPERTY_LOSSLESS
#define FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT
static const uint16_t mask[17]
static int handle_rstn(MJpegDecodeContext *s, int nb_components)
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
#define CLOSE_READER(name, gb)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
static void parse_avid(MJpegDecodeContext *s, uint8_t *buf, int len)
const uint8_t avpriv_mjpeg_val_dc[12]
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
#define FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, int point_transform, int ilv)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PIX_FMT_YUV420P16
static void reset_icc_profile(MJpegDecodeContext *s)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
void ff_free_vlc(VLC *vlc)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
int flags
Additional information about the frame packing.
@ AVDISCARD_ALL
discard all
#define AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_RGBA64
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
#define PTRDIFF_SPECIFIER
static void flush(AVCodecContext *avctx)
static void mjpeg_idct_scan_progressive_ac(MJpegDecodeContext *s)
static void copy_block2(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Rational number (pair of numerator and denominator).
int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
const char * av_default_item_name(void *ptr)
Return the context name.
static unsigned int get_bits1(GetBitContext *s)
@ AV_PICTURE_TYPE_I
Intra.
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 15076-1.
#define LAST_SKIP_BITS(name, gb, num)
static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah, int Al, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
#define CONFIG_JPEGLS_DECODER
static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss, int se, int Ah, int Al)
int ff_init_vlc_sparse(VLC *vlc_arg, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
#define AV_EF_EXPLODE
abort decoding on minor error detection
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static av_always_inline int bytestream2_tell(GetByteContext *g)
#define copy_data_segment(skip)
const OptionDef options[]
static void copy_mb(CinepakEncContext *s, uint8_t *a_data[4], int a_linesize[4], uint8_t *b_data[4], int b_linesize[4])
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int ff_jpegls_decode_lse(MJpegDecodeContext *s)
Decode LSE block with initialization parameters.
#define FF_QSCALE_TYPE_MPEG1
static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block, uint8_t *last_nnz, int ac_index, uint16_t *quant_matrix, int ss, int se, int Al, int *EOBRUN)
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask, int mb_bitmask_size, const AVFrame *reference)
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio frames.
#define OPEN_READER(name, gb)
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static int get_xbits(GetBitContext *s, int n)
Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
#define HWACCEL_NVDEC(codec)
static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
#define AV_LOG_INFO
Standard information.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
static void copy_block4(uint8_t *dst, const uint8_t *src, ptrdiff_t dstStride, ptrdiff_t srcStride, int h)
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static int decode_block(MJpegDecodeContext *s, int16_t *block, int component, int dc_index, int ac_index, uint16_t *quant_matrix)
#define i(width, name, range_min, range_max)
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
#define FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
static int mjpeg_decode_dri(MJpegDecodeContext *s)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be memset to 0.
#define FF_DEBUG_STARTCODE
@ AV_PIX_FMT_YUVJ440P
planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
const char * name
Name of the codec implementation.
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
const uint8_t ff_zigzag_direct[64]
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
static const float pred[4]
AVStereo3D * av_stereo3d_alloc(void)
Allocate an AVStereo3D structure and set its fields to default values.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
enum AVStereo3DType type
How views are packed within the video.
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo ug o o w
static const uint8_t * align_get_bits(GetBitContext *s)
@ LSE
JPEG-LS extension parameters.
#define AV_INPUT_BUFFER_PADDING_SIZE
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
#define FF_ARRAY_ELEMS(a)
int ff_mjpeg_find_marker(MJpegDecodeContext *s, const uint8_t **buf_ptr, const uint8_t *buf_end, const uint8_t **unescaped_buf_ptr, int *unescaped_buf_size)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
main external API structure.
#define SHOW_UBITS(name, gb, num)
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return values
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the ...
static const AVProfile profiles[]
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
A reference to a data buffer.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static int mjpeg_decode_app(MJpegDecodeContext *s)
int frame_number
Frame counter, set by libavcodec.
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
#define avpriv_request_sample(...)
Structure to hold side data for an AVFrame.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
This structure stores compressed data.
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
#define HWACCEL_VAAPI(codec)
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
The exact code depends on how similar the blocks are and how related they are to the block
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
#define PREDICT(ret, topleft, top, left, predictor)
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
#define av_fourcc2str(fourcc)