#define NMSEDEC_BITS 7
#define NMSEDEC_FRACBITS (NMSEDEC_BITS-1)
#define WMSEDEC_SHIFT 13 ///< must be >= 13
#define LAMBDA_SCALE (100000000LL << (WMSEDEC_SHIFT - 13))
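/* Illustrative sketch (not from the original file): the caller-supplied quality
 * value is brought into the fixed-point domain of the weighted MSE, which
 * carries WMSEDEC_SHIFT fractional bits; LAMBDA_SCALE keeps the constant
 * consistent should WMSEDEC_SHIFT ever be raised above 13.  The `quality`
 * parameter below is a hypothetical stand-in for the encoder's rate-control
 * input. */
static int64_t example_scale_lambda(int quality)
{
    return (int64_t)quality * LAMBDA_SCALE;
}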
static const int dwt_norms[2][4][10] = { // [dwt_type][band][rlevel], scaled by 10000
    {{10000, 19650, 41770,  84030, 169000, 338400,  676900, 1353000, 2706000, 5409000},
     {20220, 39890, 83550, 170400, 342700, 686300, 1373000, 2746000, 5490000},
     {20220, 39890, 83550, 170400, 342700, 686300, 1373000, 2746000, 5490000},
     {20800, 38650, 83070, 171800, 347100, 695900, 1393000, 2786000, 5572000}},
    {{10000, 15000, 27500, 53750, 106800, 213400, 426700, 853300, 1707000, 3413000},
     {10380, 15920, 29190, 57030, 113300, 226400, 452500, 904800, 1809000},
     {10380, 15920, 29190, 57030, 113300, 226400, 452500, 904800, 1809000},
     { 7186,  9218, 15860, 30430,  60190, 120100, 240000, 479700, 959300}}
};
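/* Illustrative sketch (not from the original file): fetching a per-band weight
 * from the table above.  The first index selects the transform variant,
 * bandpos is 0 for the LL band and 1..3 for the detail bands, lev is the
 * decomposition level, and 10000 corresponds to a norm of 1.0. */
static int example_band_norm(int dwt_type, int bandpos, int lev)
{
    return dwt_norms[dwt_type][bandpos][lev];
}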
static void nspaces(FILE *fd, int n)
{
    while (n--) putc(' ', fd);
}
    int tileno, compno, reslevelno, bandno, precno;
    fprintf(fd, "XSiz = %d, YSiz = %d, tile_width = %d, tile_height = %d\n"
                "numXtiles = %d, numYtiles = %d, ncomponents = %d\n",
            s->width, s->height, s->tile_width, s->tile_height,
            s->numXtiles, s->numYtiles, s->ncomponents);
    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
        fprintf(fd, "tile %d:\n", tileno);
        for (compno = 0; compno < s->ncomponents; compno++){
            fprintf(fd, "component %d:\n", compno);
            fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d\n",
                    comp->coord[0][0], comp->coord[0][1],
                    comp->coord[1][0], comp->coord[1][1]);
            for (reslevelno = 0; reslevelno < s->nreslevels; reslevelno++){
                fprintf(fd, "reslevel %d:\n", reslevelno);
                fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d, nbands = %d\n",
                        reslevel->x0, reslevel->x1, reslevel->y0,
                        reslevel->y1, reslevel->nbands);
                for (bandno = 0; bandno < reslevel->nbands; bandno++){
                    fprintf(fd, "band %d:\n", bandno);
                    fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d,"
                                "codeblock_width = %d, codeblock_height = %d cblknx = %d cblkny = %d\n",
                            band->x0, band->x1, band->y0, band->y1,
                            band->codeblock_width, band->codeblock_height,
                            band->cblknx, band->cblkny);
                    /* ... */
                        fprintf(fd, "prec %d:\n", precno);
                        fprintf(fd, "xi0 = %d, xi1 = %d, yi0 = %d, yi1 = %d\n",
                                prec->xi0, prec->xi1, prec->yi0, prec->yi1);
        if (s->bit_index == 8)
        {
            s->bit_index = *s->buf == 0xff;
            /* ... */
        }
        *s->buf |= val << (7 - s->bit_index++);
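/* Illustrative sketch (not from the original file) of the same byte-stuffing
 * rule with a plain local buffer: after a byte equal to 0xff the next byte
 * must start with a 0 bit, so its MSB is skipped and only 7 bits of it are
 * usable.  All names below are hypothetical. */
static void example_put_bit(uint8_t *buf, int *pos, int *bit_index, int bit)
{
    if (*bit_index == 8) {                /* current byte is full */
        *bit_index = buf[*pos] == 0xff;   /* start at bit 1 after a 0xff byte */
        buf[++*pos] = 0;
    }
    buf[*pos] |= (bit & 1) << (7 - (*bit_index)++);
}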
    int sp = 1, curval = 0;
    /* ... */
        if (stack[sp]->val >= threshold){
            /* ... */
        }
        /* ... */
        curval = stack[sp]->val;
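/* Illustrative sketch (not from the original file) of the ancestor walk that
 * fills the `stack` used above: tag-tree coding emits each node's value as an
 * increment over its parent's coded value, so the leaf's ancestors are
 * collected first and then coded from the root downwards.  Minimal
 * hypothetical node type. */
typedef struct ExampleTgtNode {
    int val;
    struct ExampleTgtNode *parent;
} ExampleTgtNode;

static int example_collect_ancestors(ExampleTgtNode *leaf, ExampleTgtNode **stack)
{
    int sp = 0;
    for (; leaf; leaf = leaf->parent)
        stack[sp++] = leaf;   /* stack[sp-1] ends up being the root */
    return sp;
}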
    if (s->buf_end - s->buf < 40 + 3 * s->ncomponents)
        return -1;

    /* ... */
    bytestream_put_be16(&s->buf, 38 + 3 * s->ncomponents); // Lsiz
    bytestream_put_be16(&s->buf, 0);                       // Rsiz (capabilities)
    bytestream_put_be32(&s->buf, s->width);                // Xsiz
    bytestream_put_be32(&s->buf, s->height);               // Ysiz
    bytestream_put_be32(&s->buf, 0);                       // XOsiz
    bytestream_put_be32(&s->buf, 0);                       // YOsiz
    /* ... */
    bytestream_put_be32(&s->buf, s->tile_width);           // XTsiz
    bytestream_put_be32(&s->buf, s->tile_height);          // YTsiz
    bytestream_put_be32(&s->buf, 0);                       // XTOsiz
    bytestream_put_be32(&s->buf, 0);                       // YTOsiz
    bytestream_put_be16(&s->buf, s->ncomponents);          // Csiz

    for (i = 0; i < s->ncomponents; i++){ // Ssiz, XRsiz, YRsiz per component
        bytestream_put_byte(&s->buf, 7);
        bytestream_put_byte(&s->buf, i ? 1 << s->chroma_shift[0] : 1);
        bytestream_put_byte(&s->buf, i ? 1 << s->chroma_shift[1] : 1);
    }
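/* Illustrative sketch (not from the original file): a rough C view of the SIZ
 * marker segment fields written above, as laid out by ISO/IEC 15444-1 --
 * image and tile grid geometry followed by one Ssiz/XRsiz/YRsiz triplet per
 * component. */
struct example_siz {
    uint16_t lsiz;                   /* segment length */
    uint16_t rsiz;                   /* capabilities, 0 = baseline */
    uint32_t xsiz, ysiz;             /* image width, height */
    uint32_t xosiz, yosiz;           /* image offset */
    uint32_t xtsiz, ytsiz;           /* tile width, height */
    uint32_t xtosiz, ytosiz;         /* tile grid offset */
    uint16_t csiz;                   /* number of components */
    /* then per component: Ssiz (bit depth and sign), XRsiz, YRsiz subsampling */
};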
    if (s->buf_end - s->buf < 14)
        return -1;

    /* ... */
    bytestream_put_be16(&s->buf, 12); // Lcod
    bytestream_put_byte(&s->buf, 0);  // Scod
    // SGcod
    bytestream_put_byte(&s->buf, 0);  // progression order
    bytestream_put_be16(&s->buf, 1);  // number of layers
    /* ... */
    bytestream_put_byte(&s->buf, 0);
    /* ... */
    bytestream_put_byte(&s->buf, 0);
    // SPcod
    bytestream_put_byte(&s->buf, codsty->nreslevels - 1); // number of decomposition levels
    /* ... */
    bytestream_put_byte(&s->buf, 0);
    if (s->buf_end - s->buf < size + 2)
        return -1;

    /* ... */
    bytestream_put_be16(&s->buf, size); // LQcd
    /* ... */
        bytestream_put_byte(&s->buf, qntsty->expn[i] << 3);                      // no quantization: exponent only
    /* ... */
        bytestream_put_be16(&s->buf, (qntsty->expn[i] << 11) | qntsty->mant[i]); // expounded: exponent and mantissa
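/* Illustrative sketch (not from the original file) of the two SPqcd layouts
 * written above: with no quantization only a 5-bit exponent is stored, shifted
 * into the top bits of a byte, while the expounded case packs a 5-bit exponent
 * and an 11-bit mantissa into 16 bits. */
static uint16_t example_pack_stepsize(int expn, int mant)
{
    return (uint16_t)((expn << 11) | (mant & 0x7ff));
}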
    if (s->buf_end - s->buf < size + 2)
        return -1;

    /* ... */
    bytestream_put_be16(&s->buf, size);
    bytestream_put_be16(&s->buf, 1); // Rcom: Latin (ISO 8859-15) text
    if (s->buf_end - s->buf < 12)
        return 0;

    /* ... */
    bytestream_put_be16(&s->buf, 10);     // Lsot
    bytestream_put_be16(&s->buf, tileno); // Isot
    /* ... */
    bytestream_put_be32(&s->buf, 0);      // Psot, patched later with the tile-part length
    /* ... */
    bytestream_put_byte(&s->buf, 0);      // TPsot
    bytestream_put_byte(&s->buf, 1);      // TNsot
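/* Illustrative sketch (not from the original file) of the placeholder pattern
 * used for Psot above: the tile-part length is unknown when SOT is written, so
 * a zero is emitted and the real big-endian length is patched in after the
 * tile data has been produced (see the bytestream_put_be32(&psotptr, ...) call
 * in the frame encoder further down). */
static void example_patch_be32(uint8_t *placeholder, uint32_t value)
{
    placeholder[0] = value >> 24;
    placeholder[1] = value >> 16;
    placeholder[2] = value >>  8;
    placeholder[3] = value;
}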
    int tileno, tilex, tiley, compno;
    /* ... */
    for (tileno = 0, tiley = 0; tiley < s->numYtiles; tiley++)
        for (tilex = 0; tilex < s->numXtiles; tilex++, tileno++){
            /* ... */
            for (compno = 0; compno < s->ncomponents; compno++){
                /* ... */
                comp->coord[0][0] = comp->coord_o[0][0] = tilex * s->tile_width;
                comp->coord[0][1] = comp->coord_o[0][1] = FFMIN((tilex+1)*s->tile_width, s->width);
                comp->coord[1][0] = comp->coord_o[1][0] = tiley * s->tile_height;
                comp->coord[1][1] = comp->coord_o[1][1] = FFMIN((tiley+1)*s->tile_height, s->height);
                /* ... */
                for (i = 0; i < 2; i++)
                    for (j = 0; j < 2; j++)
                        /* ... */
                /* ... (likely the dx/dy arguments of the ff_jpeg2000_init_component() call) */
                        compno ? 1 << s->chroma_shift[0] : 1,
                        compno ? 1 << s->chroma_shift[1] : 1,
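/* Illustrative sketch (not from the original file) of the tile geometry
 * computed above: tile (tilex, tiley) spans [tilex*tw, min((tilex+1)*tw, width))
 * horizontally and the analogous range vertically, so tiles on the right and
 * bottom edges are clipped to the image. */
static void example_tile_extent(int tilex, int tw, int width, int *x0, int *x1)
{
    *x0 = tilex * tw;
    *x1 = (tilex + 1) * tw < width ? (tilex + 1) * tw : width;
}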
    int tileno, compno, i, y, x;

    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
        /* ... */
        for (compno = 0; compno < s->ncomponents; compno++){
            int *dst = comp->i_data;
            line = s->picture->data[compno]
                   + comp->coord[1][0] * s->picture->linesize[compno]
                   /* ... */
            for (y = comp->coord[1][0]; y < comp->coord[1][1]; y++){
                /* ... */
                for (x = comp->coord[0][0]; x < comp->coord[0][1]; x++)
                    *dst++ = *ptr++ - (1 << 7);
                line += s->picture->linesize[compno];
            }
        }
        /* ... */
        line = s->picture->data[0] + tile->comp[0].coord[1][0] * s->picture->linesize[0]
               /* ... */
        for (y = tile->comp[0].coord[1][0]; y < tile->comp[0].coord[1][1]; y++){
            /* ... */
            for (x = tile->comp[0].coord[0][0]; x < tile->comp[0].coord[0][1]; x++, i++){
                for (compno = 0; compno < s->ncomponents; compno++){
                    /* ... */
                }
            }
            line += s->picture->linesize[0];
        }
    }
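/* Illustrative sketch (not from the original file) of the DC level shift
 * applied above: unsigned 8-bit samples are centred around zero by subtracting
 * 2^(bitdepth-1) = 128 before the wavelet transform. */
static inline int example_level_shift(uint8_t sample)
{
    return (int)sample - (1 << 7);
}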
    int compno, reslevelno, bandno;

    for (compno = 0; compno < s->ncomponents; compno++){
        /* ... */
        for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
            int nbands, lev = codsty->nreslevels - reslevelno - 1;
            nbands = reslevelno ? 3 : 1;
            for (bandno = 0; bandno < nbands; bandno++, gbandno++){
                /* ... */
                int bandpos = bandno + (reslevelno > 0),
                /* ... */
                mant = (11 - log < 0 ? ss >> log - 11 : ss << 11 - log) & 0x7ff;
                expn = s->cbps[compno] - log + 13;
                /* ... */
                expn = ((bandno&2)>>1) + (reslevelno>0) + s->cbps[compno];
                /* ... */
                qntsty->expn[gbandno] = expn;
                qntsty->mant[gbandno] = mant;
            }
        }
    }
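/* Illustrative sketch (not from the original file) of the reversible (5/3)
 * exponent picked above: it is derived from the component bit depth plus the
 * extra dynamic range the wavelet filtering adds for the band's position and
 * resolution level. */
static int example_rev_expn(int cbps, int bandno, int reslevelno)
{
    return ((bandno & 2) >> 1) + (reslevelno > 0) + cbps;
}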
                                    (1 << 13) - (a * a << 11), 0);
    /* significance propagation pass (encode_sigpass) */
    for (y0 = 0; y0 < height; y0 += 4)
        /* ... */
            for (y = y0; y < height && y < y0+4; y++){
    /* ... */

    /* refinement pass (encode_refpass) */
    for (y0 = 0; y0 < height; y0 += 4)
        /* ... */
            for (y = y0; y < height && y < y0+4; y++)
    /* ... */

    /* cleanup pass (encode_clnpass) */
    for (y0 = 0; y0 < height; y0 += 4)
        /* ... */
            for (rlen = 0; rlen < 4; rlen++)
                if (t1->data[(y0+rlen) * t1->stride + x] & mask)
                    /* ... */
            for (y = y0 + rlen; y < y0 + 4; y++){
                /* ... */
                if (t1->data[(y) * t1->stride + x] & mask){
                    /* ... */
            for (y = y0; y < y0 + 4 && y < height; y++){
                /* ... */
                if (t1->data[(y) * t1->stride + x] & mask){
    int pass_t = 2, passno, x, y, max=0, nmsedec, bpno;

    /* ... */
    memset(t1->flags, 0, t1->stride * (height + 2) * sizeof(*t1->flags));

    for (y = 0; y < height; y++){
        /* ... */
        if (t1->data[(y) * t1->stride + x] < 0){
            /* ... */
            t1->data[(y) * t1->stride + x] = -t1->data[(y) * t1->stride + x];
        }
    }
    /* ... */
    for (passno = 0; bpno >= 0; passno++){
        /* ... */
        wmsedec += (int64_t)nmsedec << (2*bpno);
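/* Illustrative sketch (not from the original file) of the distortion
 * bookkeeping above: the normalized MSE reduction reported for a coding pass
 * at bit-plane bpno is weighted by 2^(2*bpno), since an error one bit-plane
 * higher is twice as large and therefore four times as costly in squared
 * terms. */
static int64_t example_weighted_nmsedec(int nmsedec, int bpno)
{
    return (int64_t)nmsedec << (2 * bpno);
}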
    int bandno, empty = 1;

    /* ... */
    for (bandno = 0; bandno < rlevel->nbands; bandno++){
        /* ... */
    }
    /* ... */
    for (bandno = 0; bandno < rlevel->nbands; bandno++){
        /* ... */
        int pad = 0, llen, length;

        if (s->buf_end - s->buf < 20)
            return -1;
        /* ... */
    }
    /* ... */
    for (bandno = 0; bandno < rlevel->nbands; bandno++){
        /* ... */
        for (xi = 0; xi < cblknw; xi++){
    int compno, reslevelno, ret;

    /* ... */
    for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
        for (compno = 0; compno < s->ncomponents; compno++){
            /* ... */
        }
    }

    /* getcut(): choose the truncation point for one code-block */
    for (passno = 0; passno < cblk->npasses; passno++){
        /* ... */
        if (((dd * dwt_norm) >> WMSEDEC_SHIFT) * dwt_norm >= dr * lambda)
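/* Illustrative sketch (not from the original file) of the truncation test
 * above: a coding pass is kept as long as its distortion decrease dd, weighted
 * by the squared band norm, still buys at least lambda units of distortion per
 * unit of extra rate dr. */
static int example_keep_pass(int64_t dd, int64_t dr, int64_t dwt_norm, int64_t lambda)
{
    return ((dd * dwt_norm) >> WMSEDEC_SHIFT) * dwt_norm >= dr * lambda;
}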
    int precno, compno, reslevelno, bandno, cblkno, lev;

    for (compno = 0; compno < s->ncomponents; compno++){
        /* ... */
        for (reslevelno = 0, lev = codsty->nreslevels-1; reslevelno < codsty->nreslevels; reslevelno++, lev--){
            /* ... */
            for (bandno = 0; bandno < reslevel->nbands; bandno++){
                int bandpos = bandno + (reslevelno > 0);
    int compno, reslevelno, bandno, ret;

    for (compno = 0; compno < s->ncomponents; compno++){
        /* ... */
        for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
            /* ... */
            for (bandno = 0; bandno < reslevel->nbands; bandno++){
                /* ... */
                int cblkx, cblky, cblkno=0, xx0, x0, xx1, y0, yy0, yy1, bandpos;
                yy0 = bandno == 0 ? 0 : comp->reslevel[reslevelno-1].coord[1][1] -
                                        comp->reslevel[reslevelno-1].coord[1][0];
                /* ... */
                bandpos = bandno + (reslevelno > 0);
                /* ... */
                if (reslevelno == 0 || bandno == 1)
                    xx0 = 0;
                else
                    xx0 = comp->reslevel[reslevelno-1].coord[0][1] -
                          comp->reslevel[reslevelno-1].coord[0][0];
                /* ... */
                for (y = yy0; y < yy1; y++){
                    int *ptr = t1.data + (y-yy0)*t1.stride;
                    for (x = xx0; x < xx1; x++){
                        /* ... */
                    }
                }
                /* ... */
                for (y = yy0; y < yy1; y++){
                    int *ptr = t1.data + (y-yy0)*t1.stride;
                    for (x = xx0; x < xx1; x++){
                        *ptr = (comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * y + x]);
                        /* ... */
                    }
                }
                /* ... (final arguments of the encode_cblk() call) */
                            bandpos, codsty->nreslevels - reslevelno - 1);
    /* cleanup() */
    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
        for (compno = 0; compno < s->ncomponents; compno++){
            /* ... */
        }
    }

    /* reinit() */
    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
        /* ... */
        for (compno = 0; compno < s->ncomponents; compno++)
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    /* ... */
    uint8_t *chunkstart, *jp2cstart, *jp2hstart;
    /* ... */
    // JP2 signature box
    bytestream_put_be32(&s->buf, 0x0000000C);
    bytestream_put_be32(&s->buf, 0x6A502020); // 'jP  '
    bytestream_put_be32(&s->buf, 0x0D0A870A);
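/* Illustrative note (not from the original file): the three constants above
 * form the fixed 12-byte JP2 signature box -- box length 0x0000000C, box type
 * 'jP\x20\x20' (0x6A502020) and the mandated payload 0x0D0A870A. */
static const uint8_t example_jp2_signature[12] = {
    0x00, 0x00, 0x00, 0x0C,  'j',  'P', 0x20, 0x20, 0x0D, 0x0A, 0x87, 0x0A
};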
    chunkstart = s->buf;
    bytestream_put_be32(&s->buf, 0); // box size, patched later via update_size()
    /* ... */
    bytestream_put_be32(&s->buf, 0);
    /* ... */
    bytestream_put_be32(&s->buf, 0);
    /* ... */
    chunkstart = s->buf;
    bytestream_put_be32(&s->buf, 0);
    /* ... */
    bytestream_put_be32(&s->buf, avctx->height);  // image header: height
    bytestream_put_be32(&s->buf, avctx->width);   // width
    bytestream_put_be16(&s->buf, s->ncomponents); // number of components
    bytestream_put_byte(&s->buf, s->cbps[0]);
    bytestream_put_byte(&s->buf, 7);              // compression type 7 = JPEG 2000 codestream
    bytestream_put_byte(&s->buf, 0);
    bytestream_put_byte(&s->buf, 0);
    /* ... */
    chunkstart = s->buf;
    bytestream_put_be32(&s->buf, 0);
    /* ... */
    bytestream_put_byte(&s->buf, 1); // colour specification: METH = enumerated colourspace
    bytestream_put_byte(&s->buf, 0); // PREC
    bytestream_put_byte(&s->buf, 0); // APPROX
    /* ... */
        bytestream_put_be32(&s->buf, 16); // EnumCS: sRGB
    } else if (s->ncomponents == 1) {
        bytestream_put_be32(&s->buf, 17); // EnumCS: greyscale
    } else {
        bytestream_put_be32(&s->buf, 18); // EnumCS: sYCC
    }
    /* ... */
    chunkstart = s->buf;
    bytestream_put_be32(&s->buf, 0);
    /* ... */
    bytestream_put_byte(&s->buf, 3);
    bytestream_put_be24(&s->buf, 0x070707);
    /* ... */
    chunkstart = s->buf;
    bytestream_put_be32(&s->buf, 0);
    /* ... */
    for (i = 0; i < 3; i++) {
        bytestream_put_be16(&s->buf, 0);
        bytestream_put_byte(&s->buf, 1);
        bytestream_put_byte(&s->buf, i);
    }
    /* ... */
    bytestream_put_be32(&s->buf, 0);
    /* ... */
    if (s->buf_end - s->buf < 2)
        return -1;
    /* ... */
    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
        /* ... */
        if (!(psotptr = put_sot(s, tileno)))
            return -1;
        if (s->buf_end - s->buf < 2)
            return -1;
        /* ... */
        bytestream_put_be32(&psotptr, s->buf - psotptr + 6); // patch Psot with the tile-part length
        /* ... */
    }
    if (s->buf_end - s->buf < 2)
#if FF_API_PRIVATE_OPT
    /* ... */
#endif
    /* ... */
    if ((s->tile_width  & (s->tile_width -1)) ||
        (s->tile_height & (s->tile_height-1))) {
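/* Illustrative sketch (not from the original file) of the test above:
 * x & (x - 1) clears the lowest set bit, so it is zero exactly when x is a
 * power of two (or zero); a non-zero result therefore rejects the tile size. */
static int example_is_pow2(unsigned int x)
{
    return x && !(x & (x - 1));
}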
    for (i = 0; i < 3; i++)
        /* ... */

    /* ... */
    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
                                     s->chroma_shift, s->chroma_shift + 1);
#define OFFSET(x) offsetof(Jpeg2000EncoderContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM