/* get_symbol_inline(): read the value bits of a range-coded symbol */
    for (i = e - 1; i >= 0; i--)
        a += a + get_rac(c, state + 22 + FFMIN(i, 9));  // 22..31

/* get_vlc_symbol(): derive the Golomb-Rice parameter k from the context state */
    while (i < state->error_sum) { // FIXME: optimize
        k++;
        i += i;
    }

    ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
            v, state->bias, state->error_sum, state->drift, state->count, k);

    /* flip the sign when drift and count indicate the estimate overshot */
    v ^= ((2 * state->drift + state->count) >> 31);
/* ffv1dec_template.c is compiled twice: with plain names for the 16-bit
 * sample path, then (after an #undef) with a "32" suffix token-pasted on
 * for the 32-bit path. */
#define RENAME(name) name
/* ... */
#define RENAME(name) name ## 32
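This is the usual token-pasting template idiom: the same function body is compiled once per sample type, and ## gives each instantiation a distinct name. A minimal self-contained sketch of the pattern (the clip_sample helper is illustrative, not from the source):

#include <stdint.h>

#define TYPE int16_t
#define RENAME(name) name                 /* instantiates clip_sample() */
static TYPE RENAME(clip_sample)(TYPE v) { return v < 0 ? 0 : v; }
#undef RENAME
#undef TYPE

#define TYPE int32_t
#define RENAME(name) name ## 32           /* instantiates clip_sample32() */
static TYPE RENAME(clip_sample)(TYPE v) { return v < 0 ? 0 : v; }
#undef RENAME
#undef TYPE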
static int decode_plane(FFV1Context *s, uint8_t *src,
                        int w, int h, int stride, int plane_index,
                        int pixel_stride)
{
    int x, y;
    int16_t *sample[2];
    sample[0] = s->sample_buffer + 3;
    sample[1] = s->sample_buffer + w + 6 + 3;

    s->run_index = 0;

    memset(s->sample_buffer, 0, 2 * (w + 6) * sizeof(*s->sample_buffer));

    for (y = 0; y < h; y++) {
        int16_t *temp = sample[0]; // FIXME: try a normal buffer

        /* swap the two line buffers: sample[0] is the previous line,
         * sample[1] receives the current one */
        sample[0] = sample[1];
        sample[1] = temp;

        sample[1][-1] = sample[0][0];
        sample[0][w]  = sample[0][w - 1];

        if (s->avctx->bits_per_raw_sample <= 8) {
            int ret = decode_line(s, w, sample, plane_index, 8);
            if (ret < 0)
                return ret;
            for (x = 0; x < w; x++)
                src[x*pixel_stride + stride * y] = sample[1][x];
        } else {
            int ret = decode_line(s, w, sample, plane_index,
                                  s->avctx->bits_per_raw_sample);
            if (ret < 0)
                return ret;
            if (s->packed_at_lsb) {
                for (x = 0; x < w; x++) {
                    ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
                }
            } else {
                for (x = 0; x < w; x++) {
                    /* the (uint16_t **) cast forces a logical right shift */
                    ((uint16_t*)(src + stride*y))[x*pixel_stride] =
                        sample[1][x] << (16 - s->avctx->bits_per_raw_sample) |
                        ((uint16_t **)sample)[1][x] >> (2 * s->avctx->bits_per_raw_sample - 16);
                }
            }
        }
    }
    return 0;
}
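The final branch widens a bits_per_raw_sample-wide value to 16 bits by shifting it to the top and ORing its own most significant bits into the vacated low bits, so full-scale input maps to exactly 0xFFFF. A standalone restatement of that arithmetic, with a hypothetical helper name:

#include <stdint.h>

/* Widen a `bits`-wide sample (8 < bits < 16) to 16 bits with MSB replication. */
static uint16_t widen_to_16(uint16_t v, int bits)
{
    return (uint16_t)((v << (16 - bits)) | (v >> (2 * bits - 16)));
}
/* e.g. bits = 10: v = 0x3FF -> 0xFFFF, v = 0x200 -> 0x8020 */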
/* decode_slice_header(): normalize the coded slice geometry from slice
 * units to pixels and validate it */
    fs->slice_x     /= f->num_h_slices;
    fs->slice_y     /= f->num_v_slices;
    fs->slice_width  = fs->slice_width  / f->num_h_slices - fs->slice_x;
    fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
    if ((unsigned)fs->slice_width  > f->width ||
        (unsigned)fs->slice_height > f->height)
        return -1;
    if (   (unsigned)fs->slice_x + (uint64_t)fs->slice_width  > f->width
        || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
        return -1;

    for (i = 0; i < f->plane_count; i++) {
        PlaneContext *const p = &fs->plane[i];
        int idx = get_symbol(c, state, 0);
        if (idx >= (unsigned)f->quant_table_count) {
            av_log(f->avctx, AV_LOG_ERROR, "quant_table_index out of range\n");
            return -1;
        }
        /* ... */
    }

    /* picture structure: 1 = top field first, 2 = bottom field first,
     * 3 = progressive */
    if (ps == 1) {
        f->cur->interlaced_frame = 1;
        f->cur->top_field_first  = 1;
    } else if (ps == 2) {
        f->cur->interlaced_frame = 1;
        f->cur->top_field_first  = 0;
    } else if (ps == 3) {
        f->cur->interlaced_frame = 0;
    }

    /* fall back to an unknown aspect ratio if the coded SAR is invalid */
    if (av_image_check_sar(f->width, f->height,
                           f->cur->sample_aspect_ratio) < 0) {
        av_log(f->avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
               f->cur->sample_aspect_ratio.num,
               f->cur->sample_aspect_ratio.den);
        f->cur->sample_aspect_ratio = (AVRational){ 0, 1 };
    }

    if (fs->version > 3) {
        fs->slice_reset_contexts = get_rac(c, state);
        fs->slice_coding_mode    = get_symbol(c, state, 0);
        if (fs->slice_coding_mode != 1) {
            fs->slice_rct_by_coef = get_symbol(c, state, 0);
            fs->slice_rct_ry_coef = get_symbol(c, state, 0);
            if ((uint64_t)fs->slice_rct_by_coef + (uint64_t)fs->slice_rct_ry_coef > 4) {
                av_log(f->avctx, AV_LOG_ERROR, "slice_rct_y_coef out of range\n");
                return AVERROR_INVALIDDATA;
            }
        }
    }
/* decode_slice(): recover this context's slice index */
    for (si = 0; fs != f->slice_context[si]; si++)
        ;
    /* ... */

    /* carry the per-plane contexts over from the previous frame's slice
     * (fsdst/fssrc are the destination and source slice contexts) */
    for (i = 0; i < f->plane_count; i++) {
        PlaneContext *pdst = &fsdst->plane[i];
        PlaneContext *psrc = &fssrc->plane[i];
        /* ... */
        memcpy(pdst, psrc, sizeof(*pdst));
        /* ... */
    }
    /* ... */

    fs->slice_rct_by_coef = 1;
    fs->slice_rct_ry_coef = 1;

    if (f->version > 2) {
        if (decode_slice_header(f, fs) < 0) {
            fs->slice_x = fs->slice_y = fs->slice_height = fs->slice_width = 0;
            fs->slice_damaged = 1;
            return AVERROR_INVALIDDATA;
        }
    }
    if (f->cur->key_frame || fs->slice_reset_contexts)
        ff_ffv1_clear_slice_state(f, fs);

    width  = fs->slice_width;
    height = fs->slice_height;
    x      = fs->slice_x;
    y      = fs->slice_y;

    if (!fs->ac) {
        /* Golomb-Rice mode: skip the range-coder preamble and hand the
         * rest of the slice to the bit reader */
        if ((f->version == 3 && f->micro_version > 1) || f->version > 3)
            get_rac(&fs->c, (uint8_t[]) { 129 });
        fs->ac_byte_count = f->version > 2 || (!x && !y) ?
                            fs->c.bytestream - fs->c.bytestream_start - 1 : 0;
        init_get_bits(&fs->gb,
                      fs->c.bytestream_start + fs->ac_byte_count,
                      (fs->c.bytestream_end - fs->c.bytestream_start -
                       fs->ac_byte_count) * 8);
    }
    if (f->colorspace == 0 && (f->chroma_planes || !fs->transparency)) {
        /* planar YUV (+ optional alpha): decode each plane separately */
        const int cx = x >> f->chroma_h_shift;
        const int cy = y >> f->chroma_v_shift;
        /* ... decode_plane() for luma ... */

        if (f->chroma_planes) {
            /* ... decode_plane() for both chroma planes at (cx, cy) ... */
        }
        if (fs->transparency)
            /* ... decode_plane() for the alpha plane ... */;
    } else if (f->colorspace == 0) {
        /* gray + alpha interleaved in one plane, pixel_stride 2 */
        /* ... */
    } else if (f->use32bit) {
        /* ... decode_rgb_frame32() ... */
    } else {
        /* ... decode_rgb_frame() ... */
    }

    /* range-coder mode: verify the slice consumed exactly its bytes */
    if (fs->ac && f->version > 2) {
        int v;
        get_rac(&fs->c, (uint8_t[]) { 129 });
        v = fs->c.bytestream_end - fs->c.bytestream - 2 - 5*f->ec;
        if (v) {
            av_log(f->avctx, AV_LOG_ERROR, "bytestream end mismatching by %d\n", v);
            fs->slice_damaged = 1;
        }
    }
/* read_quant_table(): the table is run-length coded; i advances inside
 * the loop body as each run is filled with scale * v */
    for (v = 0; i < 128; v++) {
        /* ... */
    }
    /* mirror the positive half into the negative differences */
    for (i = 1; i < 128; i++)
        quant_table[256 - i] = -quant_table[i];

/* read_quant_tables(): one sub-table per context input */
    for (i = 0; i < 5; i++) {
        /* ... read_quant_table() and accumulate the context count ... */
    }

/* read_extra_header() */
    memset(state2, 128, sizeof(state2));
    f->version = get_symbol(c, state, 0);
    if (f->version < 2) {
        av_log(f->avctx, AV_LOG_ERROR, "Invalid version in global header\n");
        return AVERROR_INVALIDDATA;
    }
    if (f->version > 2) {
        c->bytestream_end -= 4;          /* reserve the trailing CRC-32 */
        f->micro_version = get_symbol(c, state, 0);
        if (f->micro_version < 0)
            return AVERROR_INVALIDDATA;
    }
    f->ac = get_symbol(c, state, 0);
    if (f->ac == AC_RANGE_CUSTOM_TAB) {
        for (i = 1; i < 256; i++)
            f->state_transition[i] = get_symbol(c, state, 1) + c->one_state[i];
    }

    /* ... colorspace, bits_per_raw_sample, chroma shifts, transparency ... */
    f->plane_count = 1 + (f->chroma_planes || f->version < 4) + f->transparency;

    if (f->chroma_h_shift > 4U || f->chroma_v_shift > 4U) {
        av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
               f->chroma_h_shift, f->chroma_v_shift);
        return AVERROR_INVALIDDATA;
    }

    if (f->num_h_slices > (unsigned)f->width  || !f->num_h_slices ||
        f->num_v_slices > (unsigned)f->height || !f->num_v_slices) {
        av_log(f->avctx, AV_LOG_ERROR, "slice count invalid\n");
        return AVERROR_INVALIDDATA;
    }

    f->quant_table_count = get_symbol(c, state, 0);
    if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) {
        f->quant_table_count = 0;
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < f->quant_table_count; i++) {
        f->context_count[i] = read_quant_tables(c, f->quant_tables[i]);
        if (f->context_count[i] < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
            return AVERROR_INVALIDDATA;
        }
    }
    /* ... */

    /* optional explicit initial context states, delta coded down column j */
    for (i = 0; i < f->quant_table_count; i++)
        if (get_rac(c, state)) {
            for (j = 0; j < f->context_count[i]; j++)
                for (k = 0; k < CONTEXT_SIZE; k++) {
                    int pred = j ? f->initial_states[i][j - 1][k] : 128;
                    f->initial_states[i][j][k] =
                        (pred + get_symbol(c, state2[k], 1)) & 0xFF;
                }
        }

    if (f->version > 2) {
        f->ec = get_symbol(c, state, 0);
        if (f->micro_version > 2)
            f->intra = get_symbol(c, state, 0);
    }

    /* the global header ends with a CRC-32 of itself; a nonzero running
     * CRC over the whole extradata means it is damaged */
    if (f->version > 2) {
        unsigned v;
        v = av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0,
                   f->avctx->extradata, f->avctx->extradata_size);
        if (v || f->avctx->extradata_size < 4) {
            av_log(f->avctx, AV_LOG_ERROR, "CRC mismatch %X!\n", v);
            return AVERROR_INVALIDDATA;
        }
        crc = AV_RB32(f->avctx->extradata + f->avctx->extradata_size - 4);
    }

    if (f->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(f->avctx, AV_LOG_DEBUG,
               "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), "
               "alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
               f->version, f->micro_version,
               f->ac, f->colorspace,
               f->avctx->bits_per_raw_sample,
               f->chroma_planes, f->chroma_h_shift, f->chroma_v_shift,
               f->transparency,
               f->num_h_slices, f->num_v_slices,
               f->quant_table_count, f->ec, f->intra, crc);
/* read_header(): version 0/1 streams repeat the global parameters in
 * every keyframe header */
    if (f->version < 2) {
        /* ... re-read version and coder type ... */
        if (f->ac == AC_RANGE_CUSTOM_TAB) {
            for (i = 1; i < 256; i++) {
                int st = get_symbol(c, state, 1) + c->one_state[i];
                if (st < 1 || st > 255) {
                    av_log(f->avctx, AV_LOG_ERROR,
                           "invalid state transition %d\n", st);
                    return AVERROR_INVALIDDATA;
                }
                f->state_transition[i] = st;
            }
        }
        /* ... colorspace, bit depth, chroma shifts, transparency ... */
        /* still in the version < 2 branch: reject mid-stream changes of
         * the global parameters */
        if (f->plane_count) {
            /* ... compare the re-read parameters against the current ones ... */
        }
        /* ... */
        f->plane_count = 2 + f->transparency;
    }

    /* map (colorspace, bit depth, chroma shifts, transparency) to a pixel
     * format; 16 * chroma_h_shift + chroma_v_shift encodes the subsampling */
    if (f->colorspace == 0) {
        if (!f->transparency && !f->chroma_planes) {
            if (f->avctx->bits_per_raw_sample <= 8)
                f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
            else if (f->avctx->bits_per_raw_sample == 9) {
                f->packed_at_lsb  = 1;
                /* ... 9-bit gray ... */
            } else if (f->avctx->bits_per_raw_sample == 10) {
                f->packed_at_lsb  = 1;
                f->avctx->pix_fmt = AV_PIX_FMT_GRAY10;
            } else if (f->avctx->bits_per_raw_sample == 12) {
                f->packed_at_lsb  = 1;
                f->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
            } else if (f->avctx->bits_per_raw_sample == 16) {
                f->packed_at_lsb  = 1;
                f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
            } else if (f->avctx->bits_per_raw_sample < 16) {
                f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
            } else
                return AVERROR(ENOSYS);
        } else if (f->transparency && !f->chroma_planes) {
            if (f->avctx->bits_per_raw_sample <= 8)
                f->avctx->pix_fmt = AV_PIX_FMT_YA8;
            else
                return AVERROR(ENOSYS);
        } else if (f->avctx->bits_per_raw_sample <= 8 && !f->transparency) {
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
            case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
            case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
            case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
            }
        } else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
            case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P10; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P12; break;
            case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P12; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P12; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P12; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P14; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P14; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P14; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
            }
        } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency) {
            f->packed_at_lsb = 1;
            switch (16 * f->chroma_h_shift + f->chroma_v_shift) {
            case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
            case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
            case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
            }
        }
    } else if (f->colorspace == 1) {
        if (f->chroma_h_shift || f->chroma_v_shift) {
            av_log(f->avctx, AV_LOG_ERROR,
                   "chroma subsampling not supported in this colorspace\n");
            return AVERROR(ENOSYS);
        }
        if      (f->avctx->bits_per_raw_sample <= 8 && !f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
        else if (f->avctx->bits_per_raw_sample <= 8 &&  f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
        else if (f->avctx->bits_per_raw_sample ==  9 && !f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
        else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        else if (f->avctx->bits_per_raw_sample == 10 &&  f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
        else if (f->avctx->bits_per_raw_sample == 12 &&  f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
        else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency)
            f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
        else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) {
            f->avctx->pix_fmt = AV_PIX_FMT_GBRP16;
            f->use32bit       = 1;
        } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency) {
            f->avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
            f->use32bit       = 1;
        }
    } else {
        av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
        return AVERROR(ENOSYS);
    }

    av_log(f->avctx, AV_LOG_DEBUG, "%d %d %d\n",
           f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
    if (f->version < 2) {
        context_count = read_quant_tables(c, f->quant_table);
        if (context_count < 0) {
            av_log(f->avctx, AV_LOG_ERROR, "read_quant_table error\n");
            return AVERROR_INVALIDDATA;
        }
        f->slice_count = f->max_slice_count;
    } else if (f->version < 3) {
        f->slice_count = get_symbol(c, state, 0);
    } else {
        /* version >= 3: walk the slice trailers backwards to count slices */
        const uint8_t *p = c->bytestream_end;
        for (f->slice_count = 0;
             f->slice_count < MAX_SLICES && 3 + 5*!!f->ec < p - c->bytestream_start;
             f->slice_count++) {
            int trailer = 3 + 5*!!f->ec;
            int size = AV_RB24(p - trailer);
            if (size + trailer > p - c->bytestream_start)
                break;
            p -= size + trailer;
        }
    }
    if (f->slice_count > (unsigned)MAX_SLICES || f->slice_count <= 0 ||
        f->slice_count > f->max_slice_count) {
        av_log(f->avctx, AV_LOG_ERROR, "slice count %d is invalid (max=%d)\n",
               f->slice_count, f->max_slice_count);
        return AVERROR_INVALIDDATA;
    }
    for (j = 0; j < f->slice_count; j++) {
        FFV1Context *fs = f->slice_context[j];
        fs->ac            = f->ac;
        fs->packed_at_lsb = f->packed_at_lsb;

        fs->slice_damaged = 0;

        if (f->version == 2) {
            /* version 2 carries the slice geometry here rather than in a
             * per-slice header; same normalization as decode_slice_header() */
            fs->slice_x      = get_symbol(c, state, 0) * f->width;
            fs->slice_y      = get_symbol(c, state, 0) * f->height;
            fs->slice_width  = (get_symbol(c, state, 0) + 1) * f->width  + fs->slice_x;
            fs->slice_height = (get_symbol(c, state, 0) + 1) * f->height + fs->slice_y;

            fs->slice_x     /= f->num_h_slices;
            fs->slice_y     /= f->num_v_slices;
            fs->slice_width  = fs->slice_width  / f->num_h_slices - fs->slice_x;
            fs->slice_height = fs->slice_height / f->num_v_slices - fs->slice_y;
            if ((unsigned)fs->slice_width  > f->width ||
                (unsigned)fs->slice_height > f->height)
                return AVERROR_INVALIDDATA;
            if (   (unsigned)fs->slice_x + (uint64_t)fs->slice_width  > f->width
                || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height)
                return AVERROR_INVALIDDATA;
        }

        for (i = 0; i < f->plane_count; i++) {
            PlaneContext *const p = &fs->plane[i];

            if (f->version == 2) {
                int idx = get_symbol(c, state, 0);
                if (idx > (unsigned)f->quant_table_count) {
                    av_log(f->avctx, AV_LOG_ERROR,
                           "quant_table_index out of range\n");
                    return AVERROR_INVALIDDATA;
                }
                /* ... select quant table idx for this plane ... */
            }
            /* ... */
            if (f->version <= 2) {
                av_freep(&p->state);
                /* ... (re)allocate the per-context states ... */
            }
        }
    }
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    /* ... */

    if (f->last_picture.f)
        ff_thread_release_buffer(avctx, &f->last_picture);
    FFSWAP(AVFrame*, f->picture.f, f->last_picture.f);

    f->cur = p = f->picture.f;

    /* ... range decoder setup, keyframe flag ... */
    if (!f->key_frame_ok) {
        av_log(avctx, AV_LOG_ERROR,
               "Cannot decode non-keyframe without valid keyframe\n");
        return AVERROR_INVALIDDATA;
    }
    /* ... */

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
               f->version, p->key_frame, f->ac, f->ec, f->slice_count,
               f->avctx->bits_per_raw_sample);

    /* walk the slices from the end of the packet backwards; each slice
     * trailer carries a 24-bit size plus, with error correction, a 5-byte
     * CRC field */
    buf_p = buf + buf_size;
    for (i = f->slice_count - 1; i >= 0; i--) {
        FFV1Context *fs = f->slice_context[i];
        int trailer = 3 + 5*!!f->ec;
        int v;

        if (i || f->version > 2) v = AV_RB24(buf_p - trailer) + trailer;
        else                     v = buf_p - c->bytestream_start;
        if (buf_p - c->bytestream_start < v) {
            av_log(avctx, AV_LOG_ERROR, "Slice pointer chain broken\n");
            return AVERROR_INVALIDDATA;
        }
        buf_p -= v;

        if (f->ec) {
            /* ... per-slice CRC check; on mismatch: */
            /*     fs->slice_damaged = 1; */
        }

        if (i)
            ff_init_range_decoder(&fs->c, buf_p, v);
        else
            fs->c.bytestream_end = buf_p + v;
        /* ... */
    }

    avctx->execute(avctx, decode_slice, &f->slice_context[0], NULL,
                   f->slice_count, sizeof(void *));

    /* conceal damaged slices by copying the co-located rectangle from the
     * previous picture */
    for (i = f->slice_count - 1; i >= 0; i--) {
        FFV1Context *fs = f->slice_context[i];
        int j;
        if (fs->slice_damaged && f->last_picture.f->data[0]) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
            const uint8_t *src[4];
            uint8_t *dst[4];
            for (j = 0; j < desc->nb_components; j++) {
                int pixshift = desc->comp[j].depth > 8;
                int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
                int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
                dst[j] = p->data[j] + p->linesize[j] *
                         (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift);
                src[j] = f->last_picture.f->data[j] +
                         f->last_picture.f->linesize[j] *
                         (fs->slice_y >> sv) + ((fs->slice_x >> sh) << pixshift);
            }
            if (desc->flags & AV_PIX_FMT_FLAG_PAL) {
                dst[1] = p->data[1];
                src[1] = f->last_picture.f->data[1];
            }
            av_image_copy(dst, p->linesize, src,
                          f->last_picture.f->linesize,
                          avctx->pix_fmt,
                          fs->slice_width, fs->slice_height);
        }
    }
    /* ... */

    if (f->last_picture.f)
        ff_thread_release_buffer(avctx, &f->last_picture);
    /* ... */
}

/* update_thread_context(): */
    memcpy(fdst, fsrc, sizeof(*fdst));
#define AV_PIX_FMT_YUVA422P16
static int read_extra_header(FFV1Context *f)
#define AV_PIX_FMT_GBRAP16
#define AV_LOG_WARNING
Something somehow does not look correct.
static void update_vlc_state(VlcState *const state, const int v)
static av_cold int init(AVCodecContext *avctx)
static int get_bits_left(GetBitContext *gb)
static int decode_slice(AVCodecContext *c, void *arg)
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
#define FFSWAP(type, a, b)
static int is_input_end(FFV1Context *s)
int context_count[MAX_QUANT_TABLES]
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AV_PIX_FMT_YUVA422P9
static int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len)
Read a signed Golomb-Rice code (ffv1 variant).
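The signed variant folds the decoded unsigned Golomb-Rice value to a signed number with the usual zigzag mapping (0, 1, 2, 3, ... to 0, -1, 1, -2, ...). A sketch of just that mapping, with a hypothetical helper name:

/* Zigzag unfold: 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ... */
static int unfold_signed(unsigned v)
{
    return (int)(v >> 1) ^ -(int)(v & 1);
}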
This structure describes decoded (raw) audio or video data.
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
#define AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P10
enum AVFieldOrder field_order
Field order.
int top_field_first
If the content is interlaced, is top field displayed first.
ThreadFrame last_picture
int step
Number of elements between 2 horizontally consecutive pixels.
#define AV_PIX_FMT_YUV420P10
uint8_t(* state)[CONTEXT_SIZE]
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
struct FFV1Context * slice_context[MAX_SLICES]
static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
av_cold int ff_ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
#define AC_RANGE_CUSTOM_TAB
#define AV_PIX_FMT_YUVA422P10
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define FF_DEBUG_PICT_INFO
static av_always_inline int RENAME() decode_line(FFV1Context *s, int w, TYPE *sample[2], int plane_index, int bits)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
#define AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUV422P9
int key_frame
1 -> keyframe, 0 -> not
#define AV_PIX_FMT_GRAY16
static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed)
#define AV_PIX_FMT_YUV444P10
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_PIX_FMT_YUV422P16
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
#define AV_PIX_FMT_GBRAP10
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
void ff_ffv1_clear_slice_state(FFV1Context *f, FFV1Context *fs)
#define AV_PIX_FMT_GBRAP12
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
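AV_CEIL_RSHIFT has no brief here; it performs a right shift that rounds up rather than down, which is what subsampled chroma plane dimensions need. A small self-contained illustration (the pixel format choice is arbitrary):

#include <libavutil/common.h>      /* AV_CEIL_RSHIFT */
#include <libavutil/pixdesc.h>     /* av_pix_fmt_desc_get */
#include <stdio.h>

int main(void)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
    int w = 1919, h = 1079;        /* odd luma sizes to show the rounding */
    int cw = AV_CEIL_RSHIFT(w, desc->log2_chroma_w);   /* 960, not 959 */
    int ch = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);   /* 540, not 539 */
    printf("chroma plane: %dx%d\n", cw, ch);
    return 0;
}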
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
static double av_q2d(AVRational a)
Convert an AVRational to a double.
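av_q2d simply divides num by den in double precision; the decoder's CRC log, for instance, uses it to turn pkt_timebase into seconds. Minimal usage:

#include <libavutil/rational.h>
#include <stdio.h>

int main(void)
{
    AVRational sar = { 4, 3 };                                /* a 4:3 ratio */
    printf("sar     = %f\n", av_q2d(sar));                    /* 1.333333 */
    printf("unknown = %f\n", av_q2d((AVRational){ 0, 1 }));   /* 0.000000 */
    return 0;
}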
PlaneContext plane[MAX_PLANES]
#define av_assert0(cond)
assert() equivalent, that is always enabled.
struct FFV1Context * fsrc
#define AV_PIX_FMT_YUV420P9
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256])
#define AV_PIX_FMT_YUV420P16
static av_always_inline int fold(int diff, int bits)
int ac
1=range coder <-> 0=golomb rice
static int get_vlc_symbol(GetBitContext *gb, VlcState *const state, int bits)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
#define AV_PIX_FMT_GRAY10
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define AV_PIX_FMT_GBRP16
static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
static int read_header(FFV1Context *f)
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
#define fs(width, name, subs,...)
Rational number (pair of numerator and denominator).
@ AV_PICTURE_TYPE_I
Intra.
#define AV_PIX_FMT_YUV440P10
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
enum AVPictureType pict_type
Picture type of the frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
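av_frame_ref makes the destination share the source frame's buffers by taking references rather than copying pixels; the destination must be clean (freshly allocated or unreffed). A minimal sketch:

#include <libavutil/frame.h>

int keep_reference(AVFrame *src, AVFrame **out)
{
    int ret;
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return AVERROR(ENOMEM);
    ret = av_frame_ref(dst, src);   /* shares buffers, no pixel copy */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    *out = dst;                     /* release later with av_frame_free() */
    return 0;
}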
#define AV_PIX_FMT_YUV422P12
av_cold int ff_ffv1_close(AVCodecContext *avctx)
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
#define AV_NOPTS_VALUE
Undefined timestamp value.
#define AV_PIX_FMT_YUV444P12
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define AV_PIX_FMT_YUVA444P10
static float quant_table[96]
uint8_t (*initial_states[MAX_QUANT_TABLES])[32]
static const struct @315 planes[]
int interlaced_frame
The content of the picture is interlaced.
static int decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index, int pixel_stride)
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
#define AV_PIX_FMT_GBRP12
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
const char * name
Name of the codec implementation.
static int get_rac(RangeCoder *c, uint8_t *const state)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_PIX_FMT_YUV444P9
#define MAX_CONTEXT_INPUTS
static const float pred[4]
#define AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV422P14
int ff_ffv1_allocate_initial_states(FFV1Context *f)
static int RENAME() decode_rgb_frame(FFV1Context *s, uint8_t *src[4], int w, int h, int stride[4])
main external API structure.
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
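This is the call the damaged-slice concealment above uses to copy the co-located rectangle from last_picture: it copies plane by plane, honoring each plane's linesize. A minimal wrapper showing the argument layout:

#include <libavutil/imgutils.h>

/* Copy one whole picture from src to dst; both must already be allocated
 * with the same pixel format and dimensions. */
static void copy_picture(uint8_t *dst_data[4], int dst_linesize[4],
                         uint8_t *src_data[4], int src_linesize[4],
                         enum AVPixelFormat fmt, int w, int h)
{
    av_image_copy(dst_data, dst_linesize,
                  (const uint8_t **)src_data, src_linesize,
                  fmt, w, h);
}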
static av_cold int decode_init(AVCodecContext *avctx)
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
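The extradata check in read_extra_header relies on a CRC-32 property: running the CRC over a buffer that ends with its own big-endian CRC yields 0. A hedged sketch of the same check:

#include <libavutil/crc.h>

/* Returns 0 if the buffer, whose last 4 bytes are its own big-endian
 * CRC-32 (IEEE), is intact -- the trick the FFV1 global header uses. */
static int check_crc(const uint8_t *buf, size_t size)
{
    const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE);
    return av_crc(table, 0, buf, size) != 0;   /* nonzero => damaged */
}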
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
This structure stores compressed data.
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV444P14
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
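decode_slice_header above uses exactly this pattern: if the coded sample aspect ratio fails validation, it is replaced with the "unknown" ratio 0/1 rather than rejecting the frame. Sketch:

#include <libavutil/imgutils.h>
#include <libavutil/rational.h>

static AVRational sanitize_sar(unsigned w, unsigned h, AVRational sar)
{
    if (av_image_check_sar(w, h, sar) < 0)
        return (AVRational){ 0, 1 };   /* unknown aspect ratio */
    return sar;
}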
#define AV_PIX_FMT_GRAY12
int16_t quant_table[MAX_CONTEXT_INPUTS][256]
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
#define AV_PIX_FMT_YUV420P14