36 #define BITSTREAM_READER_LE
45 #define FORMAT_SIMPLE 1
46 #define FORMAT_ENCRYPTED 2
80 crc = av_crc(s->crc_table, 0xFFFFFFFFU, buf, buf_size);
81 if (CRC != (crc ^ 0xFFFFFFFFU)) {
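The two lines above follow the standard av_crc() pattern: start from all-ones, run the table-driven CRC over the frame, invert, and compare against the value stored after the payload. A minimal self-contained sketch of that check (the helper name is hypothetical, and the AV_CRC_32_IEEE_LE table choice is an assumption, not taken from the listing):

#include <stdint.h>
#include "libavutil/crc.h"
#include "libavutil/intreadwrite.h"

/* Hypothetical helper: buf must be followed by 4 readable bytes holding
 * the little-endian CRC-32 of the preceding buf_size bytes. */
static int check_trailing_crc32(const uint8_t *buf, int buf_size)
{
    const AVCRC *table  = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t     stored = AV_RL32(buf + buf_size);
    uint32_t     crc    = av_crc(table, 0xFFFFFFFFU, buf, buf_size);

    return stored == (crc ^ 0xFFFFFFFFU) ? 0 : -1;
}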
91 uint64_t crc = UINT64_MAX, poly = 0x42F0E1EBA9EA3693U;
96 crc ^= (uint64_t)*pass++ << 56;
97 for (i = 0; i < 8; i++)
98     crc = (crc << 1) ^ (poly & (((int64_t) crc) >> 63));
101 return crc ^ UINT64_MAX;
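Pulled together, the password hash above is a bit-at-a-time CRC-64 with the ECMA-182 polynomial, all-ones initial value and final inversion. A self-contained sketch, assuming a NUL-terminated password string (the function name is illustrative):

#include <stdint.h>

static uint64_t password_crc64(const uint8_t *pass)
{
    uint64_t crc = UINT64_MAX, poly = 0x42F0E1EBA9EA3693U;

    while (*pass) {
        crc ^= (uint64_t)*pass++ << 56;   /* feed the next byte into the top bits */
        for (int i = 0; i < 8; i++)       /* one shift/XOR step per bit */
            crc = (crc << 1) ^ (poly & (uint64_t)(((int64_t)crc) >> 63));
    }
    return crc ^ UINT64_MAX;
}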
110 if (!s->decode_buffer)
113 s->decode_buffer = NULL;
152 av_log(avctx, AV_LOG_ERROR, "Missing password for encrypted stream. Please use the -password option\n");
158 if (s->channels > 1 && s->channels < 9)
166 if (s->channels == 0 || s->channels > 16) {
195 s->last_frame_length = s->data_length % s->frame_length;
196 total_frames = s->data_length / s->frame_length +
197     (s->last_frame_length ? 1 : 0);
203 s->data_length, s->frame_length, s->last_frame_length, total_frames);
205 if (s->frame_length >= UINT_MAX / (s->channels * sizeof(int32_t))) {
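The guard above rejects any frame size whose frame_length * channels * sizeof(int32_t) product would overflow an unsigned int before it is used to size an allocation. A hedged, self-contained sketch of that pattern (helper name and error handling are illustrative, not the decoder's exact code):

#include <limits.h>
#include <stdint.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int alloc_decode_buffer(int32_t **buffer,
                               unsigned frame_length, unsigned channels)
{
    /* refuse element counts whose byte size cannot be represented */
    if (frame_length >= UINT_MAX / (channels * sizeof(int32_t)))
        return AVERROR_INVALIDDATA;

    *buffer = av_mallocz_array(frame_length * channels, sizeof(int32_t));
    if (!*buffer)
        return AVERROR(ENOMEM);
    return 0;
}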
220     int *got_frame_ptr, AVPacket *avpkt)
225 int buf_size = avpkt->size;
229 int cur_chan = 0, framelen = s->frame_length;
242 frame->nb_samples = framelen;
251 for (i = 0; i < s->channels; i++) {
253 s->ch_ctx[i].predictor = 0;
257 for (i = 0; i < 8; i++)
264 for (p = s->decode_buffer; (int32_t*)p < s->decode_buffer + (framelen * s->channels); p++) {
265 int32_t *predictor = &s->ch_ctx[cur_chan].predictor;
267 TTARice *rice = &s->ch_ctx[cur_chan].rice;
268 uint32_t unary, depth, k;
321 #define PRED(x, k) (int32_t)((((uint64_t)(x) << (k)) - (x)) >> (k))
323 case 1: *p += PRED(*predictor, 4); break;
325 case 3: *p += PRED(*predictor, 5); break;
326 case 4: *p += *predictor; break;
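PRED(x, k) is a fixed-point prediction: it returns roughly x - x/2^k, i.e. the previous sample scaled by (2^k - 1)/2^k, widened to 64 bits so the left shift cannot overflow. A tiny standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PRED(x, k) (int32_t)((((uint64_t)(x) << (k)) - (x)) >> (k))

int main(void)
{
    /* ((1000 << 4) - 1000) >> 4 = 15000 >> 4 = 937, i.e. 15/16 of 1000 */
    printf("%d\n", PRED(1000, 4));   /* prints 937 */
    return 0;
}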
331 if (cur_chan < (s->channels-1))
335 if (s->channels > 1) {
337 for (*p += *r / 2; r > (int32_t*)p - s->channels; r--)
344 frame->nb_samples = framelen = s->last_frame_length;
361 for (p = s->decode_buffer; (int32_t*)p < s->decode_buffer + (framelen * s->channels); p++)
367 for (p = s->decode_buffer; (int32_t*)p < s->decode_buffer + (framelen * s->channels); p++)
374 for (i = 0; i < framelen * s->channels; i++)
377 s->decode_buffer = NULL;
388 s->decode_buffer = NULL;
397 s->decode_buffer = NULL;
403 #define OFFSET(x) offsetof(TTAContext, x)
404 #define DEC (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM)
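OFFSET() and DEC are the usual building blocks for the decoder's AVOption table: OFFSET() locates a field inside TTAContext and DEC flags the option as an audio decoding parameter. A hypothetical sketch of such a table (the option name, help text and field are illustrative, not copied from the source):

#include "libavutil/opt.h"

static const AVOption options[] = {
    { "password", "Set decoding password", OFFSET(pass), AV_OPT_TYPE_STRING,
      { .str = NULL }, 0, 0, DEC },
    { NULL },
};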
static void error(const char *err)
static int tta_check_crc(TTAContext *s, const uint8_t *buf, int buf_size)
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
static av_cold int init(AVCodecContext *avctx)
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
static int get_bits_left(GetBitContext *gb)
uint64_t channel_layout
Audio channel layout.
int sample_rate
samples per second
static uint64_t tta_check_crc64(uint8_t *pass)
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
static av_cold int end(AVCodecContext *avctx)
void ff_tta_filter_init(TTAFilter *c, int32_t shift)
This structure describes decoded (raw) audio or video data.
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
void ff_tta_rice_init(TTARice *c, uint32_t k0, uint32_t k1)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static const AVClass tta_decoder_class
#define AV_CH_LAYOUT_STEREO
av_cold void ff_ttadsp_init(TTADSPContext *c)
#define AV_CH_LAYOUT_QUAD
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
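A minimal sketch of the usual init_get_bits8()/get_bits() pattern with libavcodec's internal bit reader (the helper name and field widths are illustrative, not the actual TTA header layout):

#define BITSTREAM_READER_LE          /* little-endian bit order, as in the listing */
#include "libavcodec/get_bits.h"
#include "libavutil/error.h"

static int parse_fields(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, buf_size);
    if (ret < 0)
        return ret;

    unsigned format   = get_bits(&gb, 16);   /* illustrative 16-bit field */
    unsigned channels = get_bits(&gb, 16);   /* illustrative 16-bit field */
    if (get_bits_left(&gb) < 0)              /* over-read check */
        return AVERROR_INVALIDDATA;
    return format && channels ? 0 : AVERROR_INVALIDDATA;
}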
#define AV_CH_LOW_FREQUENCY
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static av_cold int tta_decode_init(AVCodecContext *avctx)
static const int64_t tta_channel_layouts[7]
static int allocate_buffers(AVCodecContext *avctx)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
const char * av_default_item_name(void *ptr)
Return the context name.
#define AV_EF_EXPLODE
abort decoding on minor error detection
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
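A hedged sketch of a plain Rice/Golomb read built on get_unary() — the generic split into a unary quotient plus k fixed bits, not the exact TTA adaptive scheme:

#include "libavcodec/get_bits.h"
#include "libavcodec/unary.h"

static unsigned read_rice(GetBitContext *gb, unsigned k)
{
    unsigned q = get_unary(gb, 0, get_bits_left(gb)); /* run of 1s ended by a 0 */
    unsigned r = k ? get_bits(gb, k) : 0;             /* k-bit remainder */
    return (q << k) | r;
}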
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
enum AVSampleFormat sample_fmt
audio sample format
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
#define AV_CH_LAYOUT_5POINT1_BACK
const uint32_t ff_tta_shift_1[]
int channels
number of audio channels
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
const uint32_t *const ff_tta_shift_16
const uint8_t ff_tta_filter_configs[]
#define av_malloc_array(a, b)
static int tta_decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *avpkt)
#define AV_CH_BACK_CENTER
#define AV_CH_LAYOUT_7POINT1_WIDE
@ AV_SAMPLE_FMT_S16
signed 16 bits
const char * name
Name of the codec implementation.
int block_align
number of bytes per packet if constant and known, or 0. Used by some WAV-based audio codecs.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
static const uint8_t * align_get_bits(GetBitContext *s)
static av_cold int tta_decode_close(AVCodecContext *avctx)
main external API structure.
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
static av_const int sign_extend(int val, unsigned bits)
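sign_extend() reinterprets the low n bits of a value as a two's-complement number; a tiny standalone example of the expected results:

#include <stdio.h>
#include "libavutil/common.h"

int main(void)
{
    printf("%d %d %d\n",
           sign_extend(0x7F, 8),    /* 127  */
           sign_extend(0x80, 8),    /* -128 */
           sign_extend(0xFF, 8));   /* -1   */
    return 0;
}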
This structure stores compressed data.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static const AVOption options[]
@ AV_SAMPLE_FMT_S32
signed 32 bits