s->param = (1.0f - s->param) / s->param;
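This remapping (done once, at filter init) turns the user-facing param into the offset used by the Reinhard curve below: a setting of 0.5, for example, becomes (1.0f - 0.5f) / 0.5f = 1.0f, which reduces the later expression sig / (sig + param) to the classic Reinhard operator sig / (sig + 1).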
static float hable(float in)
{
    float a = 0.15f, b = 0.50f, c = 0.10f, d = 0.20f, e = 0.02f, f = 0.30f;
    return (in * (in * a + b * c) + d * e) / (in * (in * a + b) + d * f) - e / f;
}
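This is the Uncharted 2 filmic curve with its usual shoulder/toe constants. Consistent with how the other operators in this file are pinned to the source peak, the Hable case presumably applies it in normalized form; a minimal sketch, assuming a helper of this shape:

/* Sketch (assumption): normalize so the source peak maps to 1.0. */
static float tonemap_hable(float sig, double peak)
{
    return hable(sig) / hable(peak);
}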
static float mobius(float in, float j, double peak)
{
    float a, b;

    if (in <= j)
        return in;

    /* solve for M(j) = j; M(peak) = 1.0; M'(j) = 1.0
     * where M(x) = scale * (x + a) / (x + b) */
    a = -j * j * (peak - 1.0f) / (j * j - 2.0f * j + peak);
    b = (j * j - 2.0f * j * peak + peak) / FFMAX(peak - 1.0f, 1e-6);

    return (b * b + 2.0f * b * j + j * j) / (b - a) * (in + a) / (in + b);
}
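The coefficients come from solving the three constraints for M(x) = c * (x + a) / (x + b): M(j) = j keeps the curve continuous at the knee, M'(j) = 1 matches its slope so the hand-off from the linear section is seamless, and M(peak) = 1.0 pins the source peak to white. The FFMAX(peak - 1.0f, 1e-6) guard keeps b finite when peak == 1.0, i.e. when the input is already SDR and the curve degenerates to a pass-through.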
#define MIX(x,y,a) (x) * (1 - (a)) + (y) * (a)
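Note that the expansion is not wrapped in an outer pair of parentheses, so the macro is only safe in the way this file uses it, as the entire right-hand side of an assignment; something like 2 * MIX(x, y, a) would multiply only the first term. A fully parenthesized variant (hypothetical, not part of this file) sidesteps the hazard:

#define MIX_SAFE(x, y, a) ((x) * (1 - (a)) + (y) * (a))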
const float *r_in = (const float *)(in->data[0] + x * desc->comp[0].step + y * in->linesize[0]);
const float *b_in = (const float *)(in->data[1] + x * desc->comp[1].step + y * in->linesize[1]);
const float *g_in = (const float *)(in->data[2] + x * desc->comp[2].step + y * in->linesize[2]);
float *r_out = (float *)(out->data[0] + x * desc->comp[0].step + y * out->linesize[0]);
float *b_out = (float *)(out->data[1] + x * desc->comp[1].step + y * out->linesize[1]);
float *g_out = (float *)(out->data[2] + x * desc->comp[2].step + y * out->linesize[2]);
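Each sample in these planar float formats lives at data[p] + x * comp[p].step + y * linesize[p]: step is the distance in bytes between horizontally adjacent samples (4 for a 32-bit float plane) and linesize is the byte stride between rows, so the same (x, y) indexes corresponding samples in the input and output frames.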
/* desaturate overbright pixels toward luma */
float luma = s->coeffs->cr * *r_in + s->coeffs->cg * *g_in + s->coeffs->cb * *b_in;
float overbright = FFMAX(luma - s->desat, 1e-6) / FFMAX(luma, 1e-6);
*r_out = MIX(*r_in, luma, overbright);
*g_out = MIX(*g_in, luma, overbright);
*b_out = MIX(*b_in, luma, overbright);
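Worked example: with luma = 2.0 and s->desat = 0.5, overbright = (2.0 - 0.5) / 2.0 = 0.75, so each channel keeps 25% of its own value and takes 75% from luma; at or below the threshold, overbright collapses to roughly 1e-6 / luma and the pixel passes through essentially unchanged. The effect is that strongly overbright pixels desaturate toward white instead of clipping to a wrong hue.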
/* linear: constant user gain, normalized to the source peak */
sig = sig * s->param / peak;
/* gamma: power curve with a linear toe below 0.05 */
sig = sig > 0.05f ? pow(sig / peak, 1.0f / s->param)
                  : sig * pow(0.05f / peak, 1.0f / s->param) / 0.05f;
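The 0.05f threshold gives the power curve a linear toe: for param > 1 the slope of pow(sig / peak, 1.0f / param) is unbounded at zero, so below 0.05 the code substitutes the straight line through the origin that meets the power curve exactly at sig = 0.05 (both branches evaluate to pow(0.05f / peak, 1.0f / param) there), which avoids amplifying noise in deep shadows.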
/* clip: scale, then hard-clip to [0, 1] */
sig = av_clipf(sig * s->param, 0, 1.0f);
/* reinhard */
sig = sig / (sig + s->param) * (peak + s->param) / peak;
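Sanity check on the normalization: at sig = peak this evaluates to peak / (peak + param) * (peak + param) / peak = 1.0, so the source peak lands exactly on white; for sig much smaller than param the curve is approximately sig * (peak + param) / (param * peak), i.e. nearly linear near black.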
/* mobius */
sig = mobius(sig, s->param, peak);
*r_out *= sig / sig_orig;
*g_out *= sig / sig_orig;
*b_out *= sig / sig_orig;
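These ratios are the heart of the filter: the curve is evaluated once on a single signal value and the same gain is applied to all three channels, preserving their ratios and hence the hue. A sketch of the surrounding per-pixel flow, where taking the brightest channel as the signal is an assumption not shown in this excerpt:

/* Sketch: tone-map one pixel.  Evaluating the curve on one value and
 * rescaling all channels by the same factor keeps the hue intact. */
static void tonemap_pixel(float *r, float *g, float *b, float param, double peak)
{
    float sig      = FFMAX(FFMAX(*r, *g), FFMAX(*b, 1e-6f)); /* assumption */
    float sig_orig = sig;

    sig = mobius(sig, param, peak);   /* or any of the curves above */

    *r *= sig / sig_orig;
    *g *= sig / sig_orig;
    *b *= sig / sig_orig;
}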
const int slice_start = (in->height * jobnr) / nb_jobs;
const int slice_end   = (in->height * (jobnr + 1)) / nb_jobs;
double peak = td->peak;
for (int y = slice_start; y < slice_end; y++)
    for (int x = 0; x < out->width; x++)
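Each job gets an equal band of rows, so the slices are disjoint and no synchronization is needed. For example, with in->height = 1080 and nb_jobs = 4, job 1 computes slice_start = 1080 * 1 / 4 = 270 and slice_end = 1080 * 2 / 4 = 540, processing rows 270 through 539.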
double peak = s->peak;

if (!desc || !odesc) {
av_image_copy_plane(out->data[3], out->linesize[3],
                    in->data[3], in->linesize[3],
                    out->linesize[3], outlink->h);
for (y = 0; y < out->height; y++) {
    for (x = 0; x < out->width; x++) {
#define OFFSET(x) offsetof(TonemapContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
.priv_class = &tonemap_class,
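Since the filter expects linear-light float input (the GBRPF32 formats above), a typical command line brackets it with colorspace conversions, for example `ffmpeg -i in.mkv -vf zscale=transfer=linear,tonemap=tonemap=hable,zscale=transfer=bt709 out.mkv` (the exact zscale parameters depend on the source material).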
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
static float hable(float in)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
static enum AVPixelFormat pix_fmts[]
@ AVCOL_SPC_NB
Not part of ABI.
int step
Number of elements between 2 horizontally consecutive pixels.
static const AVOption tonemap_options[]
double ff_determine_signal_peak(AVFrame *in)
static const AVFilterPad tonemap_inputs[]
const char * name
Filter name.
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
AVFilterLink
A link between two filters.
@ AVCOL_SPC_BT2020_CL
ITU-R BT2020 constant luminance system.
static const AVFilterPad tonemap_outputs[]
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
const char * av_color_space_name(enum AVColorSpace space)
static void tonemap(TonemapContext *s, AVFrame *out, const AVFrame *in, const AVPixFmtDescriptor *desc, int x, int y, double peak)
AVFilterPad
A filter pad used for either input or output.
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
static int tonemap_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
static const AVFilterPad outputs[]
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
static av_cold int init(AVFilterContext *ctx)
static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB]
AVClass
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
static int query_formats(AVFilterContext *ctx)
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define AV_PIX_FMT_GBRPF32
int format
agreed upon media format
void ff_update_hdr_metadata(AVFrame *in, double peak)
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, ...
@ AVCOL_SPC_SMPTE240M
functionally identical to above
AVFILTER_DEFINE_CLASS(tonemap)
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
int w
agreed upon image width
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
ThreadData
Used for passing data between threads.
const char * name
Pad name.
const AVPixFmtDescriptor * desc
const struct LumaCoefficients * coeffs
static float mobius(float in, float j, double peak)
int h
agreed upon image height
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
#define AV_PIX_FMT_GBRAPF32
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
enum TonemapAlgorithm tonemap
#define flags(name, subs,...)
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
static int filter_frame(AVFilterLink *link, AVFrame *in)
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B