#define MAX_FRAMES 240
#define NUM_CHANNELS 3
#define OFFSET(x) offsetof(PhotosensitivityContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
{ "threshold", "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0.1, FLT_MAX, FLAGS },
{ "t",         "set detection threshold factor (lower is stricter)", OFFSET(threshold_multiplier), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0.1, FLT_MAX, FLAGS },
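As a usage sketch (the option name comes from the table above; the value is an arbitrary illustration), the filter can be given a setting stricter than the default of 1 in a filtergraph such as photosensitivity=threshold=0.5, since the option help text notes that lower values are stricter.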
#define NUM_CELLS (GRID_SIZE * GRID_SIZE)
int cell, gx, gy, x0, x1, y0, y1, x, y, c, area;
const int slice_start = (NUM_CELLS * jobnr) / nb_jobs;
int width = td->in->width, height = td->in->height, linesize = td->in->linesize[0], skip = td->skip;
for (cell = slice_start; cell < slice_end; cell++) {
for (y = y0; y < y1; y += skip) {
for (x = x0; x < x1; x += skip) {
area = ((x1 - x0 + skip - 1) / skip) * ((y1 - y0 + skip - 1) / skip);
td->out->grid[gy][gx][c] = sum[c];
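The area expression above is a ceiling division in each direction: it counts how many positions the skip-strided loops actually visit inside a cell, so the accumulated channel sums can be turned into averages before being stored in the grid. A minimal standalone sketch of that arithmetic (the helper name and the numbers are illustrative, not part of the filter):

    /* ((hi - lo + skip - 1) / skip) == number of values visited by
     * "for (v = lo; v < hi; v += skip)", i.e. ceil((hi - lo) / skip). */
    #include <stdio.h>

    static int samples_along(int lo, int hi, int skip)
    {
        return (hi - lo + skip - 1) / skip;
    }

    int main(void)
    {
        printf("%d\n", samples_along(0, 10, 3));                          /* visits 0,3,6,9 -> prints 4 */
        printf("%d\n", samples_along(0, 10, 3) * samples_along(0, 7, 3)); /* 4 * 3 = 12 samples per cell */
        return 0;
    }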
const uint16_t s_mul = td->s_mul;
const uint16_t t_mul = 0x100 - s_mul;
const int slice_start = (td->target->height * jobnr) / nb_jobs;
const int slice_end = (td->target->height * (jobnr+1)) / nb_jobs;
const int linesize = td->target->linesize[0];
for (y = slice_start; y < slice_end; y++) {
t = td->target->data[0] + y * td->target->linesize[0];
s = td->source->data[0] + y * td->source->linesize[0];
for (x = 0; x < linesize; x++) {
*t = (*t * t_mul + *s * s_mul) >> 8;
td.s_mul = (uint16_t)(factor * 0x100);
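blend_frame() converts the float blend factor into an 8.8 fixed-point weight (s_mul = factor * 0x100), blend_frame_partial() uses its complement t_mul = 0x100 - s_mul, and each output byte is (*t * t_mul + *s * s_mul) >> 8, i.e. the target and source bytes are mixed in proportion to the factor. A self-contained sketch of the same arithmetic on a single byte (blend_px is a made-up helper, not the filter's API):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t blend_px(uint8_t target, uint8_t source, float factor)
    {
        const uint16_t s_mul = (uint16_t)(factor * 0x100); /* source weight, 0..256 */
        const uint16_t t_mul = 0x100 - s_mul;              /* target weight         */
        return (uint8_t)((target * t_mul + source * s_mul) >> 8);
    }

    int main(void)
    {
        /* factor = 0.25 keeps 75% of the target: 200*192/256 + 40*64/256 = 160 */
        printf("%u\n", blend_px(200, 40, 0.25f));
        return 0;
    }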
int badness, x, y, c;
badness += abs((int)a->grid[y][x][c] - (int)b->grid[y][x][c]);
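get_badness() accumulates the absolute per-channel difference between two averaged grids into a single score. A minimal sketch of that accumulation (the grid dimensions below are placeholders, not the filter's actual GRID_SIZE):

    #include <stdint.h>
    #include <stdlib.h>

    #define SKETCH_GRID_SIZE 8   /* placeholder */
    #define SKETCH_CHANNELS  3   /* placeholder */

    static int grid_sad(const uint8_t a[SKETCH_GRID_SIZE][SKETCH_GRID_SIZE][SKETCH_CHANNELS],
                        const uint8_t b[SKETCH_GRID_SIZE][SKETCH_GRID_SIZE][SKETCH_CHANNELS])
    {
        int badness = 0;
        for (int y = 0; y < SKETCH_GRID_SIZE; y++)
            for (int x = 0; x < SKETCH_GRID_SIZE; x++)
                for (int c = 0; c < SKETCH_CHANNELS; c++)
                    badness += abs((int)a[y][x][c] - (int)b[y][x][c]);
        return badness;
    }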
int this_badness, current_badness, fixed_badness, new_badness, i, res;
for (i = 1; i < s->nb_frames; i++)
current_badness += i * s->history[(s->history_pos + i) % s->nb_frames];
current_badness /= s->nb_frames;
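Reading the loop above: history is a circular buffer of recent per-frame badness values indexed by history_pos, and since each new value is written at history_pos before the index is advanced, the more recently written entries sit at larger offsets i and so receive the larger weights before the sum is normalized by nb_frames. As an invented illustration, with nb_frames = 4 and retained entries 20, 30 and 40 (oldest to newest), current_badness becomes (1*20 + 2*30 + 3*40) / 4 = 50.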
new_badness = current_badness + this_badness;
current_badness, new_badness, s->badness_threshold,
100 * new_badness / s->badness_threshold, new_badness < s->badness_threshold ? "OK" : "EXCEEDED");
fixed_badness = new_badness;
if (new_badness < s->badness_threshold || !s->last_frame_av || s->bypass) {
s->last_frame_av = src = in;
s->last_frame_e = ef;
s->history[s->history_pos] = this_badness;
factor = (float)(s->badness_threshold - current_badness) / (new_badness - current_badness);
s->history[s->history_pos] = 0;
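The factor expression picks a blend weight so that, assuming the blended frame's badness scales roughly linearly between current_badness (factor 0) and new_badness (factor 1), the result lands at the threshold. As an invented illustration, with badness_threshold = 800, current_badness = 600 and new_badness = 1000, factor = (800 - 600) / (1000 - 600) = 0.5, so only half of the new frame's change is blended in.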
fixed_badness = current_badness + this_badness;
current_badness, fixed_badness, s->badness_threshold,
100 * new_badness / s->badness_threshold, factor);
s->last_frame_e = ef;
s->history[s->history_pos] = this_badness;
src = s->last_frame_av;
s->history_pos = (s->history_pos + 1) % s->nb_frames;
metadata = &out->metadata;
.name          = "photosensitivity",
.description   = NULL_IF_CONFIG_SMALL("Filter out photosensitive epilepsy seizure-inducing flashes."),
.priv_class    = &photosensitivity_class,
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
AVPixelFormat
Pixel format.
AVFILTER_DEFINE_CLASS(photosensitivity)
static int config_input(AVFilterLink *inlink)
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
static int query_formats(AVFilterContext *ctx)
#define AV_LOG_VERBOSE
Detailed information.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
const char * name
Filter name.
AVFormatInternal * internal
An opaque field for libavformat internal usage.
AVFilterLink
A link between two filters.
static int convert_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
static av_cold void uninit(AVFilterContext *ctx)
static const AVOption photosensitivity_options[]
PhotosensitivityFrame last_frame_e
AVFilter ff_vf_photosensitivity
AVFilterPad
A filter pad used for either input or output.
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
uint8_t grid[GRID_SIZE][GRID_SIZE][4]
static const AVFilterPad outputs[]
static void convert_frame(AVFilterContext *ctx, AVFrame *in, PhotosensitivityFrame *out, int skip)
AVClass
Describe the class of an AVClass context structure.
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
PhotosensitivityFrame * out
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
static void blend_frame(AVFilterContext *ctx, AVFrame *target, AVFrame *source, float factor)
#define i(width, name, range_min, range_max)
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
const char * name
Pad name.
static int blend_frame_partial(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
static const int factor[16]
static const AVFilterPad inputs[]
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
static int get_badness(PhotosensitivityFrame *a, PhotosensitivityFrame *b)
float threshold_multiplier