FFmpeg  4.3
vf_tonemap.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Vittorio Giovara <vittorio.giovara@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * tonemap algorithms
24  */
25 
26 #include <float.h>
27 #include <stdio.h>
28 #include <string.h>
29 
30 #include "libavutil/imgutils.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/intreadwrite.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 
36 #include "avfilter.h"
37 #include "colorspace.h"
38 #include "formats.h"
39 #include "internal.h"
40 #include "video.h"
41 
51 };
52 
54  [AVCOL_SPC_FCC] = { 0.30, 0.59, 0.11 },
55  [AVCOL_SPC_BT470BG] = { 0.299, 0.587, 0.114 },
56  [AVCOL_SPC_SMPTE170M] = { 0.299, 0.587, 0.114 },
57  [AVCOL_SPC_BT709] = { 0.2126, 0.7152, 0.0722 },
58  [AVCOL_SPC_SMPTE240M] = { 0.212, 0.701, 0.087 },
59  [AVCOL_SPC_BT2020_NCL] = { 0.2627, 0.6780, 0.0593 },
60  [AVCOL_SPC_BT2020_CL] = { 0.2627, 0.6780, 0.0593 },
61 };
62 
63 typedef struct TonemapContext {
64  const AVClass *class;
65 
67  double param;
68  double desat;
69  double peak;
70 
71  const struct LumaCoefficients *coeffs;
73 
74 static const enum AVPixelFormat pix_fmts[] = {
78 };
79 
81 {
83 }
84 
86 {
87  TonemapContext *s = ctx->priv;
88 
89  switch(s->tonemap) {
90  case TONEMAP_GAMMA:
91  if (isnan(s->param))
92  s->param = 1.8f;
93  break;
94  case TONEMAP_REINHARD:
95  if (!isnan(s->param))
96  s->param = (1.0f - s->param) / s->param;
97  break;
98  case TONEMAP_MOBIUS:
99  if (isnan(s->param))
100  s->param = 0.3f;
101  break;
102  }
103 
104  if (isnan(s->param))
105  s->param = 1.0f;
106 
107  return 0;
108 }
109 
/* Filmic tone-curve by John Hable (the "Uncharted 2" operator).
 * Maps a linear-light value through an S-shaped rational curve; the
 * constant E/F term shifts the curve so that an input of 0 maps to 0. */
static float hable(float in)
{
    const float A = 0.15f; /* shoulder strength */
    const float B = 0.50f; /* linear strength */
    const float C = 0.10f; /* linear angle */
    const float D = 0.20f; /* toe strength */
    const float E = 0.02f; /* toe numerator */
    const float F = 0.30f; /* toe denominator */
    const float num = in * (in * A + B * C) + D * E;
    const float den = in * (in * A + B) + D * F;

    return num / den - E / F;
}
115 
/* Möbius tone-mapping curve: the identity below the linear threshold j,
 * and a smooth rational roll-off above it that reaches 1.0 at `peak`.
 * The coefficients are solved so the curve and its slope are continuous
 * at in == j. The FFMAX guard keeps the divisor positive when peak is
 * at or very near 1.0. */
static float mobius(float in, float j, double peak)
{
    float ka, kb;
    const float jj = j * j;

    /* Signal at or below the threshold passes through unchanged. */
    if (in <= j)
        return in;

    ka = -jj * (peak - 1.0f) / (jj - 2.0f * j + peak);
    kb = (jj - 2.0f * j * peak + peak) / FFMAX(peak - 1.0f, 1e-6);

    return (kb * kb + 2.0f * kb * j + jj) / (kb - ka) * (in + ka) / (in + kb);
}
128 
129 #define MIX(x,y,a) (x) * (1 - (a)) + (y) * (a)
130 static void tonemap(TonemapContext *s, AVFrame *out, const AVFrame *in,
131  const AVPixFmtDescriptor *desc, int x, int y, double peak)
132 {
133  const float *r_in = (const float *)(in->data[0] + x * desc->comp[0].step + y * in->linesize[0]);
134  const float *b_in = (const float *)(in->data[1] + x * desc->comp[1].step + y * in->linesize[1]);
135  const float *g_in = (const float *)(in->data[2] + x * desc->comp[2].step + y * in->linesize[2]);
136  float *r_out = (float *)(out->data[0] + x * desc->comp[0].step + y * out->linesize[0]);
137  float *b_out = (float *)(out->data[1] + x * desc->comp[1].step + y * out->linesize[1]);
138  float *g_out = (float *)(out->data[2] + x * desc->comp[2].step + y * out->linesize[2]);
139  float sig, sig_orig;
140 
141  /* load values */
142  *r_out = *r_in;
143  *b_out = *b_in;
144  *g_out = *g_in;
145 
146  /* desaturate to prevent unnatural colors */
147  if (s->desat > 0) {
148  float luma = s->coeffs->cr * *r_in + s->coeffs->cg * *g_in + s->coeffs->cb * *b_in;
149  float overbright = FFMAX(luma - s->desat, 1e-6) / FFMAX(luma, 1e-6);
150  *r_out = MIX(*r_in, luma, overbright);
151  *g_out = MIX(*g_in, luma, overbright);
152  *b_out = MIX(*b_in, luma, overbright);
153  }
154 
155  /* pick the brightest component, reducing the value range as necessary
156  * to keep the entire signal in range and preventing discoloration due to
157  * out-of-bounds clipping */
158  sig = FFMAX(FFMAX3(*r_out, *g_out, *b_out), 1e-6);
159  sig_orig = sig;
160 
161  switch(s->tonemap) {
162  default:
163  case TONEMAP_NONE:
164  // do nothing
165  break;
166  case TONEMAP_LINEAR:
167  sig = sig * s->param / peak;
168  break;
169  case TONEMAP_GAMMA:
170  sig = sig > 0.05f ? pow(sig / peak, 1.0f / s->param)
171  : sig * pow(0.05f / peak, 1.0f / s->param) / 0.05f;
172  break;
173  case TONEMAP_CLIP:
174  sig = av_clipf(sig * s->param, 0, 1.0f);
175  break;
176  case TONEMAP_HABLE:
177  sig = hable(sig) / hable(peak);
178  break;
179  case TONEMAP_REINHARD:
180  sig = sig / (sig + s->param) * (peak + s->param) / peak;
181  break;
182  case TONEMAP_MOBIUS:
183  sig = mobius(sig, s->param, peak);
184  break;
185  }
186 
187  /* apply the computed scale factor to the color,
188  * linearly to prevent discoloration */
189  *r_out *= sig / sig_orig;
190  *g_out *= sig / sig_orig;
191  *b_out *= sig / sig_orig;
192 }
193 
194 typedef struct ThreadData {
195  AVFrame *in, *out;
197  double peak;
198 } ThreadData;
199 
200 static int tonemap_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
201 {
202  TonemapContext *s = ctx->priv;
203  ThreadData *td = arg;
204  AVFrame *in = td->in;
205  AVFrame *out = td->out;
206  const AVPixFmtDescriptor *desc = td->desc;
207  const int slice_start = (in->height * jobnr) / nb_jobs;
208  const int slice_end = (in->height * (jobnr+1)) / nb_jobs;
209  double peak = td->peak;
210 
211  for (int y = slice_start; y < slice_end; y++)
212  for (int x = 0; x < out->width; x++)
213  tonemap(s, out, in, desc, x, y, peak);
214 
215  return 0;
216 }
217 
219 {
220  AVFilterContext *ctx = link->dst;
221  TonemapContext *s = ctx->priv;
222  AVFilterLink *outlink = ctx->outputs[0];
223  ThreadData td;
224  AVFrame *out;
226  const AVPixFmtDescriptor *odesc = av_pix_fmt_desc_get(outlink->format);
227  int ret, x, y;
228  double peak = s->peak;
229 
230  if (!desc || !odesc) {
231  av_frame_free(&in);
232  return AVERROR_BUG;
233  }
234 
235  out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
236  if (!out) {
237  av_frame_free(&in);
238  return AVERROR(ENOMEM);
239  }
240 
242  if (ret < 0) {
243  av_frame_free(&in);
244  av_frame_free(&out);
245  return ret;
246  }
247 
248  /* input and output transfer will be linear */
249  if (in->color_trc == AVCOL_TRC_UNSPECIFIED) {
250  av_log(s, AV_LOG_WARNING, "Untagged transfer, assuming linear light\n");
251  out->color_trc = AVCOL_TRC_LINEAR;
252  } else if (in->color_trc != AVCOL_TRC_LINEAR)
253  av_log(s, AV_LOG_WARNING, "Tonemapping works on linear light only\n");
254 
255  /* read peak from side data if not passed in */
256  if (!peak) {
258  av_log(s, AV_LOG_DEBUG, "Computed signal peak: %f\n", peak);
259  }
260 
261  /* load original color space even if pixel format is RGB to compute overbrights */
262  s->coeffs = &luma_coefficients[in->colorspace];
263  if (s->desat > 0 && (in->colorspace == AVCOL_SPC_UNSPECIFIED || !s->coeffs)) {
264  if (in->colorspace == AVCOL_SPC_UNSPECIFIED)
265  av_log(s, AV_LOG_WARNING, "Missing color space information, ");
266  else if (!s->coeffs)
267  av_log(s, AV_LOG_WARNING, "Unsupported color space '%s', ",
268  av_color_space_name(in->colorspace));
269  av_log(s, AV_LOG_WARNING, "desaturation is disabled\n");
270  s->desat = 0;
271  }
272 
273  /* do the tone map */
274  td.out = out;
275  td.in = in;
276  td.desc = desc;
277  td.peak = peak;
279 
280  /* copy/generate alpha if needed */
281  if (desc->flags & AV_PIX_FMT_FLAG_ALPHA && odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
282  av_image_copy_plane(out->data[3], out->linesize[3],
283  in->data[3], in->linesize[3],
284  out->linesize[3], outlink->h);
285  } else if (odesc->flags & AV_PIX_FMT_FLAG_ALPHA) {
286  for (y = 0; y < out->height; y++) {
287  for (x = 0; x < out->width; x++) {
288  AV_WN32(out->data[3] + x * odesc->comp[3].step + y * out->linesize[3],
289  av_float2int(1.0f));
290  }
291  }
292  }
293 
294  av_frame_free(&in);
295 
297 
298  return ff_filter_frame(outlink, out);
299 }
300 
/* Resolve option offsets into TonemapContext. */
#define OFFSET(x) offsetof(TonemapContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
/* User-visible filter options.
 * "param" defaults to NAN so init() can substitute a per-algorithm default
 * (1.8 for gamma, 0.3 for mobius, 1.0 otherwise).
 * NOTE(review): DBL_MIN is the smallest *positive* double, not the most
 * negative, so "param" is effectively restricted to > 0 — confirm intended. */
static const AVOption tonemap_options[] = {
    { "tonemap", "tonemap algorithm selection", OFFSET(tonemap), AV_OPT_TYPE_INT, {.i64 = TONEMAP_NONE}, TONEMAP_NONE, TONEMAP_MAX - 1, FLAGS, "tonemap" },
    { "none",     0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_NONE},     0, 0, FLAGS, "tonemap" },
    { "linear",   0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_LINEAR},   0, 0, FLAGS, "tonemap" },
    { "gamma",    0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_GAMMA},    0, 0, FLAGS, "tonemap" },
    { "clip",     0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_CLIP},     0, 0, FLAGS, "tonemap" },
    { "reinhard", 0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_REINHARD}, 0, 0, FLAGS, "tonemap" },
    { "hable",    0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_HABLE},    0, 0, FLAGS, "tonemap" },
    { "mobius",   0, 0, AV_OPT_TYPE_CONST, {.i64 = TONEMAP_MOBIUS},   0, 0, FLAGS, "tonemap" },
    { "param", "tonemap parameter",     OFFSET(param), AV_OPT_TYPE_DOUBLE, {.dbl = NAN}, DBL_MIN, DBL_MAX, FLAGS },
    { "desat", "desaturation strength", OFFSET(desat), AV_OPT_TYPE_DOUBLE, {.dbl = 2},   0,       DBL_MAX, FLAGS },
    { "peak",  "signal peak override",  OFFSET(peak),  AV_OPT_TYPE_DOUBLE, {.dbl = 0},   0,       DBL_MAX, FLAGS },
    { NULL }
};
317 
319 
/* Single video input; frames are processed in filter_frame(). */
static const AVFilterPad tonemap_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
328 
/* Single video output with default pad behavior. */
static const AVFilterPad tonemap_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
336 
338  .name = "tonemap",
339  .description = NULL_IF_CONFIG_SMALL("Conversion to/from different dynamic ranges."),
340  .init = init,
341  .query_formats = query_formats,
342  .priv_size = sizeof(TonemapContext),
343  .priv_class = &tonemap_class,
347 };
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:99
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
out
FILE * out
Definition: movenc.c:54
TONEMAP_GAMMA
@ TONEMAP_GAMMA
Definition: vf_tonemap.c:45
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1075
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:489
hable
static float hable(float in)
Definition: vf_tonemap.c:110
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
pixdesc.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: vf_tonemap.c:74
AVOption
AVOption.
Definition: opt.h:246
AVCOL_SPC_NB
@ AVCOL_SPC_NB
Not part of ABI.
Definition: pixfmt.h:526
b
#define b
Definition: input.c:41
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:483
LumaCoefficients
Definition: colorspace.h:28
AVComponentDescriptor::step
int step
Number of elements between 2 horizontally consecutive pixels.
Definition: pixdesc.h:41
tonemap_options
static const AVOption tonemap_options[]
Definition: vf_tonemap.c:303
ff_determine_signal_peak
double ff_determine_signal_peak(AVFrame *in)
Definition: colorspace.c:168
float.h
FLAGS
#define FLAGS
Definition: vf_tonemap.c:302
ThreadData::peak
double peak
Definition: vf_tonemap.c:197
tonemap_inputs
static const AVFilterPad tonemap_inputs[]
Definition: vf_tonemap.c:320
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
av_float2int
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
Definition: intfloat.h:50
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:494
video.h
AVFormatContext::internal
AVFormatInternal * internal
An opaque field for libavformat internal usage.
Definition: avformat.h:1788
AVCOL_SPC_BT2020_CL
@ AVCOL_SPC_BT2020_CL
ITU-R BT2020 constant luminance system.
Definition: pixfmt.h:521
tonemap_outputs
static const AVFilterPad tonemap_outputs[]
Definition: vf_tonemap.c:329
av_image_copy_plane
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
Definition: imgutils.c:338
formats.h
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:515
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:2942
colorspace.h
x
FFmpeg Automated Testing Environment ************************************Introduction Using FATE from your FFmpeg source directory Submitting the results to the FFmpeg result aggregation server Uploading new samples to the fate suite FATE makefile targets and variables Makefile targets Makefile variables Examples Introduction **************FATE is an extended regression suite on the client side and a means for results aggregation and presentation on the server side The first part of this document explains how you can use FATE from your FFmpeg source directory to test your ffmpeg binary The second part describes how you can run FATE to submit the results to FFmpeg’s FATE server In any way you can have a look at the publicly viewable FATE results by visiting this as it can be seen if some test on some platform broke with their recent contribution This usually happens on the platforms the developers could not test on The second part of this document describes how you can run FATE to submit your results to FFmpeg’s FATE server If you want to submit your results be sure to check that your combination of OS and compiler is not already listed on the above mentioned website In the third part you can find a comprehensive listing of FATE makefile targets and variables Using FATE from your FFmpeg source directory **********************************************If you want to run FATE on your machine you need to have the samples in place You can get the samples via the build target fate rsync Use this command from the top level source this will cause FATE to fail NOTE To use a custom wrapper to run the pass ‘ target exec’ to ‘configure’ or set the TARGET_EXEC Make variable Submitting the results to the FFmpeg result aggregation server ****************************************************************To submit your results to the server you should run fate through the shell script ‘tests fate sh’ from the FFmpeg sources This script needs to be invoked with a configuration file as 
its first argument tests fate sh path to fate_config A configuration file template with comments describing the individual configuration variables can be found at ‘doc fate_config sh template’ Create a configuration that suits your based on the configuration template The ‘slot’ configuration variable can be any string that is not yet but it is suggested that you name it adhering to the following pattern ‘ARCH OS COMPILER COMPILER VERSION’ The configuration file itself will be sourced in a shell therefore all shell features may be used This enables you to setup the environment as you need it for your build For your first test runs the ‘fate_recv’ variable should be empty or commented out This will run everything as normal except that it will omit the submission of the results to the server The following files should be present in $workdir as specified in the configuration it may help to try out the ‘ssh’ command with one or more ‘ v’ options You should get detailed output concerning your SSH configuration and the authentication process The only thing left is to automate the execution of the fate sh script and the synchronisation of the samples directory Uploading new samples to the fate suite *****************************************If you need a sample uploaded send a mail to samples request This is for developers who have an account on the fate suite server If you upload new please make sure they are as small as space on each network bandwidth and so on benefit from smaller test cases Also keep in mind older checkouts use existing sample that means in practice generally do not remove or overwrite files as it likely would break older checkouts or releases Also all needed samples for a commit should be ideally before the push If you need an account for frequently uploading samples or you wish to help others by doing that send a mail to ffmpeg devel rsync vauL Duo x
Definition: fate.txt:150
tonemap
static void tonemap(TonemapContext *s, AVFrame *out, const AVFrame *in, const AVPixFmtDescriptor *desc, int x, int y, double peak)
Definition: vf_tonemap.c:130
TonemapContext::desat
double desat
Definition: vf_tonemap.c:68
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
av_cold
#define av_cold
Definition: attributes.h:90
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:600
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:516
ff_vf_tonemap
AVFilter ff_vf_tonemap
Definition: vf_tonemap.c:337
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:225
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2040
tonemap_slice
static int tonemap_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
Definition: vf_tonemap.c:200
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
AV_PIX_FMT_FLAG_ALPHA
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
Definition: pixdesc.h:177
ctx
AVFormatContext * ctx
Definition: movenc.c:48
TonemapContext::peak
double peak
Definition: vf_tonemap.c:69
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
link
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
Definition: filter_design.txt:23
arg
const char * arg
Definition: jacosubdec.c:66
if
if(ret)
Definition: filter_design.txt:179
init
static av_cold int init(AVFilterContext *ctx)
Definition: vf_tonemap.c:85
luma_coefficients
static const struct LumaCoefficients luma_coefficients[AVCOL_SPC_NB]
Definition: vf_tonemap.c:53
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
MIX
#define MIX(x, y, a)
Definition: vf_tonemap.c:129
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
isnan
#define isnan(x)
Definition: libm.h:340
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: vf_tonemap.c:80
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
AVPixFmtDescriptor::flags
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
TonemapAlgorithm
TonemapAlgorithm
Definition: vf_tonemap.c:42
desc
const char * desc
Definition: nvenc.c:79
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:186
AV_WN32
#define AV_WN32(p, v)
Definition: intreadwrite.h:376
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
AV_PIX_FMT_GBRPF32
#define AV_PIX_FMT_GBRPF32
Definition: pixfmt.h:426
ff_update_hdr_metadata
void ff_update_hdr_metadata(AVFrame *in, double peak)
Definition: colorspace.c:193
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:373
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
internal.h
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:517
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(tonemap)
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:520
internal.h
ff_filter_get_nb_threads
int ff_filter_get_nb_threads(AVFilterContext *ctx)
Get number of threads for current filter instance.
Definition: avfilter.c:784
ThreadData
Used for passing data between threads.
Definition: dsddec.c:67
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:512
TONEMAP_CLIP
@ TONEMAP_CLIP
Definition: vf_tonemap.c:46
ThreadData::desc
const AVPixFmtDescriptor * desc
Definition: vf_tonemap.c:196
AVFilter
Filter definition.
Definition: avfilter.h:144
TonemapContext
Definition: vf_tonemap.c:63
ret
ret
Definition: filter_design.txt:187
TonemapContext::coeffs
const struct LumaCoefficients * coeffs
Definition: vf_tonemap.c:71
mobius
static float mobius(float in, float j, double peak)
Definition: vf_tonemap.c:116
TONEMAP_HABLE
@ TONEMAP_HABLE
Definition: vf_tonemap.c:48
TonemapContext::param
double param
Definition: vf_tonemap.c:67
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCOL_SPC_FCC
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:514
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
AV_PIX_FMT_GBRAPF32
#define AV_PIX_FMT_GBRAPF32
Definition: pixfmt.h:427
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
TONEMAP_MAX
@ TONEMAP_MAX
Definition: vf_tonemap.c:50
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
OFFSET
#define OFFSET(x)
Definition: vf_tonemap.c:301
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:116
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ThreadData::in
AVFrame * in
Definition: af_afftdn.c:1083
TONEMAP_NONE
@ TONEMAP_NONE
Definition: vf_tonemap.c:43
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
TonemapContext::tonemap
enum TonemapAlgorithm tonemap
Definition: vf_tonemap.c:66
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:564
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
filter_frame
static int filter_frame(AVFilterLink *link, AVFrame *in)
Definition: vf_tonemap.c:218
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:511
TONEMAP_MOBIUS
@ TONEMAP_MOBIUS
Definition: vf_tonemap.c:49
TONEMAP_LINEAR
@ TONEMAP_LINEAR
Definition: vf_tonemap.c:44
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
TONEMAP_REINHARD
@ TONEMAP_REINHARD
Definition: vf_tonemap.c:47