/* "smear" edge mode: out-of-frame chroma source coordinates are clamped to
 * the nearest edge pixel */
#define DEFINE_SMEAR(depth, type, div) \
static int smear_slice ## depth(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
{ \
    ChromaShiftContext *s = ctx->priv; \
    AVFrame *in = s->in; \
    AVFrame *out = arg; \
    const int sulinesize = in->linesize[1] / div; \
    const int svlinesize = in->linesize[2] / div; \
    const int ulinesize = out->linesize[1] / div; \
    const int vlinesize = out->linesize[2] / div; \
    const int cbh = s->cbh; \
    const int cbv = s->cbv; \
    const int crh = s->crh; \
    const int crv = s->crv; \
    const int h = s->height[1]; \
    const int w = s->width[1]; \
    const int slice_start = (h * jobnr) / nb_jobs; \
    const int slice_end = (h * (jobnr+1)) / nb_jobs; \
    const type *su = (const type *)in->data[1]; \
    const type *sv = (const type *)in->data[2]; \
    type *du = (type *)out->data[1] + slice_start * ulinesize; \
    type *dv = (type *)out->data[2] + slice_start * vlinesize; \
\
    for (int y = slice_start; y < slice_end; y++) { \
        const int duy = av_clip(y - cbv, 0, h-1) * sulinesize; \
        const int dvy = av_clip(y - crv, 0, h-1) * svlinesize; \
\
        for (int x = 0; x < w; x++) { \
            du[x] = su[av_clip(x - cbh, 0, w - 1) + duy]; \
            dv[x] = sv[av_clip(x - crh, 0, w - 1) + dvy]; \
        } \
\
        du += ulinesize; \
        dv += vlinesize; \
    } \
\
    return 0; \
}

/* "wrap" edge mode: out-of-frame chroma source coordinates wrap around to
 * the opposite side of the plane */
#define DEFINE_WRAP(depth, type, div) \
static int wrap_slice ## depth(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
{ \
    ChromaShiftContext *s = ctx->priv; \
    AVFrame *in = s->in; \
    AVFrame *out = arg; \
    const int sulinesize = in->linesize[1] / div; \
    const int svlinesize = in->linesize[2] / div; \
    const int ulinesize = out->linesize[1] / div; \
    const int vlinesize = out->linesize[2] / div; \
    const int cbh = s->cbh; \
    const int cbv = s->cbv; \
    const int crh = s->crh; \
    const int crv = s->crv; \
    const int h = s->height[1]; \
    const int w = s->width[1]; \
    const int slice_start = (h * jobnr) / nb_jobs; \
    const int slice_end = (h * (jobnr+1)) / nb_jobs; \
    const type *su = (const type *)in->data[1]; \
    const type *sv = (const type *)in->data[2]; \
    type *du = (type *)out->data[1] + slice_start * ulinesize; \
    type *dv = (type *)out->data[2] + slice_start * vlinesize; \
\
    for (int y = slice_start; y < slice_end; y++) { \
        int uy = (y - cbv) % h; \
        int vy = (y - crv) % h; \
\
        if (uy < 0) \
            uy += h; \
        if (vy < 0) \
            vy += h; \
\
        for (int x = 0; x < w; x++) { \
            int ux = (x - cbh) % w; \
            int vx = (x - crh) % w; \
\
            if (ux < 0) \
                ux += w; \
            if (vx < 0) \
                vx += w; \
\
            du[x] = su[ux + uy * sulinesize]; \
            dv[x] = sv[vx + vy * svlinesize]; \
        } \
\
        du += ulinesize; \
        dv += vlinesize; \
    } \
\
    return 0; \
}

/* RGB(A) "smear" edge mode: for planar GBR formats planes 2/0/1 hold R/G/B;
 * out-of-frame source coordinates are clamped to the nearest edge pixel */
#define DEFINE_RGBASMEAR(depth, type, div) \
static int rgbasmear_slice ## depth(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
{ \
    ChromaShiftContext *s = ctx->priv; \
    AVFrame *in = s->in; \
    AVFrame *out = arg; \
    const int srlinesize = in->linesize[2] / div; \
    const int sglinesize = in->linesize[0] / div; \
    const int sblinesize = in->linesize[1] / div; \
    const int salinesize = in->linesize[3] / div; \
    const int rlinesize = out->linesize[2] / div; \
    const int glinesize = out->linesize[0] / div; \
    const int blinesize = out->linesize[1] / div; \
    const int alinesize = out->linesize[3] / div; \
    const int rh = s->rh; \
    const int rv = s->rv; \
    const int gh = s->gh; \
    const int gv = s->gv; \
    const int bh = s->bh; \
    const int bv = s->bv; \
    const int ah = s->ah; \
    const int av = s->av; \
    const int h = s->height[1]; \
    const int w = s->width[1]; \
    const int slice_start = (h * jobnr) / nb_jobs; \
    const int slice_end = (h * (jobnr+1)) / nb_jobs; \
    const type *sr = (const type *)in->data[2]; \
    const type *sg = (const type *)in->data[0]; \
    const type *sb = (const type *)in->data[1]; \
    const type *sa = (const type *)in->data[3]; \
    type *dr = (type *)out->data[2] + slice_start * rlinesize; \
    type *dg = (type *)out->data[0] + slice_start * glinesize; \
    type *db = (type *)out->data[1] + slice_start * blinesize; \
    type *da = (type *)out->data[3] + slice_start * alinesize; \
\
    for (int y = slice_start; y < slice_end; y++) { \
        const int ry = av_clip(y - rv, 0, h-1) * srlinesize; \
        const int gy = av_clip(y - gv, 0, h-1) * sglinesize; \
        const int by = av_clip(y - bv, 0, h-1) * sblinesize; \
        int ay; \
\
        for (int x = 0; x < w; x++) { \
            dr[x] = sr[av_clip(x - rh, 0, w - 1) + ry]; \
            dg[x] = sg[av_clip(x - gh, 0, w - 1) + gy]; \
            db[x] = sb[av_clip(x - bh, 0, w - 1) + by]; \
        } \
\
        dr += rlinesize; \
        dg += glinesize; \
        db += blinesize; \
\
        if (s->nb_planes < 4) \
            continue; \
        ay = av_clip(y - av, 0, h-1) * salinesize; \
        for (int x = 0; x < w; x++) { \
            da[x] = sa[av_clip(x - ah, 0, w - 1) + ay]; \
        } \
\
        da += alinesize; \
    } \
\
    return 0; \
}

/* RGB(A) "wrap" edge mode: out-of-frame source coordinates wrap around */
#define DEFINE_RGBAWRAP(depth, type, div) \
static int rgbawrap_slice ## depth(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
{ \
    ChromaShiftContext *s = ctx->priv; \
    AVFrame *in = s->in; \
    AVFrame *out = arg; \
    const int srlinesize = in->linesize[2] / div; \
    const int sglinesize = in->linesize[0] / div; \
    const int sblinesize = in->linesize[1] / div; \
    const int salinesize = in->linesize[3] / div; \
    const int rlinesize = out->linesize[2] / div; \
    const int glinesize = out->linesize[0] / div; \
    const int blinesize = out->linesize[1] / div; \
    const int alinesize = out->linesize[3] / div; \
    const int rh = s->rh; \
    const int rv = s->rv; \
    const int gh = s->gh; \
    const int gv = s->gv; \
    const int bh = s->bh; \
    const int bv = s->bv; \
    const int ah = s->ah; \
    const int av = s->av; \
    const int h = s->height[1]; \
    const int w = s->width[1]; \
    const int slice_start = (h * jobnr) / nb_jobs; \
    const int slice_end = (h * (jobnr+1)) / nb_jobs; \
    const type *sr = (const type *)in->data[2]; \
    const type *sg = (const type *)in->data[0]; \
    const type *sb = (const type *)in->data[1]; \
    const type *sa = (const type *)in->data[3]; \
    type *dr = (type *)out->data[2] + slice_start * rlinesize; \
    type *dg = (type *)out->data[0] + slice_start * glinesize; \
    type *db = (type *)out->data[1] + slice_start * blinesize; \
    type *da = (type *)out->data[3] + slice_start * alinesize; \
\
    for (int y = slice_start; y < slice_end; y++) { \
        int ry = (y - rv) % h; \
        int gy = (y - gv) % h; \
        int by = (y - bv) % h; \
\
        if (ry < 0) \
            ry += h; \
        if (gy < 0) \
            gy += h; \
        if (by < 0) \
            by += h; \
\
        for (int x = 0; x < w; x++) { \
            int rx = (x - rh) % w; \
            int gx = (x - gh) % w; \
            int bx = (x - bh) % w; \
\
            if (rx < 0) \
                rx += w; \
            if (gx < 0) \
                gx += w; \
            if (bx < 0) \
                bx += w; \
            dr[x] = sr[rx + ry * srlinesize]; \
            dg[x] = sg[gx + gy * sglinesize]; \
            db[x] = sb[bx + by * sblinesize]; \
        } \
\
        dr += rlinesize; \
        dg += glinesize; \
        db += blinesize; \
\
        if (s->nb_planes < 4) \
            continue; \
        for (int x = 0; x < w; x++) { \
            int ax = (x - ah) % w; \
            int ay = (y - av) % h; /* vertical offset derives from y, not x */ \
\
            if (ax < 0) \
                ax += w; \
            if (ay < 0) \
                ay += h; \
            da[x] = sa[ax + ay * salinesize]; \
        } \
\
        da += alinesize; \
    } \
\
    return 0; \
}

/* fragment of filter_frame(): the luma plane is not shifted, so it is copied
 * to the output unchanged before the chroma slice jobs run */
    av_image_copy_plane(out->data[0], out->linesize[0],
                        in->data[0], in->linesize[0],
                        s->linesize[0], s->height[0]);
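/*
 * A minimal sketch of how the four DEFINE_* macros above are presumably
 * instantiated elsewhere in the file: one 8-bit and one 16-bit variant each,
 * where `type` selects the sample type and `div` turns the byte linesize into
 * a per-sample stride. The exact instantiation list is an assumption, not
 * part of this excerpt.
 */
DEFINE_SMEAR(8, uint8_t, 1)
DEFINE_SMEAR(16, uint16_t, 2)

DEFINE_WRAP(8, uint8_t, 1)
DEFINE_WRAP(16, uint16_t, 2)

DEFINE_RGBASMEAR(8, uint8_t, 1)
DEFINE_RGBASMEAR(16, uint16_t, 2)

DEFINE_RGBAWRAP(8, uint8_t, 1)
DEFINE_RGBAWRAP(16, uint16_t, 2)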
#define OFFSET(x) offsetof(ChromaShiftContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

AVFilter ff_vf_chromashift = {
    .name       = "chromashift",
    /* ... */
    .priv_class = &chromashift_class,
    /* ... */
};

AVFilter ff_vf_rgbashift = {
    .name       = "rgbashift",
    /* ... */
    .priv_class = &rgbashift_class,
    /* ... */
};
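/*
 * A minimal sketch of the kind of option table the OFFSET()/VF helpers above
 * are used for, referenced by the priv_class fields. The option names mirror
 * the filter's cbh/cbv/crh/crv shifts and the smear/wrap edge modes seen in
 * the slice functions, but the ranges, defaults and descriptions here are
 * assumptions; this is not a verbatim copy of chromashift_options[].
 */
static const AVOption chromashift_options_sketch[] = {
    { "cbh",  "shift chroma-blue horizontally", OFFSET(cbh),  AV_OPT_TYPE_INT, {.i64 = 0}, -255, 255, VF },
    { "cbv",  "shift chroma-blue vertically",   OFFSET(cbv),  AV_OPT_TYPE_INT, {.i64 = 0}, -255, 255, VF },
    { "crh",  "shift chroma-red horizontally",  OFFSET(crh),  AV_OPT_TYPE_INT, {.i64 = 0}, -255, 255, VF },
    { "crv",  "shift chroma-red vertically",    OFFSET(crv),  AV_OPT_TYPE_INT, {.i64 = 0}, -255, 255, VF },
    { "edge", "set edge operation",             OFFSET(edge), AV_OPT_TYPE_INT, {.i64 = 0},    0,   1, VF, "edge" },
        { "smear", 0, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, VF, "edge" },
        { "wrap",  0, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, VF, "edge" },
    { NULL }
};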