/* libavformat/avisynth.c (excerpt) */
#define AVSC_NO_DECLSPEC

/* Name of the AviSynth shared library, per platform. */
#ifdef _WIN32
  #define AVISYNTH_LIB "avisynth"
#else
  #define AVISYNTH_NAME "libavisynth"
  #define AVISYNTH_LIB AVISYNTH_NAME SLIBSUF
#endif

#include <avisynth/avisynth_c.h>

/* AviSynthLibrary: table of AviSynth entry points resolved at runtime. */
#define AVSC_DECLARE_FUNC(name) name ## _func name
    /* ... one AVSC_DECLARE_FUNC(...) per imported function, e.g. avs_bit_blt ... */
#undef AVSC_DECLARE_FUNC

/* AviSynthContext fields (excerpt): */
    AVS_ScriptEnvironment *env;
    const AVS_VideoInfo *vi;

/* Plane orderings used when copying planar frames: */
static const int avs_planes_yuva[4] = { AVS_PLANAR_Y, AVS_PLANAR_U,
                                        AVS_PLANAR_V, AVS_PLANAR_A };
static const int avs_planes_rgba[4] = { AVS_PLANAR_G, AVS_PLANAR_B,
                                        AVS_PLANAR_R, AVS_PLANAR_A };
/* avisynth_load_library(): each entry point is resolved by name. */
#define LOAD_AVS_FUNC(name, continue_on_fail)                          \
        avs_library.name = (name ## _func)                             \
                           dlsym(avs_library.library, #name);          \
        if (!continue_on_fail && !avs_library.name)                    \
            goto fail;

/* avisynth_context_create(): */
    avs->env = avs_library.avs_create_script_environment(3);
    if (avs_library.avs_get_error) {
        const char *error = avs_library.avs_get_error(avs->env);
        /* ... log the error and fail if it is non-NULL ... */
    }
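A minimal sketch of how this loading pattern is typically driven; the dlopen() flags and the exact list of imported functions are assumptions of the sketch, not part of the excerpt above:

static av_cold int avisynth_load_library(void)
{
    avs_library.library = dlopen(AVISYNTH_LIB, RTLD_NOW | RTLD_LOCAL);
    if (!avs_library.library)
        return AVERROR_UNKNOWN;

    /* Resolve the entry points used elsewhere in this file. */
    LOAD_AVS_FUNC(avs_bit_blt, 0);
    LOAD_AVS_FUNC(avs_clip_get_error, 0);
    LOAD_AVS_FUNC(avs_create_script_environment, 0);
    LOAD_AVS_FUNC(avs_get_error, 1);   /* tolerated if absent; checked before use */
    /* ... */
    return 0;

fail:
    dlclose(avs_library.library);
    return AVERROR_UNKNOWN;
}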
/* avisynth_context_destroy(): unlink the context from avs_ctx_list and
 * release the AviSynth objects it owns. */
    if (avs == avs_ctx_list) {
        avs_ctx_list = avs->next;
    } else {
        AviSynthContext *prev = avs_ctx_list;
        while (prev->next != avs)
            prev = prev->next;
        prev->next = avs->next;
    }
    /* ... */
    avs_library.avs_release_clip(avs->clip);
    /* ... */
    avs_library.avs_delete_script_environment(avs->env);
/* avisynth_create_stream_video(): */
    st->avg_frame_rate = (AVRational) { avs->vi->fps_numerator,
                                        avs->vi->fps_denominator };
    /* ... */

    /* Map the AviSynth colorspace onto an FFmpeg pixel format; the elided
     * case bodies each set st->codecpar->format to the matching
     * AV_PIX_FMT_* value and break. */
    switch (avs->vi->pixel_type) {
    /* 10-16-bit planar YUV (AviSynth+): */
    case AVS_CS_YUV444P10:
    case AVS_CS_YUV422P10:
    case AVS_CS_YUV420P10:
    case AVS_CS_YUV444P12:
    case AVS_CS_YUV422P12:
    case AVS_CS_YUV420P12:
    case AVS_CS_YUV444P14:
    case AVS_CS_YUV422P14:
    case AVS_CS_YUV420P14:
    case AVS_CS_YUV444P16:
    case AVS_CS_YUV422P16:
    case AVS_CS_YUV420P16:
    /* 10-16-bit planar YUV with alpha (AviSynth+): */
    case AVS_CS_YUVA444P10:
    case AVS_CS_YUVA422P10:
    case AVS_CS_YUVA420P10:
    case AVS_CS_YUVA422P12:
    case AVS_CS_YUVA444P16:
    case AVS_CS_YUVA422P16:
    case AVS_CS_YUVA420P16:
    /* ... grayscale, packed RGB and planar RGB colorspaces elided ... */
    default:
        av_log(s, AV_LOG_ERROR,
               "unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
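Written out in full, one arm of this switch would look roughly like the following; AVS_CS_YUV420P10 is chosen arbitrarily and any per-case plane bookkeeping is omitted, so treat it as a sketch rather than a quote of the file:

    case AVS_CS_YUV420P10:
        st->codecpar->format = AV_PIX_FMT_YUV420P10;
        break;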
/* avisynth_create_stream_audio(): pick a PCM codec from the AviSynth
 * sample type; the elided case bodies each set st->codecpar->codec_id. */
    switch (avs->vi->sample_type) {
    case AVS_SAMPLE_INT8:
    case AVS_SAMPLE_INT16:
    case AVS_SAMPLE_INT24:
    case AVS_SAMPLE_INT32:
    case AVS_SAMPLE_FLOAT:
    /* ... */
    default:
        av_log(s, AV_LOG_ERROR,
               "unknown AviSynth sample type %d\n", avs->vi->sample_type);
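A sketch of what the elided audio case bodies assign; the exact pairing of sample types with little-endian PCM codec IDs is an assumption of this sketch:

    case AVS_SAMPLE_INT8:
        st->codecpar->codec_id = AV_CODEC_ID_PCM_U8;
        break;
    case AVS_SAMPLE_INT16:
        st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE;
        break;
    case AVS_SAMPLE_FLOAT:
        st->codecpar->codec_id = AV_CODEC_ID_PCM_F32LE;
        break;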
/* avisynth_create_stream(): create one AVStream per media type present. */
    if (avs_has_video(avs->vi)) {
        st = avformat_new_stream(s, NULL);
        /* ... avisynth_create_stream_video(s, st) ... */
    }
    if (avs_has_audio(avs->vi)) {
        st = avformat_new_stream(s, NULL);
        /* ... avisynth_create_stream_audio(s, st) ... */
    }
/* avisynth_open_file(): import the script and take the resulting clip. */
#ifdef _WIN32
    char filename_ansi[MAX_PATH * 4];
    wchar_t filename_wc[MAX_PATH * 4];
    /* ... */
    /* The AviSynth C API takes the filename in the active ANSI code page,
     * so convert the UTF-8 URL via UTF-16. */
    MultiByteToWideChar(CP_UTF8, 0, s->url, -1, filename_wc, MAX_PATH * 4);
    WideCharToMultiByte(CP_THREAD_ACP, 0, filename_wc, -1, filename_ansi,
                        MAX_PATH * 4, NULL, NULL);
    arg = avs_new_value_string(filename_ansi);
#else
    arg = avs_new_value_string(s->url);
#endif

    val = avs_library.avs_invoke(avs->env, "Import", arg, 0);
    if (avs_is_error(val)) {
        /* ... log the AviSynth error string and fail ... */
    }
    if (!avs_is_clip(val)) {
        /* ... the script did not return a clip: fail ... */
    }

    avs->clip = avs_library.avs_take_clip(val, avs->env);
    avs->vi   = avs_library.avs_get_video_info(avs->clip);

    /* The demuxer needs AviSynth interface version 6 or newer. */
    if (avs_library.avs_get_version(avs->clip) < 6) {
        av_log(s, AV_LOG_ERROR,
               "AviSynth version is too old. Please upgrade to either "
               "AviSynth 2.6 >= RC1 or AviSynth+ >= r1718.\n");
        /* ... fail ... */
    }

    /* Release the AVS_Value; the clip reference obtained above is kept. */
    avs_library.avs_release_value(val);
/* avisynth_read_packet_video(): */
    AVS_VideoFrame *frame;
    unsigned char *dst_p;
    const unsigned char *src_p;
    int n, i, plane, rowsize, planeheight, pitch, bits, ret;
    /* ... */

    /* Distinguish AviSynth+ from AviSynth 2.6 by probing for an entry
     * point that only AviSynth+ exports. */
    if (GetProcAddress(avs_library.library, "avs_is_planar_rgb") == NULL)
        avsplus = 0;
    else
        avsplus = 1;
    /* ... */

    bits = avs_library.avs_bits_per_pixel(avs->vi);

    /* One packet holds one fully packed frame. */
    pkt->size = (((int64_t)avs->vi->width *
                  (int64_t)avs->vi->height) * bits) / 8;
    /* ... */

    frame = avs_library.avs_get_frame(avs->clip, n);
    error = avs_library.avs_clip_get_error(avs->clip);
    /* ... */

    for (i = 0; i < avs->n_planes; i++) {
        plane = avs->planes[i];
        src_p = avs_library.avs_get_read_ptr_p(frame, plane);
        pitch = avs_library.avs_get_pitch_p(frame, plane);

        rowsize     = avs_library.avs_get_row_size_p(frame, plane);
        planeheight = avs_library.avs_get_height_p(frame, plane);

        /* Flip packed RGB video, which AviSynth stores bottom-up. */
        if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        /* Flip planar RGB video (AviSynth+ only). */
        if (avsplus && (avs_library.avs_is_planar_rgb(avs->vi) ||
                        avs_library.avs_is_planar_rgba(avs->vi))) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        avs_library.avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                                rowsize, planeheight);
        dst_p += rowsize * planeheight;   /* planes are packed back to back */
    }

    avs_library.avs_release_video_frame(frame);
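For context, the string returned by avs_clip_get_error() is checked before the copy loop runs; a sketch of that error path follows (the context's error flag and the exact cleanup order are assumptions of the sketch):

    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;      /* error flag on AviSynthContext: assumed name */
        av_packet_unref(pkt);
        return AVERROR_UNKNOWN;
    }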
/* avisynth_read_packet_audio(): */
    fps.num        = avs->vi->fps_numerator;
    fps.den        = avs->vi->fps_denominator;
    samplerate.num = avs->vi->audio_samples_per_second;
    samplerate.den = 1;

    /* When the clip also has video, read audio in roughly frame-sized
     * chunks so the two streams stay interleaved. */
    if (avs_has_video(avs->vi)) {
        /* ... */
    }
    /* ... */

    pkt->size = avs_bytes_per_channel_sample(avs->vi) *
                samples * avs->vi->nchannels;
    /* ... */

    avs_library.avs_get_audio(avs->clip, pkt->data, n, samples);
    error = avs_library.avs_clip_get_error(avs->clip);
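To make the size computation above concrete (illustrative numbers, not taken from the file): for 16-bit integer audio avs_bytes_per_channel_sample() yields 2, so reading 1152 samples from a stereo clip allocates a packet of 2 * 1152 * 2 = 4608 bytes.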
/* avisynth_read_seek(): */
static int avisynth_read_seek(AVFormatContext *s, int stream_index,
                              int64_t timestamp, int flags)
{
    /* ... */
    fps        = (AVRational) { avs->vi->fps_numerator,
                                avs->vi->fps_denominator };
    samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };

    st = s->streams[stream_index];
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* AviSynth frame counts are signed int. */
        if ((timestamp >= avs->vi->num_frames) ||
            (timestamp > INT_MAX)              ||
            (timestamp < 0))
            return AVERROR_EOF;
        /* ... */
        if (avs_has_audio(avs->vi))
            avs->curr_sample = av_rescale_q(timestamp, samplerate, fps);
    } else {
        if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
            return AVERROR_EOF;
        /* Force frame granularity for seeking when video is also present. */
        if (avs_has_video(avs->vi)) {
            /* ... round the audio target to a frame boundary via av_rescale_q() ... */
        }
    }
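As a quick check of the rescaling above (illustrative values): with fps = (AVRational){25, 1} and samplerate = (AVRational){48000, 1}, a seek to video frame 100 maps to audio sample av_rescale_q(100, samplerate, fps) = 100 * 48000 / 25 = 192000, i.e. exactly 4 seconds into the stream.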
Referenced symbols (consolidated from the Doxygen cross-reference):

Pixel-format macros (targets of the colorspace switch above):
    AV_PIX_FMT_GRAY10/12/14/16, AV_PIX_FMT_GRAYF32,
    AV_PIX_FMT_YUV420P10/12/14/16, AV_PIX_FMT_YUV422P10/12/14/16,
    AV_PIX_FMT_YUV444P10/12/14/16,
    AV_PIX_FMT_YUVA420P10/16, AV_PIX_FMT_YUVA422P10/12/16, AV_PIX_FMT_YUVA444P10/16,
    AV_PIX_FMT_GBRP10/12/14/16, AV_PIX_FMT_GBRAP10/12/16,
    AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, AV_PIX_FMT_BGRA64,
    plus the base 8-bit formats (packed BGR24 and YUYV422, planar YUV
    4:1:1/4:2:0/4:2:2/4:4:4 with and without alpha, planar GBRA).
Demuxer-local definitions:
    AVInputFormat ff_avisynth_demuxer
    static AviSynthLibrary avs_library
    static AviSynthContext *avs_ctx_list
    static int avs_atexit_called
    static const int avs_planes_packed[1], avs_planes_grey[1],
                     avs_planes_yuv[3], avs_planes_rgb[3],
                     avs_planes_yuva[4], avs_planes_rgba[4]
    AVSC_DECLARE_FUNC(avs_bit_blt)
    #define LOAD_AVS_FUNC(name, continue_on_fail)
    AviSynthContext members: AVS_ScriptEnvironment *env, struct AviSynthContext *next
    static av_cold int avisynth_load_library(void)
    static av_cold int avisynth_context_create(AVFormatContext *s)
    static av_cold void avisynth_context_destroy(AviSynthContext *avs)
    static av_cold void avisynth_atexit_handler(void)
    static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st)
    static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st)
    static int avisynth_create_stream(AVFormatContext *s)
    static int avisynth_open_file(AVFormatContext *s)
    static void avisynth_next_stream(AVFormatContext *s, AVStream **st, AVPacket *pkt, int *discard)
    static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int discard)
    static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int discard)
    static av_cold int avisynth_read_header(AVFormatContext *s)
    static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
    static av_cold int avisynth_read_close(AVFormatContext *s)
    static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)

FFmpeg API referenced (brief descriptions from the cross-reference):
    AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c) -- Add a new stream to a media file.
    int av_new_packet(AVPacket *pkt, int size) -- Allocate the payload of a packet and initialize its fields with default values.
    void av_packet_unref(AVPacket *pkt) -- Wipe the packet.
    int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) -- Rescale a 64-bit integer by 2 rational numbers.
    AVFormatContext.url -- input or output URL.
    AVFormatContext.priv_data -- Format private data.
    AVFormatContext.streams -- A list of all streams in the file.
    AVFormatContext.nb_streams -- Number of elements in AVFormatContext.streams.
    AVStream.id -- Format-specific stream ID.
    AVStream.codecpar -- Codec parameters associated with this stream.
    AVStream.avg_frame_rate -- Average framerate.
    AVStream.duration -- Decoding: duration of the stream, in stream time base.
    AVStream.start_time -- Decoding: pts of the first frame of the stream in presentation order, in stream time base.
    AVStream.nb_frames -- Number of frames in this stream if known, or 0.
    AVCodecParameters.codec_type -- General type of the encoded data.
    AVCodecParameters.codec_id -- Specific type of the encoded data (the codec used).
    AVCodecParameters.sample_rate -- Audio only.
    AVPacket -- This structure stores compressed data.
    AVPacket.pts -- Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented.
    AVPacket.dts -- Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
    AVPacket.duration -- Duration of this packet in AVStream->time_base units, 0 if unknown.
    AVRational -- Rational number (pair of numerator and denominator).
    AVERROR_EOF -- End of file.
    AVERROR_UNKNOWN -- Unknown error, typically from an external library.
    AV_LOG_ERROR -- Something went wrong and cannot losslessly be recovered.
    NULL_IF_CONFIG_SMALL(x) -- Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.