libavformat/avisynth.c
#define AVSC_NO_DECLSPEC
#ifdef _WIN32                       /* Windows DLL name */
#define AVISYNTH_LIB "avisynth"
#else                               /* shared library elsewhere */
#define AVISYNTH_NAME "libavisynth"
#define AVISYNTH_LIB AVISYNTH_NAME SLIBSUF
#endif
#if HAVE_BIGENDIAN                  /* PCM() picks the native-endian PCM codec ID */
#define PCM(format) (AV_CODEC_ID_PCM_ ## format ## BE)
#else
#define PCM(format) (AV_CODEC_ID_PCM_ ## format ## LE)
#endif
#include <avisynth/avisynth_c.h>
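As a quick illustration of the token pasting above (assuming a little-endian build, where the second definition is the active one), PCM() resolves an AviSynth sample format name to the matching native-endian codec ID:

/* On a little-endian build:
 *   PCM(S16) -> AV_CODEC_ID_PCM_S16LE
 *   PCM(S32) -> AV_CODEC_ID_PCM_S32LE
 * A big-endian build produces the ..._BE variants instead. */
enum AVCodecID id = PCM(S16);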
#define AVSC_DECLARE_FUNC(name) name ## _func name
/* ... one AVSC_DECLARE_FUNC(...) per AviSynth C API entry point, e.g. avs_bit_blt ... */
#undef AVSC_DECLARE_FUNC
AVS_ScriptEnvironment *env;
const AVS_VideoInfo *vi;
static const int avs_planes_yuva[4] = { AVS_PLANAR_Y, AVS_PLANAR_U,
                                        AVS_PLANAR_V, AVS_PLANAR_A };
static const int avs_planes_rgba[4] = { AVS_PLANAR_G, AVS_PLANAR_B,
                                        AVS_PLANAR_R, AVS_PLANAR_A };
#define LOAD_AVS_FUNC(name, continue_on_fail)                          \
        avs_library.name = (name ## _func)                             \
                           dlsym(avs_library.library, #name);          \
        if (!continue_on_fail && !avs_library.name)                    \
            goto fail;
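A minimal sketch of one expansion of this macro, assuming avs_library.library was obtained with dlopen(AVISYNTH_LIB, ...) and that the name ## _func pointer typedefs come from the AVSC_DECLARE_FUNC block above:

/* Roughly what LOAD_AVS_FUNC(avs_create_script_environment, 0) expands to. */
avs_library.avs_create_script_environment =
    (avs_create_script_environment_func)
        dlsym(avs_library.library, "avs_create_script_environment");
if (!avs_library.avs_create_script_environment)
    goto fail;   /* mandatory entry point is missing, abort the loader */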
while (prev->next != avs)
const AVS_Map *avsmap;
AVS_VideoFrame *frame;
avs->vi->fps_denominator };
switch (avs->vi->pixel_type) {
case AVS_CS_YUV444P10:
case AVS_CS_YUV422P10:
case AVS_CS_YUV420P10:
case AVS_CS_YUV444P12:
case AVS_CS_YUV422P12:
case AVS_CS_YUV420P12:
case AVS_CS_YUV444P14:
case AVS_CS_YUV422P14:
case AVS_CS_YUV420P14:
case AVS_CS_YUV444P16:
case AVS_CS_YUV422P16:
case AVS_CS_YUV420P16:
case AVS_CS_YUVA444P10:
case AVS_CS_YUVA422P10:
case AVS_CS_YUVA420P10:
case AVS_CS_YUVA422P12:
case AVS_CS_YUVA444P16:
case AVS_CS_YUVA422P16:
case AVS_CS_YUVA420P16:
"unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
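Each case above selects the matching FFmpeg pixel format (and the plane table used later when copying) before falling through to the "unknown colorspace" error. A sketch of one 10-bit 4:2:0 arm; the n_planes/planes bookkeeping members are assumptions, while the plane table itself (avs_planes_yuv) appears in the reference list below:

case AVS_CS_YUV420P10:
    st->codecpar->format = AV_PIX_FMT_YUV420P10;
    avs->n_planes        = 3;              /* assumed context members */
    avs->planes          = avs_planes_yuv;
    break;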
if (avs_library.avs_prop_get_type(avs->env, avsmap,
                                  "_FieldBased") == AVS_PROPTYPE_UNSET) {
if (avs_library.avs_prop_get_type(avs->env, avsmap,
                                  "_ColorRange") == AVS_PROPTYPE_UNSET) {
if (avs_library.avs_prop_get_type(avs->env, avsmap,
                                  "_Matrix") == AVS_PROPTYPE_UNSET) {
if (avs_library.avs_prop_get_type(avs->env, avsmap,
                                  "_ChromaLocation") == AVS_PROPTYPE_UNSET) {
if (avs_is_field_based(avs->vi) == 0) {
    if (avs_is_tff(avs->vi)) {
    else if (avs_is_bff(avs->vi)) {
switch (avs->vi->sample_type) {
case AVS_SAMPLE_INT8:
case AVS_SAMPLE_INT16:
case AVS_SAMPLE_INT24:
case AVS_SAMPLE_INT32:
case AVS_SAMPLE_FLOAT:
"unknown AviSynth sample type %d\n", avs->vi->sample_type);
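Each arm of this switch picks a PCM codec ID through the endianness-aware PCM() macro defined near the top of the file; for example (a sketch of a single arm):

case AVS_SAMPLE_INT16:
    st->codecpar->codec_id = PCM(S16);   /* AV_CODEC_ID_PCM_S16LE or _S16BE */
    break;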
if (avs_has_video(avs->vi)) {
if (avs_has_audio(avs->vi)) {
avs_new_value_string(s->url),
avs_new_value_bool(1)
avs_new_value_array(args, 2), 0);
if (utf8toansi(s->url, &filename_ansi)) {
arg = avs_new_value_string(filename_ansi);
arg = avs_new_value_string(s->url);
if (avs_is_error(val)) {
if (!avs_is_clip(val)) {
"AviSynth version is too old. Please upgrade to either AviSynth 2.6 >= RC1 or AviSynth+ >= r1718.\n");
AVS_VideoFrame *frame;
unsigned char *dst_p;
const unsigned char *src_p;
int n, i, plane, rowsize, planeheight, pitch, bits, ret;
(int64_t)avs->vi->height) * bits) / 8;
src_p = src_p + (planeheight - 1) * pitch;
rowsize, planeheight);
dst_p += rowsize * planeheight;
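These fragments belong to the per-plane copy loop: for each plane the read pointer, pitch, row size and plane height are queried, bottom-up packed RGB is flipped by starting at the last row with a negated pitch, and avs_bit_blt performs the copy into the packet buffer. A sketch of one iteration (the exact flip condition is abbreviated; the getters are the loaded AviSynth C API wrappers):

src_p       = avs_library.avs_get_read_ptr_p(frame, plane);
pitch       = avs_library.avs_get_pitch_p(frame, plane);
rowsize     = avs_library.avs_get_row_size_p(frame, plane);
planeheight = avs_library.avs_get_height_p(frame, plane);

/* Bottom-up (packed RGB) frames: start at the last row and walk upwards. */
if (avs_is_color_space(avs->vi, AVS_CS_BGR)) {
    src_p = src_p + (planeheight - 1) * pitch;
    pitch = -pitch;
}

avs_library.avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                        rowsize, planeheight);
dst_p += rowsize * planeheight;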
fps.num = avs->vi->fps_numerator;
fps.den = avs->vi->fps_denominator;
samplerate.num = avs->vi->audio_samples_per_second;
if (avs_has_video(avs->vi)) {
pkt->size = avs_bytes_per_channel_sample(avs->vi) *
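The rationals built here drive the audio packet sizing: when video is present, each audio packet covers roughly one video frame's worth of samples, converted between the two clocks with av_rescale_q. A sketch of that arithmetic (curr_frame and curr_sample are assumed bookkeeping members of the context; nchannels is the AVS_VideoInfo channel count):

AVRational fps        = { avs->vi->fps_numerator, avs->vi->fps_denominator };
AVRational samplerate = { avs->vi->audio_samples_per_second, 1 };
int64_t samples;

if (avs_has_video(avs->vi))
    /* samples owed up to the current video frame */
    samples = av_rescale_q(avs->curr_frame, samplerate, fps) - avs->curr_sample;
else
    samples = 1000;   /* fixed chunk when there is no video; value illustrative */

pkt->size = (int)(avs_bytes_per_channel_sample(avs->vi) *
                  samples * avs->vi->nchannels);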
int64_t timestamp, int flags)
avs->vi->fps_denominator };
samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };
st = s->streams[stream_index];
if ((timestamp >= avs->vi->num_frames) ||
    (timestamp > INT_MAX) ||
if (avs_has_audio(avs->vi))
if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
if (avs_has_video(avs->vi)) {
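Seeking keeps the two streams in lockstep by converting the requested timestamp between the frame clock (fps) and the sample clock (samplerate). A sketch of both directions, using the bounds checks visible in the fragments above (curr_frame/curr_sample are again assumed context members):

if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
    if ((timestamp >= avs->vi->num_frames) || (timestamp > INT_MAX) || (timestamp < 0))
        return AVERROR_EOF;
    avs->curr_frame = timestamp;
    if (avs_has_audio(avs->vi))
        avs->curr_sample = av_rescale_q(timestamp, samplerate, fps);
} else {
    if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
        return AVERROR_EOF;
    if (avs_has_video(avs->vi)) {
        /* snap audio seeks to a frame boundary so video stays aligned */
        avs->curr_frame  = av_rescale_q(timestamp, fps, samplerate);
        avs->curr_sample = av_rescale_q(avs->curr_frame, samplerate, fps);
    } else {
        avs->curr_sample = timestamp;
    }
}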
#define AVISYNTH_FRAMEPROP_DEFAULT AVISYNTH_FRAMEPROP_FIELD_ORDER | AVISYNTH_FRAMEPROP_RANGE | \
                                   AVISYNTH_FRAMEPROP_PRIMARIES | AVISYNTH_FRAMEPROP_TRANSFER | \
                                   AVISYNTH_FRAMEPROP_MATRIX | AVISYNTH_FRAMEPROP_CHROMA_LOCATION
#define OFFSET(x) offsetof(AviSynthContext, x)
.extensions = "avs",
static void error(const char *err)
#define AV_PIX_FMT_YUVA422P16
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
#define AV_PIX_FMT_GBRAP16
@ AVCOL_PRI_EBU3213
EBU Tech. 3213-E (nothing there) / one of JEDEC P22 group phosphors.
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
static const AVClass avisynth_demuxer_class
static av_cold void avisynth_atexit_handler(void)
enum AVMediaType codec_type
General type of the encoded data.
static av_cold int avisynth_load_library(void)
enum AVColorSpace color_space
#define AVERROR_EOF
End of file.
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
static int avisynth_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
#define AV_PIX_FMT_YUVA420P16
@ AVCOL_RANGE_JPEG
Full range content.
static int avs_atexit_called
#define AV_PIX_FMT_YUVA420P10
static const int avs_planes_yuv[3]
@ AVISYNTH_FRAMEPROP_MATRIX
AVRational avg_frame_rate
Average framerate.
#define AV_PIX_FMT_YUV420P10
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
@ AVCOL_TRC_BT2020_12
ITU-R BT2020 for 12-bit system.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
int nb_channels
Number of channels in this layout.
#define AV_PIX_FMT_YUVA422P10
@ AVCOL_SPC_BT2020_CL
ITU-R BT2020 constant luminance system.
enum AVColorPrimaries color_primaries
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
#define AV_PIX_FMT_GBRP14
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
#define AV_PIX_FMT_GBRP10
static int read_seek(AVFormatContext *ctx, int stream_index, int64_t timestamp, int flags)
#define AV_PIX_FMT_YUVA444P16
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
static av_cold int read_close(AVFormatContext *ctx)
static double val(void *priv, double ch)
#define AV_PIX_FMT_GRAY16
@ AVCOL_TRC_LOG_SQRT
"Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
int64_t duration
Decoding: duration of the stream, in stream time base.
static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st)
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
#define AV_PIX_FMT_YUV444P10
enum AVColorTransferCharacteristic color_trc
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_PIX_FMT_YUV422P16
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
#define AV_PIX_FMT_GBRAP10
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
#define AV_PIX_FMT_GBRAP12
#define LOAD_AVS_FUNC(name, continue_on_fail)
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
@ AVCOL_TRC_BT1361_ECG
ITU-R BT1361 Extended Colour Gamut.
#define AV_PIX_FMT_YUV444P16
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
static av_cold int avisynth_read_close(AVFormatContext *s)
static AviSynthContext * avs_ctx_list
#define AV_PIX_FMT_YUV420P16
static const int avs_planes_rgba[4]
@ AVCOL_PRI_SMPTE428
SMPTE ST 428-1 (CIE 1931 XYZ)
#define AV_PIX_FMT_GRAY14
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
static void avisynth_next_stream(AVFormatContext *s, AVStream **st, AVPacket *pkt, int *discard)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AVCOL_PRI_SMPTE240M
identical to above, also called "SMPTE C" even though it uses D65
#define AV_PIX_FMT_GRAYF32
@ AVCOL_SPC_CHROMA_DERIVED_CL
Chromaticity-derived constant luminance system.
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
@ AVCOL_PRI_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
#define AV_PIX_FMT_GRAY10
@ AVDISCARD_ALL
discard all
AVS_ScriptEnvironment * env
#define AV_PIX_FMT_GBRP16
AVCodecParameters * codecpar
Codec parameters associated with this stream.
#define LIBAVUTIL_VERSION_INT
static int read_header(FFV1Context *f)
AVClass
Describe the class of an AVClass context structure.
static av_cold void avisynth_context_destroy(AviSynthContext *avs)
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
AVRational
Rational number (pair of numerator and denominator).
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
@ AVCOL_TRC_IEC61966_2_4
IEC 61966-2-4.
static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt, int discard)
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
const char * av_default_item_name(void *ptr)
Return the context name.
static av_cold int avisynth_read_header(AVFormatContext *s)
static AviSynthLibrary avs_library
@ AVCOL_TRC_BT2020_10
ITU-R BT2020 for 10-bit system.
@ AVCOL_SPC_YCGCO
used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
AVChannelLayout ch_layout
Audio only.
@ AVCOL_RANGE_UNSPECIFIED
int sample_rate
Audio only.
int64_t nb_frames
number of frames in this stream if known or 0
@ AVCOL_PRI_BT2020
ITU-R BT2020.
static const AVOption avisynth_options[]
@ AVCOL_TRC_SMPTE2084
SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems.
@ AVCOL_PRI_SMPTE431
SMPTE ST 431-2 (2011) / DCI P3.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt, int discard)
static int avisynth_create_stream(AVFormatContext *s)
@ AVCOL_PRI_FILM
colour filters using Illuminant C
@ AVISYNTH_FRAMEPROP_PRIMARIES
@ AVCOL_TRC_LOG
"Logarithmic transfer characteristic (100:1 range)"
#define AV_PIX_FMT_GBRPF32
#define AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV444P12
@ AVCHROMA_LOC_UNSPECIFIED
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
@ AVISYNTH_FRAMEPROP_TRANSFER
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
@ AV_FIELD_TT
Top coded_first, top displayed first.
#define AV_PIX_FMT_YUVA444P10
@ AVCOL_SPC_CHROMA_DERIVED_NCL
Chromaticity-derived non-constant luminance system.
@ AVCOL_TRC_BT709
also ITU-R BT1361
@ AVISYNTH_FRAMEPROP_CHROMA_LOCATION
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
static const int avs_planes_grey[1]
#define AV_PIX_FMT_BGRA64
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
#define AVISYNTH_FRAMEPROP_DEFAULT
#define AV_PIX_FMT_GBRP12
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
enum AVColorRange color_range
Video only.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
enum AVFieldOrder field_order
Video only.
static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st)
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
int id
Format-specific stream ID.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
@ AVISYNTH_FRAMEPROP_RANGE
#define AV_PIX_FMT_YUV420P12
enum AVChromaLocation chroma_location
#define AV_PIX_FMT_YUV422P14
@ AVISYNTH_FRAMEPROP_FIELD_ORDER
@ AVCOL_TRC_ARIB_STD_B67
ARIB STD-B67, known as "Hybrid log-gamma".
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
const AVInputFormat ff_avisynth_demuxer
@ AVCOL_SPC_FCC
FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
#define AV_PIX_FMT_YUVA422P12
#define AV_PIX_FMT_GBRAPF32
static const int avs_planes_rgb[3]
@ AVCOL_TRC_SMPTE170M
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static const int avs_planes_yuva[4]
@ AVCOL_PRI_SMPTE432
SMPTE ST 432-1 (2010) / P3 D65 / Display P3.
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
AVPacket
This structure stores compressed data.
@ AVCOL_SPC_SMPTE2085
SMPTE 2085, Y'D'zD'x.
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
struct AviSynthContext * next
@ AV_FIELD_BB
Bottom coded first, bottom displayed first.
#define flags(name, subs,...)
AVSC_DECLARE_FUNC(avs_bit_blt)
@ AVCOL_TRC_SMPTE428
SMPTE ST 428-1.
static av_cold int avisynth_context_create(AVFormatContext *s)
#define AV_PIX_FMT_YUV444P14
static int avisynth_open_file(AVFormatContext *s)
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
#define AV_PIX_FMT_GRAY12
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
@ AVCOL_SPC_ICTCP
ITU-R BT.2100-0, ICtCp.
static const int avs_planes_packed[1]
@ AVCHROMA_LOC_BOTTOMLEFT
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
#define AV_PIX_FMT_YUV420P14