Go to the documentation of this file.
26 #import <CoreImage/CoreImage.h>
27 #import <AppKit/AppKit.h>
72 link->frame_rate =
ctx->frame_rate;
97 NSArray *filter_categories = nil;
99 if (
ctx->list_generators && !
ctx->list_filters) {
100 filter_categories = [NSArray arrayWithObjects:kCICategoryGenerator, nil];
103 NSArray *filter_names = [CIFilter filterNamesInCategories:filter_categories];
104 NSEnumerator *
filters = [filter_names objectEnumerator];
106 NSString *filter_name;
107 while (filter_name = [
filters nextObject]) {
111 CIFilter *
filter = [CIFilter filterWithName:filter_name];
112 NSDictionary *filter_attribs = [filter attributes];
113 NSArray *filter_inputs = [filter inputKeys];
115 for (
input in filter_inputs) {
116 NSDictionary *input_attribs = [filter_attribs valueForKey:input];
117 NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
118 if ([input_class isEqualToString:
@"NSNumber"]) {
119 NSNumber *value_default = [input_attribs valueForKey:kCIAttributeDefault];
120 NSNumber *value_min = [input_attribs valueForKey:kCIAttributeSliderMin];
121 NSNumber *value_max = [input_attribs valueForKey:kCIAttributeSliderMax];
125 [input_class UTF8String],
126 [[value_min stringValue] UTF8String],
127 [[value_max stringValue] UTF8String],
128 [[value_default stringValue] UTF8String]);
132 [input_class UTF8String]);
148 NSData *
data = [NSData dataWithBytesNoCopy:frame->data[0]
149 length:frame->height*frame->linesize[0]
152 CIImage *
ret = [(__bridge CIImage*)ctx->input_image initWithBitmapData:data
153 bytesPerRow:frame->linesize[0]
155 format:kCIFormatARGB8
156 colorSpace:ctx->color_space];
163 CIImage *filter_input = (__bridge CIImage*)
ctx->input_image;
164 CIImage *filter_output =
NULL;
170 filter_input = [(__bridge CIImage*)ctx->filters[i-1] valueForKey:kCIOutputImageKey];
171 CGRect out_rect = [filter_input extent];
172 if (out_rect.size.width >
frame->width || out_rect.size.height >
frame->height) {
174 out_rect.origin.x = 0.0f;
175 out_rect.origin.y = 0.0f;
176 out_rect.size.width =
frame->width;
177 out_rect.size.height =
frame->height;
179 filter_input = [filter_input imageByCroppingToRect:out_rect];
185 if (!
ctx->is_video_source ||
i) {
187 [filter setValue:filter_input forKey:kCIInputImageKey];
188 }
@catch (NSException *exception) {
189 if (![[exception
name] isEqualToString:NSUndefinedKeyException]) {
200 filter_output = [filter valueForKey:kCIOutputImageKey];
202 if (!filter_output) {
208 CGRect out_rect = [filter_output extent];
209 if (out_rect.size.width >
frame->width || out_rect.size.height >
frame->height) {
211 out_rect.origin.x = 0.0f;
212 out_rect.origin.y = 0.0f;
213 out_rect.size.width =
frame->width;
214 out_rect.size.height =
frame->height;
217 CGImageRef
out = [(__bridge CIContext*)ctx->glctx createCGImage:filter_output
226 CGContextRelease(
ctx->cgctx);
229 size_t out_width = CGImageGetWidth(
out);
230 size_t out_height = CGImageGetHeight(
out);
232 if (out_width >
frame->width || out_height >
frame->height) {
233 av_log(
ctx,
AV_LOG_WARNING,
"Output image has unexpected size: %lux%lu (expected: %ix%i). This may crash...\n",
234 out_width, out_height,
frame->width,
frame->height);
236 ctx->cgctx = CGBitmapContextCreate(
frame->data[0],
239 ctx->bits_per_component,
242 (uint32_t)kCGImageAlphaPremultipliedFirst);
250 if (
ctx->output_rect) {
252 NSString *tmp_string = [NSString stringWithUTF8String:ctx->output_rect];
253 NSRect
tmp = NSRectFromString(tmp_string);
255 }
@catch (NSException *exception) {
259 if (
rect.size.width == 0.0f) {
262 if (
rect.size.height == 0.0f) {
303 frame->key_frame = 1;
304 frame->interlaced_frame = 0;
306 frame->sample_aspect_ratio =
ctx->sar;
317 NSString *input_key = [NSString stringWithUTF8String:key];
318 NSString *input_val = [NSString stringWithUTF8String:value];
320 NSDictionary *filter_attribs = [filter attributes];
321 NSDictionary *input_attribs = [filter_attribs valueForKey:input_key];
323 NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
324 NSString *input_type = [input_attribs valueForKey:kCIAttributeType];
326 if (!input_attribs) {
328 [input_key UTF8String]);
333 [input_key UTF8String],
334 [input_val UTF8String],
335 input_attribs ? (
unsigned long)[input_attribs count] : -1,
336 [input_class UTF8String],
337 [input_type UTF8String]);
339 if ([input_class isEqualToString:
@"NSNumber"]) {
340 float input = input_val.floatValue;
341 NSNumber *max_value = [input_attribs valueForKey:kCIAttributeSliderMax];
342 NSNumber *min_value = [input_attribs valueForKey:kCIAttributeSliderMin];
343 NSNumber *used_value = nil;
345 #define CLAMP_WARNING do { \
346 av_log(ctx, AV_LOG_WARNING, "Value of \"%f\" for option \"%s\" is out of range [%f %f], clamping to \"%f\".\n", \
348 [input_key UTF8String], \
349 min_value.floatValue, \
350 max_value.floatValue, \
351 used_value.floatValue); \
353 if (
input > max_value.floatValue) {
354 used_value = max_value;
356 }
else if (
input < min_value.floatValue) {
357 used_value = min_value;
360 used_value = [NSNumber numberWithFloat:input];
363 [filter setValue:used_value forKey:input_key];
364 }
else if ([input_class isEqualToString:
@"CIVector"]) {
365 CIVector *
input = [CIVector vectorWithString:input_val];
369 [input_val UTF8String]);
373 [filter setValue:input forKey:input_key];
374 }
else if ([input_class isEqualToString:
@"CIColor"]) {
375 CIColor *
input = [CIColor colorWithString:input_val];
379 [input_val UTF8String]);
383 [filter setValue:input forKey:input_key];
384 }
else if ([input_class isEqualToString:
@"NSString"]) {
385 [filter setValue:input_val forKey:input_key];
386 }
else if ([input_class isEqualToString:
@"NSData"]) {
387 NSData *
input = [NSData dataWithBytes:(const void*)[input_val cStringUsingEncoding:NSISOLatin1StringEncoding]
388 length:[input_val lengthOfBytesUsingEncoding:NSISOLatin1StringEncoding]];
392 [input_val UTF8String]);
396 [filter setValue:input forKey:input_key];
399 [input_class UTF8String]);
411 CIFilter *
filter = [CIFilter filterWithName:[NSString stringWithUTF8String:filter_name]];
414 [filter setDefaults];
417 if (filter_options) {
436 if (
ctx->list_filters ||
ctx->list_generators) {
441 if (
ctx->filter_string) {
465 if (strncmp(
f->value,
"default", 7)) {
476 if (!filter_options) {
486 if (!
ctx->filters[
i]) {
499 const NSOpenGLPixelFormatAttribute attr[] = {
500 NSOpenGLPFAAccelerated,
501 NSOpenGLPFANoRecovery,
502 NSOpenGLPFAColorSize, 32,
506 NSOpenGLPixelFormat *pixel_format = [[NSOpenGLPixelFormat alloc] initWithAttributes:(void *)&attr];
507 ctx->color_space = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
508 ctx->glctx = CFBridgingRetain([CIContext contextWithCGLContext:CGLGetCurrentContext()
509 pixelFormat:[pixel_format CGLPixelFormatObj]
510 colorSpace:
ctx->color_space
519 ctx->input_image = CFBridgingRetain([CIImage emptyImage]);
528 ctx->is_video_source = 1;
537 #define SafeCFRelease(ptr) do { \
552 for (
int i = 0;
i <
ctx->num_filters;
i++) {
586 #define OFFSET(x) offsetof(CoreImageContext, x)
587 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
589 #define GENERATOR_OPTIONS \
590 {"size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
591 {"s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
592 {"rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
593 {"r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
594 {"duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
595 {"d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
596 {"sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX, FLAGS},
598 #define FILTER_OPTIONS \
599 {"list_filters", "list available filters", OFFSET(list_filters), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
600 {"list_generators", "list available generators", OFFSET(list_generators), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
601 {"filter", "names and options of filters to apply", OFFSET(filter_string), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS}, \
602 {"output_rect", "output rectangle within output image", OFFSET(output_rect), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS},
619 .priv_class = &coreimage_class,
635 .
name =
"coreimagesrc",
640 .priv_class = &coreimagesrc_class,
const AVFilter ff_vsrc_coreimagesrc
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
CFTypeRef glctx
OpenGL context.
#define AV_LOG_WARNING
Something somehow does not look correct.
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf default minimum maximum flags name is the option name
int64_t duration
duration expressed in microseconds
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
CGContextRef cgctx
Bitmap context for image copy.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
AVFILTER_DEFINE_CLASS(coreimage)
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
static CIFilter * create_filter(CoreImageContext *ctx, const char *filter_name, AVDictionary *filter_options)
Create a filter object by a given name and set all options to defaults.
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
CGColorSpaceRef color_space
Common color space for input image and cgcontext.
static const AVFilterPad vf_coreimage_outputs[]
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter's input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
const char * name
Filter name.
static const AVOption coreimage_options[]
A link between two filters.
const AVFilter ff_vf_coreimage
static int filter_frame(AVFilterLink *link, AVFrame *frame)
Apply all valid filters successively to the input image.
void * priv
private data for use by the filter
static const AVFilterPad vsrc_coreimagesrc_outputs[]
static int request_frame(AVFilterLink *link)
static av_cold int init(AVFilterContext *fctx)
A filter pad used for either input or output.
#define filters(fmt, inverse, clip, i, c)
static int config_output(AVFilterLink *link)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
AVFrame * picref
cached reference containing the painted picture
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
AVRational time_base
stream time base
#define FILTER_INPUTS(array)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
static int config_input(AVFilterLink *link)
Determine image properties from input link of filter chain.
int av_log_get_level(void)
Get the current log level.
Describe the class of an AVClass context structure.
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Rational number (pair of numerator and denominator).
CFTypeRef * filters
CIFilter object for all requested filters.
int bits_per_component
Shared bpc for input-output operation.
@ AV_PICTURE_TYPE_I
Intra.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several inputs
int64_t pts
increasing presentation time stamp
const OptionDef options[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
AVRational sar
sample aspect ratio
CFTypeRef input_image
Input image container for passing into Core Image API.
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVRational time_base
Time base for the timestamps in this frame.
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
#define AVERROR_EXTERNAL
Generic error in an external library.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filter stores internally one or a few frames for some input
#define AV_LOG_INFO
Standard information.
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
#define FILTER_SINGLE_PIXFMT(pix_fmt_)
static void list_filters(CoreImageContext *ctx)
Print a list of all available filters including options and respective value ranges and defaults.
#define i(width, name, range_min, range_max)
static const AVOption coreimagesrc_options[]
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf default value
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
char * filter_string
The complete user provided filter definition.
const char * name
Pad name.
static void set_option(CoreImageContext *ctx, CIFilter *filter, const char *key, const char *value)
Set an option of the given filter to the provided key-value pair.
void * av_calloc(size_t nmemb, size_t size)
AVRational frame_rate
video frame rate
char * output_rect
Rectangle to be filled with filter input.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
int num_filters
Amount of filters in *filters.
int list_generators
Option used to list all available generators.
static av_cold int init_src(AVFilterContext *fctx)
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
int is_video_source
filter is used as video source
static int apply_filter(CoreImageContext *ctx, AVFilterLink *link, AVFrame *frame)
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
static av_cold void uninit(AVFilterContext *fctx)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
#define FILTER_OUTPUTS(array)
static const AVFilterPad vf_coreimage_inputs[]
#define GENERATOR_OPTIONS
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
#define SafeCFRelease(ptr)
int list_filters
Option used to list all available filters including generators.