#include <X11/extensions/Xv.h>
#include <X11/extensions/XShm.h>
#include <X11/extensions/Xvlib.h>
unsigned int num_adaptors;
XvImageFormatValues *fv;
XWindowAttributes window_attrs;
int num_formats = 0, j, tag, ret;
if (s->nb_streams > 1) {
    av_log(s, AV_LOG_ERROR,
           "Only a single raw or wrapped avframe video stream is supported.\n");
    return AVERROR(EINVAL);
}

tag = xv_get_tag_from_format(par->format);
if (!tag) {
    av_log(s, AV_LOG_ERROR,
           "Unsupported pixel format '%s', only yuv420p, uyvy422, yuyv422 are currently supported\n",
           av_get_pix_fmt_name(par->format));
    return AVERROR_PATCHWELCOME;
}
if (XvQueryAdaptors(xv->display, DefaultRootWindow(xv->display), &num_adaptors, &ai) != Success) {
    return AVERROR_EXTERNAL;
}

XvFreeAdaptorInfo(ai);
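A hedged sketch of the step this excerpt skips between the two calls above: picking an XVideo port from the queried adaptor list before the adaptor info is freed. The use of the first adaptor and the xv_port field name are illustrative assumptions, not a quote of the file.

/* illustrative only: done after XvQueryAdaptors() succeeds and before
 * XvFreeAdaptorInfo(ai) above */
if (!num_adaptors)
    return AVERROR(ENODEV);      /* no XVideo adaptor available */
xv->xv_port = ai[0].base_id;     /* first adaptor's base port (assumption) */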
for (j = 0; j < num_formats; j++) {
    if (fv[j].id == tag) {
        break;
    }
}
if (j >= num_formats) {
    av_log(s, AV_LOG_ERROR,
           "Device does not support pixel format %s, aborting\n",
           av_get_pix_fmt_name(par->format));
    return AVERROR(EINVAL);
}
XGetWindowAttributes(xv->display, xv->window, &window_attrs);
fgcolor.red = fgcolor.green = fgcolor.blue = 0;
fgcolor.flags = DoRed | DoGreen | DoBlue;
XAllocColor(xv->display, window_attrs.colormap, &fgcolor);
XSetForeground(xv->display, xv->gc, fgcolor.pixel);
XWindowAttributes window_attrs;

XGetWindowAttributes(xv->display, xv->window, &window_attrs);

xv->dest_w = window_attrs.width;
xv->dest_h = window_attrs.height;

rect[0].height = rect[1].height = window_attrs.height;

rect[0].width = rect[1].width = window_attrs.width;
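The full letterboxing logic lives in compute_display_area(); as a hedged sketch of the underlying arithmetic, the display aspect ratio (sample aspect ratio times width/height) can be compared against the window and one dimension rescaled. Variable names below are illustrative.

/* sketch: fit a picture with display aspect ratio `dar` into a
 * dest_w x dest_h window, shrinking one dimension to preserve dar
 * (st: the single video stream, par: its codecpar; illustrative) */
AVRational sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio
                                             : (AVRational){ 1, 1 };
AVRational dar = av_mul_q(sar, (AVRational){ par->width, par->height });

if (av_cmp_q(dar, (AVRational){ xv->dest_w, xv->dest_h }) > 0) {
    /* picture is relatively wider than the window: keep width, reduce height */
    xv->dest_h = av_rescale(xv->dest_w, dar.den, dar.num);
} else {
    /* picture is relatively taller: keep height, reduce width */
    xv->dest_w = av_rescale(xv->dest_h, dar.num, dar.den);
}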
uint8_t *data[3] = {
    img->data + img->offsets[0],
    img->data + img->offsets[1],
    img->data + img->offsets[2]
};
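write_picture() hands these plane pointers to av_image_copy(); a hedged sketch of that copy, and of how a raw-video packet's flat buffer could first be split into planes with av_image_fill_arrays(), is shown below. Variable names such as input_data and linesize follow the surrounding excerpts but are illustrative here.

/* sketch: split a flat raw-video packet into plane pointers/linesizes */
uint8_t *input_data[4];
int      linesize[4];
av_image_fill_arrays(input_data, linesize, pkt->data,
                     xv->image_format, par->width, par->height, 1);

/* sketch: copy the planes into the XvImage's (shared-memory) buffer */
av_image_copy(data, img->pitches,
              (const uint8_t **)input_data, linesize,
              xv->image_format, img->width, img->height);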
while (XPending(xv->display)) {
    XNextEvent(xv->display, &event);
    if (event.type == ClientMessage && event.xclient.data.l[0] == xv->wm_delete_message) {
        return AVERROR(EPIPE);
    }
}
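The wm_delete_message atom checked above is the standard Xlib close-button handshake. A hedged sketch of how it is typically registered during window setup follows; field names mirror the excerpt, but the exact call site is not shown in this listing.

/* sketch: ask the window manager to deliver WM_DELETE_WINDOW as a
 * ClientMessage instead of killing the X connection */
xv->wm_delete_message = XInternAtom(xv->display, "WM_DELETE_WINDOW", False);
XSetWMProtocols(xv->display, xv->window, &xv->wm_delete_message, 1);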
#define OFFSET(x) offsetof(XVContext, x)
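OFFSET() feeds the AVOption table referenced by xv_class and ff_xv_muxer further down. A hedged sketch of how those pieces usually fit together for an output device is given below; the option entries and field names (window_title, window_width/window_height) are illustrative, not a verbatim copy of the file's table.

/* sketch: an options table using OFFSET(), wired into an AVClass
 * (illustrative entries and defaults) */
static const AVOption example_options[] = {
    { "window_title", "set window title",       OFFSET(window_title),
      AV_OPT_TYPE_STRING,     { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_size",  "set window forced size", OFFSET(window_width),
      AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL }
};

static const AVClass example_xv_class = {
    .class_name = "xvideo outdev",
    .item_name  = av_default_item_name,
    .option     = example_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};

The muxer declaration (ff_xv_muxer) would then typically point its .priv_class at such an AVClass so the options become user-settable encoding parameters.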
static int xv_repaint(AVFormatContext *s)
AVPixelFormat
Pixel format.
enum AVMediaType codec_type
General type of the encoded data.
This struct describes the properties of an encoded stream.
This structure describes decoded (raw) audio or video data.
@ AV_APP_TO_DEV_WINDOW_REPAINT
Repaint request message.
static void compute_display_area(AVFormatContext *s)
XShmSegmentInfo yuv_shminfo
static int xv_control_message(AVFormatContext *s, int type, void *data, size_t data_size)
Dispatches application-to-device control messages such as window repaint requests (a sketch follows after this list).
static const AVOption options[]
static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
static int xv_get_tag_from_format(enum AVPixelFormat format)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
const AVOutputFormat ff_xv_muxer
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
AVRational sample_aspect_ratio
Video only.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_CODEC_ID_WRAPPED_AVFRAME
Passthrough codec, AVFrames wrapped in AVPacket.
static const char * window_title
static int xv_write_frame(AVFormatContext *s, int stream_index, AVFrame **frame, unsigned flags)
AVCodecParameters * codecpar
Codec parameters associated with this stream.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
AVRational
Rational number (pair of numerator and denominator).
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT
enum AVPixelFormat image_format
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align)
Setup the data pointers and linesizes based on the specified image parameters and the provided array.
static int write_picture(AVFormatContext *s, uint8_t *input_data[4], int linesize[4])
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
#define AVERROR_EXTERNAL
Generic error in an external library.
static int xv_write_trailer(AVFormatContext *s)
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
static const XVTagFormatMap tag_codec_map[]
unsigned int dest_h
display area dimensions
static const AVClass xv_class
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
char * av_strdup(const char *s)
Duplicate a string.
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
This structure stores compressed data.
#define MKTAG(a, b, c, d)
static int xv_write_header(AVFormatContext *s)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
int dest_y
display area position
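As referenced above for xv_control_message, a hedged sketch of how the AV_APP_TO_DEV_WINDOW_REPAINT message plausibly maps onto xv_repaint(); the function name and the default return value are assumptions for illustration.

/* sketch: dispatch application-to-device messages; only the window
 * repaint request is handled, everything else is reported unsupported */
static int example_control_message(AVFormatContext *s, int type,
                                    void *data, size_t data_size)
{
    switch (type) {
    case AV_APP_TO_DEV_WINDOW_REPAINT:
        return xv_repaint(s);
    default:
        break;
    }
    return AVERROR(ENOSYS);   /* assumption: unsupported messages return ENOSYS */
}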