Go to the documentation of this file.
56 static const char bgr_chlist[4] = {
'B',
'G',
'R',
'A' };
113 switch (
s->compression) {
117 s->scanline_height = 1;
118 s->nb_scanlines = avctx->
height;
121 s->scanline_height = 16;
122 s->nb_scanlines = (avctx->
height +
s->scanline_height - 1) /
s->scanline_height;
128 s->scanline =
av_calloc(
s->nb_scanlines,
sizeof(*
s->scanline));
139 for (
int y = 0; y <
s->nb_scanlines &&
s->scanline; y++) {
154 const ptrdiff_t half_size = (
size + 1) / 2;
156 uint8_t *
t2 = dst + half_size;
158 for (ptrdiff_t
i = 0;
i < half_size;
i++) {
168 for (ptrdiff_t
i = 1;
i <
size;
i++) {
169 int d =
src[
i] - p + 384;
177 const uint8_t *in, int64_t in_size)
179 int64_t
i = 0, o = 0,
run = 1,
copy = 0;
181 while (
i < in_size) {
182 while (
i +
run < in_size && in[
i] == in[
i +
run] &&
run < 128)
192 if (
i +
run < in_size)
201 for (
int x = 0; x <
copy; x++)
202 out[o + x] = in[
i + x];
217 const int64_t element_size =
s->pixel_type ==
EXR_HALF ? 2LL : 4LL;
219 for (
int y = 0; y <
frame->height; y++) {
221 int64_t tmp_size = element_size *
s->planes *
frame->width;
222 int64_t max_compressed_size = tmp_size * 3 / 2;
236 switch (
s->pixel_type) {
238 for (
int p = 0; p <
s->planes; p++) {
239 int ch =
s->ch_order[p];
246 for (
int p = 0; p <
s->planes; p++) {
247 int ch =
s->ch_order[p];
249 uint32_t *
src = (uint32_t *)(
frame->data[ch] + y *
frame->linesize[ch]);
251 for (
int x = 0; x <
frame->width; x++)
261 scanline->
tmp, tmp_size);
275 const int64_t element_size =
s->pixel_type ==
EXR_HALF ? 2LL : 4LL;
277 for (
int y = 0; y <
s->nb_scanlines; y++) {
279 const int scanline_height =
FFMIN(
s->scanline_height,
frame->height - y *
s->scanline_height);
280 int64_t tmp_size = element_size *
s->planes *
frame->width * scanline_height;
281 int64_t max_compressed_size = tmp_size * 3 / 2;
282 unsigned long actual_size, source_size;
296 switch (
s->pixel_type) {
298 for (
int l = 0; l < scanline_height; l++) {
299 const int scanline_size =
frame->width * 4 *
s->planes;
301 for (
int p = 0; p <
s->planes; p++) {
302 int ch =
s->ch_order[p];
305 frame->data[ch] + (y *
s->scanline_height + l) *
frame->linesize[ch],
311 for (
int l = 0; l < scanline_height; l++) {
312 const int scanline_size =
frame->width * 2 *
s->planes;
314 for (
int p = 0; p <
s->planes; p++) {
315 int ch =
s->ch_order[p];
317 uint32_t *
src = (uint32_t *)(
frame->data[ch] + (y *
s->scanline_height + l) *
frame->linesize[ch]);
319 for (
int x = 0; x <
frame->width; x++)
328 source_size = tmp_size;
329 actual_size = max_compressed_size;
331 scanline->
tmp, source_size);
354 avctx->
height, 64) * 3LL / 2;
361 bytestream2_put_le32(pb, 20000630);
362 bytestream2_put_byte(pb, 2);
363 bytestream2_put_le24(pb, 0);
365 bytestream2_put_le32(pb,
s->planes * 18 + 1);
367 for (
int p = 0; p <
s->planes; p++) {
368 bytestream2_put_byte(pb,
s->ch_names[p]);
369 bytestream2_put_byte(pb, 0);
370 bytestream2_put_le32(pb,
s->pixel_type);
371 bytestream2_put_le32(pb, 0);
372 bytestream2_put_le32(pb, 1);
373 bytestream2_put_le32(pb, 1);
375 bytestream2_put_byte(pb, 0);
378 bytestream2_put_le32(pb, 1);
379 bytestream2_put_byte(pb,
s->compression);
382 bytestream2_put_le32(pb, 16);
383 bytestream2_put_le32(pb, 0);
384 bytestream2_put_le32(pb, 0);
385 bytestream2_put_le32(pb, avctx->
width - 1);
386 bytestream2_put_le32(pb, avctx->
height - 1);
389 bytestream2_put_le32(pb, 16);
390 bytestream2_put_le32(pb, 0);
391 bytestream2_put_le32(pb, 0);
392 bytestream2_put_le32(pb, avctx->
width - 1);
393 bytestream2_put_le32(pb, avctx->
height - 1);
396 bytestream2_put_le32(pb, 1);
397 bytestream2_put_byte(pb, 0);
400 bytestream2_put_le32(pb, 8);
401 bytestream2_put_le64(pb, 0);
404 bytestream2_put_le32(pb, 4);
409 bytestream2_put_le32(pb, 4);
415 bytestream2_put_le32(pb, 8);
421 bytestream2_put_le32(pb, 4);
425 bytestream2_put_le32(pb, 4);
427 bytestream2_put_byte(pb, 0);
429 switch (
s->compression) {
444 switch (
s->compression) {
450 for (
int y = 0; y < avctx->
height; y++) {
451 bytestream2_put_le64(pb,
offset);
455 for (
int y = 0; y < avctx->
height; y++) {
456 bytestream2_put_le32(pb, y);
457 bytestream2_put_le32(pb,
s->planes * avctx->
width * 4);
458 for (
int p = 0; p <
s->planes; p++) {
459 int ch =
s->ch_order[p];
465 for (
int y = 0; y < avctx->
height; y++) {
466 bytestream2_put_le64(pb,
offset);
470 for (
int y = 0; y < avctx->
height; y++) {
471 bytestream2_put_le32(pb, y);
472 bytestream2_put_le32(pb,
s->planes * avctx->
width * 2);
473 for (
int p = 0; p <
s->planes; p++) {
474 int ch =
s->ch_order[p];
475 uint32_t *
src = (uint32_t *)(
frame->data[ch] + y *
frame->linesize[ch]);
477 for (
int x = 0; x <
frame->width; x++)
478 bytestream2_put_le16(pb,
float2half(
src[x],
s->basetable,
s->shifttable));
488 for (
int y = 0; y <
s->nb_scanlines; y++) {
491 bytestream2_put_le64(pb,
offset);
495 for (
int y = 0; y <
s->nb_scanlines; y++) {
498 bytestream2_put_le32(pb, y *
s->scanline_height);
515 #define OFFSET(x) offsetof(EXRContext, x)
516 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing the init function to be called without locking any global resources.
AVPixelFormat
Pixel format.
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. They are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
This structure describes decoded (raw) audio or video data.
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
static const uint8_t gbra_order[4]
static av_always_inline uint32_t av_float2int(float f)
Reinterpret a float as a 32-bit integer.
static const AVClass exr_class
uint8_t * uncompressed_data
void av_shrink_packet(AVPacket *pkt, int size)
Reduce packet size, correctly zeroing padding.
static const char abgr_chlist[4]
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it vf type
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
static const uint8_t gbr_order[4]
static double av_q2d(AVRational a)
Convert an AVRational to a double.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
EXRScanlineData * scanline
static const char bgr_chlist[4]
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define LIBAVUTIL_VERSION_INT
static int64_t rle_compress(uint8_t *out, int64_t out_size, const uint8_t *in, int64_t in_size)
Describe the class of an AVClass context structure.
const char * av_default_item_name(void *ptr)
Return the context name.
static uint16_t float2half(uint32_t f, uint16_t *basetable, uint8_t *shifttable)
const AVCodec ff_exr_encoder
static int encode_scanline_zip(EXRContext *s, const AVFrame *frame)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static void copy(const float *p1, float *p2, const int length)
#define AV_PIX_FMT_GBRPF32
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it vf offset
static void predictor(uint8_t *src, ptrdiff_t size)
uint8_t * compressed_data
unsigned int compressed_size
unsigned int uncompressed_size
#define i(width, name, range_min, range_max)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void * av_calloc(size_t nmemb, size_t size)
static void float2half_tables(uint16_t *basetable, uint8_t *shifttable)
static const AVOption options[]
static int encode_scanline_rle(EXRContext *s, const AVFrame *frame)
#define FFSWAP(type, a, b)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
these buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. It must return, or at least make progress towards producing a frame.
static void reorder_pixels(uint8_t *dst, const uint8_t *src, ptrdiff_t size)
main external API structure.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
static int encode_init(AVCodecContext *avctx)
#define AV_PIX_FMT_GBRAPF32
This structure stores compressed data.
int width
picture width / height.
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
static int encode_close(AVCodecContext *avctx)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.