#define IOBUF_SIZE 4096

static void png_get_interlaced_row(uint8_t *dst, int row_size,
                                   int bits_per_pixel, int pass,
                                   const uint8_t *src, int width)
{
    int x, mask, dst_x, j, b, bpp;
    /* ... */
    static const int masks[] = {0x80, 0x08, 0x88, 0x22, 0xaa, 0x55, 0xff};
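    /* Note (added, not part of the original listing): each mask byte marks,
     * MSB first, which of the eight columns of an 8-pixel group belong to the
     * given Adam7 pass, e.g. 0x80 keeps only column 0, 0xaa the even columns
     * and 0xff every column; the test "(mask << j) & 0x80" below reads bit j
     * of the selected mask. */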
    mask = masks[pass];
    switch (bits_per_pixel) {
    case 1:
        memset(dst, 0, row_size);
        /* ... */
        for (x = 0; x < width; x++) {
            /* ... */
            if ((mask << j) & 0x80) {
                b = (src[x >> 3] >> (7 - j)) & 1;
                dst[dst_x >> 3] |= b << (7 - (dst_x & 7));
                /* ... */
            }
        }
        break;
    default:
        bpp = bits_per_pixel >> 3;
        /* ... */
        for (x = 0; x < width; x++) {
            /* ... */
            if ((mask << j) & 0x80) {
                /* copy one bpp-sized pixel from src to dst */
                /* ... */
            }
        }
        break;
    }
}
static void sub_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top,
                                     int w, int bpp)
{
    /* ... */
    for (i = 0; i < w; i++) {
        int a, b, c, p, pa, pb, pc;
        /* ... */
        if (pa <= pb && pa <= pc)
            p = a;
        /* ... */
    }
}
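/* Background note (added): the PNG Paeth predictor picks, per byte, whichever
 * of left (a), above (b) and upper-left (c) is closest to p = a + b - c:
 *     p  = a + b - c;
 *     pa = abs(p - a);  pb = abs(p - b);  pc = abs(p - c);
 *     predictor = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
 * The encoder then stores src - predictor for each byte of the row. */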
static void sub_left_prediction(PNGEncContext *c, uint8_t *dst,
                                const uint8_t *src, int bpp, int size)
{
    /* ... */
    const uint8_t *src1 = src + bpp;
    const uint8_t *src2 = src;
    /* ... */
    memcpy(dst, src, bpp);
    /* ... */
    for (x = 0; x < unaligned_w; x++)
        *dst++ = *src1++ - *src2++;
    /* ... */
    c->llvidencdsp.diff_bytes(dst, src1, src2, size);
}
static void png_filter_row(PNGEncContext *c, uint8_t *dst, int filter_type,
                           uint8_t *src, uint8_t *top, int size, int bpp)
{
    int i;

    switch (filter_type) {
    /* ... */
    case PNG_FILTER_VALUE_UP:
        c->llvidencdsp.diff_bytes(dst, src, top, size);
        break;
    case PNG_FILTER_VALUE_AVG:
        for (i = 0; i < bpp; i++)
            dst[i] = src[i] - (top[i] >> 1);
        for (; i < size; i++)
            dst[i] = src[i] - ((src[i - bpp] + top[i]) >> 1);
        break;
    case PNG_FILTER_VALUE_PAETH:
        for (i = 0; i < bpp; i++)
            /* ... */
        break;
    /* ... */
    }
}
static uint8_t *png_choose_filter(PNGEncContext *s, uint8_t *dst, uint8_t *src,
                                  uint8_t *top, int size, int bpp)
{
    int pred = s->filter_type;
    /* ... */
    /* PNG_FILTER_VALUE_MIXED: filter the row with each predictor in turn and
     * keep the candidate whose output has the smallest sum of absolute values */
    int cost, bcost = INT_MAX;
    uint8_t *buf1 = dst, *buf2 = dst + size + 16;
    /* ... */
        cost += abs((int8_t) buf1[i]);
    /* ... */
        FFSWAP(uint8_t *, buf1, buf2);
    /* ... */
}
static void png_write_chunk(uint8_t **f, uint32_t tag,
                            const uint8_t *buf, int length)
{
    /* ... */
    bytestream_put_be32(f, length);
    /* ... */
    crc = av_crc(crc_table, crc, tagbuf, 4);
    /* ... */
    crc = av_crc(crc_table, crc, buf, length);
    memcpy(*f, buf, length);
    /* ... */
    bytestream_put_be32(f, ~crc);
}
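/* Illustrative usage (added, not part of the original listing): a zero-length
 * chunk such as IEND can be emitted with a NULL payload, e.g.
 *     png_write_chunk(&s->bytestream, MKTAG('I', 'E', 'N', 'D'), NULL, 0);
 * which writes the length, the tag and a CRC that covers the tag alone. */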
static void png_write_image_data(AVCodecContext *avctx,
                                 const uint8_t *buf, int length)
{
    /* ... */
    bytestream_put_be32(&s->bytestream, length + 4);
    /* ... */
    bytestream_put_be32(&s->bytestream, MKBETAG('f', 'd', 'A', 'T'));
    bytestream_put_be32(&s->bytestream, s->sequence_number);
    crc = av_crc(crc_table, crc, s->bytestream - 8, 8);
    /* ... */
    crc = av_crc(crc_table, crc, buf, length);
    memcpy(s->bytestream, buf, length);
    s->bytestream += length;
    /* ... */
    bytestream_put_be32(&s->bytestream, ~crc);
    /* ... */
    ++s->sequence_number;
}
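/* Background note (added): an APNG fdAT chunk is an IDAT whose payload is
 * prefixed by a 4-byte sequence number, hence the declared length of
 * "length + 4" above; the CRC covers the tag plus that sequence number
 * (the 8 bytes just written) followed by the compressed frame data. */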
static int png_write_row(AVCodecContext *avctx, const uint8_t *data, int size)
{
    /* ... */
    s->zstream.avail_in = size;
    s->zstream.next_in  = data;
    while (s->zstream.avail_in > 0) {
        /* ... */
        if (s->zstream.avail_out == 0) {
            if (s->bytestream_end - s->bytestream > IOBUF_SIZE + 100)
                /* ... */
            /* ... */
            s->zstream.next_out = s->buf;
        }
    }
    /* ... */
}
#define AV_WB32_PNG(buf, n) AV_WB32(buf, lrint((n) * 100000))

static int png_get_chrm(enum AVColorPrimaries prim, uint8_t *buf)
{
    double rx, ry, gx, gy, bx, by, wx = 0.3127, wy = 0.3290;
    switch (prim) {
    case AVCOL_PRI_BT709:
        rx = 0.640; ry = 0.330;
        gx = 0.300; gy = 0.600;
        bx = 0.150; by = 0.060;
        break;
    case AVCOL_PRI_BT470M:
        rx = 0.670; ry = 0.330;
        gx = 0.210; gy = 0.710;
        bx = 0.140; by = 0.080;
        wx = 0.310; wy = 0.316;
        break;
    case AVCOL_PRI_BT470BG:
        rx = 0.640; ry = 0.330;
        gx = 0.290; gy = 0.600;
        bx = 0.150; by = 0.060;
        break;
    case AVCOL_PRI_SMPTE170M:
    case AVCOL_PRI_SMPTE240M:
        rx = 0.630; ry = 0.340;
        gx = 0.310; gy = 0.595;
        bx = 0.155; by = 0.070;
        break;
    case AVCOL_PRI_BT2020:
        rx = 0.708; ry = 0.292;
        gx = 0.170; gy = 0.797;
        bx = 0.131; by = 0.046;
        break;
    /* ... */
    }
    /* ... */
}
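/* Illustrative sketch (added): per the PNG specification the cHRM payload is
 * eight big-endian 32-bit integers, each coordinate scaled by 100000, in the
 * order white point x/y, red x/y, green x/y, blue x/y. With the macro above
 * the elided body of the function would typically look like:
 *     AV_WB32_PNG(buf     , wx); AV_WB32_PNG(buf +  4, wy);
 *     AV_WB32_PNG(buf +  8, rx); AV_WB32_PNG(buf + 12, ry);
 *     AV_WB32_PNG(buf + 16, gx); AV_WB32_PNG(buf + 20, gy);
 *     AV_WB32_PNG(buf + 24, bx); AV_WB32_PNG(buf + 28, by);
 */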
static int encode_headers(AVCodecContext *avctx, const AVFrame *pict)
{
    /* ... */
    /* IHDR payload fields */
    s->buf[8]  = s->bit_depth;
    s->buf[9]  = s->color_type;
    /* ... */
    s->buf[12] = s->is_progressive;   /* interlace method */
    /* ... */
    switch (stereo3d->type) {
    /* ... */
        av_log(avctx, AV_LOG_WARNING,
               "Only side-by-side stereo3d flag can be defined within sTER chunk\n");
    /* ... */
    }
    /* ... */
    uint8_t *ptr, *alpha_ptr;

    palette   = (uint32_t *)pict->data[1];
    /* ... */
    alpha_ptr = s->buf + 256 * 3;
    /* ... */
    for (i = 0; i < 256; i++) {
        /* ... */
        *alpha_ptr++ = alpha;
        bytestream_put_be24(&ptr, v);
    }
    png_write_chunk(&s->bytestream,
                    MKTAG('P', 'L', 'T', 'E'), s->buf, 256 * 3);
    /* ... */
    png_write_chunk(&s->bytestream,
                    MKTAG('t', 'R', 'N', 'S'), s->buf + 256 * 3, 256);
    /* ... */
}
static int encode_frame(AVCodecContext *avctx, const AVFrame *pict)
{
    PNGEncContext *s = avctx->priv_data;
    /* ... */
    int row_size, pass_row_size;
    uint8_t *ptr, *top, *crow_buf, *crow;
    uint8_t *crow_base       = NULL;
    uint8_t *progressive_buf = NULL;
    uint8_t *top_buf         = NULL;

    row_size = (pict->width * s->bits_per_pixel + 7) >> 3;
    /* ... */
    crow_buf = crow_base + 15;
    if (s->is_progressive) {
        progressive_buf = av_malloc(row_size + 1);
        top_buf         = av_malloc(row_size + 1);
        if (!progressive_buf || !top_buf) {
            /* ... */
        }
    }
    /* ... */
    s->zstream.next_out = s->buf;
    if (s->is_progressive) {
        /* Adam7: emit the image pass by pass */
        /* ... */
        if (pass_row_size > 0) {
            /* ... */
            for (y = 0; y < pict->height; y++) {
                /* ... */
                FFSWAP(uint8_t *, progressive_buf, top_buf);
                png_get_interlaced_row(/* ... */,
                                       s->bits_per_pixel, pass,
                                       /* ... */);
                crow = png_choose_filter(/* ... */,
                                         top, pass_row_size,
                                         s->bits_per_pixel >> 3);
                /* ... */
                top = progressive_buf;
            }
        }
    } else {
        for (y = 0; y < pict->height; y++) {
            /* ... */
            crow = png_choose_filter(/* ... */,
                                     row_size, s->bits_per_pixel >> 3);
            /* ... */
        }
    }
    /* flush the remaining zlib output */
    /* ... */
    if (ret == Z_OK || ret == Z_STREAM_END) {
        /* ... */
        if (len > 0 && s->bytestream_end - s->bytestream > len + 100) {
            /* ... */
        }
        s->zstream.next_out = s->buf;
        if (ret == Z_STREAM_END)
            break;
    }
    /* ... */
    deflateReset(&s->zstream);
    /* ... */
}
static int encode_png(AVCodecContext *avctx, AVPacket *pkt,
                      const AVFrame *pict, int *got_packet)
{
    PNGEncContext *s = avctx->priv_data;
    /* ... */
    size_t max_packet_size;
    /* ... */
    enc_row_size = deflateBound(&s->zstream,
                                (avctx->width * s->bits_per_pixel + 7) >> 3);
    /* ... */
    if (max_packet_size > INT_MAX)
        return AVERROR(ENOMEM);
    /* ... */
    s->bytestream_start =
    s->bytestream       = pkt->data;
    /* ... */
    pkt->size = s->bytestream - s->bytestream_start;
    /* ... */
}
static int apng_do_inverse_blend(AVFrame *output, const AVFrame *input,
                                 APNGFctlChunk *fctl_chunk, uint8_t bpp)
{
    /* ... */
    unsigned int leftmost_x   = input->width;
    unsigned int rightmost_x  = 0;
    unsigned int topmost_y    = input->height;
    unsigned int bottommost_y = 0;
    /* ... */
    ptrdiff_t input_linesize  = input->linesize[0];
    ptrdiff_t output_linesize = output->linesize[0];

    /* find the bounding box of pixels that differ from the previous output */
    for (y = 0; y < input->height; ++y) {
        for (x = 0; x < input->width; ++x) {
            /* ... */
            if (x >= rightmost_x)
                rightmost_x = x + 1;
            /* ... */
            if (y >= bottommost_y)
                bottommost_y = y + 1;
        }
    }

    if (leftmost_x == input->width && rightmost_x == 0) {
        /* nothing changed: fall back to a minimal 1x1 rectangle */
        leftmost_x  = topmost_y    = 0;
        rightmost_x = bottommost_y = 1;
    }
    /* ... */
    for (y = topmost_y; y < bottommost_y; ++y) {
        /* ...copy of one cropped row (the call and its destination argument
         * are elided in this excerpt):
         *        input->data[0] + input_linesize * y + bpp * leftmost_x,
         *        bpp * (rightmost_x - leftmost_x)); */
    }
    /* ... */
    size_t transparent_palette_index;
    /* ... */
    switch (input->format) {
    /* ... */
        palette = (uint32_t *)input->data[1];
        for (transparent_palette_index = 0; transparent_palette_index < 256; ++transparent_palette_index)
            if (palette[transparent_palette_index] >> 24 == 0)
                break;
    /* ... */
    }
    /* ... */
    for (y = topmost_y; y < bottommost_y; ++y) {
        uint8_t *foreground = input->data[0]  + input_linesize  * y + bpp * leftmost_x;
        uint8_t *background = output->data[0] + output_linesize * y + bpp * leftmost_x;
        /* ... */
        for (x = leftmost_x; x < rightmost_x; ++x, foreground += bpp, background += bpp,
                                              output_data += bpp) {
            if (!memcmp(foreground, background, bpp)) {
                /* ... */
                if (transparent_palette_index == 256) {
                    /* ... */
                }
                /* ... */
            }
            /* ... */
            switch (input->format) {
            /* AV_PIX_FMT_RGBA64BE */
                if (((uint16_t *)foreground)[3] == 0xffff ||
                    ((uint16_t *)background)[3] == 0)
                    /* ... */
            /* AV_PIX_FMT_YA16BE */
                if (((uint16_t *)foreground)[1] == 0xffff ||
                    ((uint16_t *)background)[1] == 0)
                    /* ... */
            /* AV_PIX_FMT_RGBA */
                if (foreground[3] == 0xff || background[3] == 0)
                    /* ... */
            /* AV_PIX_FMT_GRAY8A */
                if (foreground[1] == 0xff || background[1] == 0)
                    /* ... */
            /* AV_PIX_FMT_PAL8 */
                if (palette[*foreground] >> 24 == 0xff ||
                    palette[*background] >> 24 == 0)
                    /* ... */
            }
        }
    }
    /* ... */
    output->width  = rightmost_x  - leftmost_x;
    output->height = bottommost_y - topmost_y;
    /* ... */
}
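/* Note (added): as far as this excerpt shows, apng_do_inverse_blend() prepares
 * the APNG "difference" frame: it finds the bounding box of pixels that differ
 * from the previous output, crops the update to that rectangle, and, where a
 * pixel is unchanged, tries to substitute a fully transparent value so the
 * frame can be composited with the OVER blend mode; the per-format alpha
 * checks appear to guard the cases where that substitution is not possible. */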
static int apng_encode_frame(AVCodecContext *avctx, const AVFrame *pict,
                             APNGFctlChunk *best_fctl_chunk,
                             APNGFctlChunk *best_last_fctl_chunk)
{
    PNGEncContext *s = avctx->priv_data;
    /* ... */
    uint8_t bpp = (s->bits_per_pixel + 7) >> 3;
    uint8_t *original_bytestream, *original_bytestream_end;
    uint8_t *temp_bytestream = 0, *temp_bytestream_end;
    uint32_t best_sequence_number;
    uint8_t *best_bytestream;
    size_t best_bytestream_size = SIZE_MAX;
    /* ... */
    original_bytestream     = s->bytestream;
    original_bytestream_end = s->bytestream_end;
    /* ... */
    temp_bytestream = av_malloc(original_bytestream_end - original_bytestream);
    if (!temp_bytestream) {
        /* ... */
    }
    /* ... */
    temp_bytestream_end = temp_bytestream + (original_bytestream_end - original_bytestream);

    /* try the candidate encodings and keep the smallest one */
    /* ... */
        uint32_t original_sequence_number = s->sequence_number, sequence_number;
        uint8_t *bytestream_start = s->bytestream;
        size_t bytestream_size;
        /* ... */
            size_t row_start = diffFrame->linesize[0] * y + bpp * last_fctl_chunk.x_offset;
            memset(diffFrame->data[0] + row_start, 0, bpp * last_fctl_chunk.width);
        /* ... */
        sequence_number    = s->sequence_number;
        s->sequence_number = original_sequence_number;
        bytestream_size    = s->bytestream - bytestream_start;
        s->bytestream      = bytestream_start;
        /* ... */
        if (bytestream_size < best_bytestream_size) {
            *best_fctl_chunk      = fctl_chunk;
            *best_last_fctl_chunk = last_fctl_chunk;
            /* ... */
            best_sequence_number = sequence_number;
            best_bytestream      = s->bytestream;
            best_bytestream_size = bytestream_size;

            if (best_bytestream == original_bytestream) {
                s->bytestream     = temp_bytestream;
                s->bytestream_end = temp_bytestream_end;
            } else {
                s->bytestream     = original_bytestream;
                s->bytestream_end = original_bytestream_end;
            }
        }
    /* ... */
    s->sequence_number = best_sequence_number;
    s->bytestream      = original_bytestream + best_bytestream_size;
    s->bytestream_end  = original_bytestream_end;
    if (best_bytestream != original_bytestream)
        memcpy(original_bytestream, best_bytestream, best_bytestream_size);
    /* ... */
}
static int encode_apng(AVCodecContext *avctx, AVPacket *pkt,
                       const AVFrame *pict, int *got_packet)
{
    PNGEncContext *s = avctx->priv_data;
    /* ... */
    size_t max_packet_size;
    /* ... */
    } else if (checksum != s->palette_checksum) {
        av_log(avctx, AV_LOG_ERROR,
               "Input contains more than one unique palette. APNG does not support multiple palettes.\n");
        /* ... */
    }
    /* ... */
    enc_row_size = deflateBound(&s->zstream,
                                (avctx->width * s->bits_per_pixel + 7) >> 3);
    /* ... */
    if (max_packet_size > INT_MAX)
        return AVERROR(ENOMEM);
    /* ... */
        s->extra_data_size = s->bytestream - s->extra_data;
        /* ... */
        s->last_frame_packet = av_malloc(max_packet_size);
        if (!s->last_frame_packet)
            return AVERROR(ENOMEM);
    } else if (s->last_frame) {
        /* ... */
        memcpy(pkt->data, s->last_frame_packet, s->last_frame_packet_size);
        /* ... */
    }
    /* ... */
    s->bytestream_start =
    s->bytestream       = s->last_frame_packet;
    s->bytestream_end   = s->bytestream + max_packet_size;
    /* ... */
    /* reserve room in front of the frame data for the fcTL chunk
     * (26-byte payload plus 12 bytes of length/tag/CRC framing) */
    ++s->sequence_number;
    s->bytestream += 26 + 12;
    /* ... */
    uint8_t *last_fctl_chunk_start = pkt->data;
    /* ... */
    if (!s->extra_data_updated) {
        /* ... */
        memcpy(side_data, s->extra_data, s->extra_data_size);
        s->extra_data_updated = 1;
    }
    /* ... */
    AV_WB32(buf + 0,  s->last_frame_fctl.sequence_number);
    AV_WB32(buf + 4,  s->last_frame_fctl.width);
    AV_WB32(buf + 8,  s->last_frame_fctl.height);
    AV_WB32(buf + 12, s->last_frame_fctl.x_offset);
    AV_WB32(buf + 16, s->last_frame_fctl.y_offset);
    AV_WB16(buf + 20, s->last_frame_fctl.delay_num);
    AV_WB16(buf + 22, s->last_frame_fctl.delay_den);
    buf[24] = s->last_frame_fctl.dispose_op;
    buf[25] = s->last_frame_fctl.blend_op;
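    /* Background note (added): the nine stores above fill the 26-byte payload
     * of an APNG fcTL chunk (sequence number, width, height, x/y offset,
     * delay numerator/denominator, dispose_op, blend_op), which is why
     * 26 + 12 bytes were reserved earlier: 26 for the payload plus the
     * length, tag and CRC framing. */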
    /* ... */
    if (!s->last_frame) {
        /* ... */
    }
    /* ... */
    if (!s->prev_frame) {
        /* ... */
    }
    /* ... */
    s->prev_frame->format = pict->format;
    s->prev_frame->width  = pict->width;
    s->prev_frame->height = pict->height;
    /* ... */
    uint8_t bpp = (s->bits_per_pixel + 7) >> 3;
    for (y = s->last_frame_fctl.y_offset;
         y < s->last_frame_fctl.y_offset + s->last_frame_fctl.height; ++y) {
        size_t row_start = s->prev_frame->linesize[0] * y +
                           bpp * s->last_frame_fctl.x_offset;
        memset(s->prev_frame->data[0] + row_start, 0,
               bpp * s->last_frame_fctl.width);
    }
    /* ... */
    s->last_frame_fctl        = fctl_chunk;
    s->last_frame_packet_size = s->bytestream - s->bytestream_start;
    /* ... */
}
static av_cold int png_enc_init(AVCodecContext *avctx)
{
    PNGEncContext *s = avctx->priv_data;
    int compression_level;
    /* ... */
    if (s->dpi && s->dpm) {
        /* ... */
    } else if (s->dpi) {
        s->dpm = s->dpi * 10000 / 254;
    }
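    /* Note (added): 1 inch = 25.4 mm, so dots per metre = dpi * 1000 / 25.4;
     * the integer expression above computes this as dpi * 10000 / 254. */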
    /* ... */
    s->zstream.opaque = NULL;
    compression_level = avctx->compression_level == FF_COMPRESSION_DEFAULT
                        ? Z_DEFAULT_COMPRESSION
                        : av_clip(avctx->compression_level, 0, 9);
    if (deflateInit2(&s->zstream, compression_level, Z_DEFLATED, 15, 8,
                     Z_DEFAULT_STRATEGY) != Z_OK)
        return -1;
    /* ... */
}

static av_cold int png_enc_close(AVCodecContext *avctx)
{
    PNGEncContext *s = avctx->priv_data;

    deflateEnd(&s->zstream);
    /* ... */
    s->extra_data_size = 0;
    /* ... */
}
#define OFFSET(x) offsetof(PNGEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
    /* ... */
    { "dpm", "Set image resolution (in dots per meter)", OFFSET(dpm),
      AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 0x10000, VE },
    /* ... */
};
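/* Hypothetical usage sketch (added, not part of this file): private options
 * such as "dpm" can be set through the AVOptions API on the encoder's private
 * context before the encoder is opened, e.g.
 *     av_opt_set_int(avctx->priv_data, "dpm", 11811, 0);
 * where 11811 is only an example value (300 dpi * 10000 / 254). */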