libavcodec/pngdec.c: PNG and APNG image decoder (FFmpeg)
/* Mask to determine which pixels are valid in a pass */
static const uint8_t png_pass_mask[NB_PASSES] = {
    0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
};

/* Mask to determine which y pixels can be written in a pass */
static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
    0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
};

/* Mask to determine which pixels to overwrite while displaying */
static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
    0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
};
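These tables drive the Adam7 handling below: for a pixel at column x, bit (7 - (x & 7)) of png_pass_mask[pass] says whether the pass carries that pixel, and the corresponding bit of png_pass_dsp_mask[pass] says whether it may be overwritten while progressively displaying the pass. As a worked example, 0x55 is 0b01010101, so the test `(mask << j) & 0x80` with j = x & 7 succeeds for every second column, and 0x11 (0b00010001) succeeds for every fourth column.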
/* png_put_interlaced_row(): expand one row of an Adam7 pass into the
 * destination picture row, writing only the pixels the pass owns. */
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
{
    int x, mask, dsp_mask, j, src_x, b, bpp;

    mask     = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];

    switch (bits_per_pixel) {
    case 1:
        src_x = 0;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                dst[x >> 3] &= 0xFF7F >> j;
                dst[x >> 3] |= b << (7 - j);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 2:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2 * (src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F >> j2;
                dst[x >> 2] |= b << (6 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 4:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 4 * (x & 1);
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4 * (src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F >> j2;
                dst[x >> 1] |= b << (4 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    default: /* 8 bits per component or more: copy whole pixels */
        bpp = bits_per_pixel >> 3;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80)
                memcpy(dst + x * bpp, src, bpp);
            if ((mask << j) & 0x80)
                src += bpp;
        }
        break;
    }
}
/* ff_add_png_paeth_prediction(): per-byte Paeth reconstruction (excerpt) */
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
{
    int i;

    for (i = 0; i < w; i++) {
        int a, b, c, p, pa, pb, pc;

        /* ... a = left, b = up, c = up-left; pa/pb/pc = distances to p = a + b - c ... */
        if (pa <= pb && pa <= pc)
            p = a;
        /* ... else pick b or c, then dst[i] = p + src[i] ... */
    }
}
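For reference, a minimal standalone version of the Paeth predictor as defined by the PNG specification is sketched below. It mirrors what ff_add_png_paeth_prediction() computes per byte, but the function name and structure here are illustrative, not FFmpeg API.

#include <stdlib.h>
#include <stdint.h>

/* Paeth predictor from the PNG spec: pick whichever of left (a), up (b),
 * up-left (c) is closest to the estimate p = a + b - c. Illustrative sketch. */
static uint8_t paeth_predict(uint8_t a, uint8_t b, uint8_t c)
{
    int p  = a + b - c;
    int pa = abs(p - a);
    int pb = abs(p - b);
    int pc = abs(p - c);
    if (pa <= pb && pa <= pc)
        return a;
    if (pb <= pc)
        return b;
    return c;
}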
/* Per-row filter unrolling: keep up to four running components in r/g/b/a.
 * For bpp < 4 the later component lines are skipped (guards elided here). */
#define UNROLL1(bpp, op) \
    /* ... seed r, g, b, a from dst[0..bpp-1] ... */ \
    for (; i <= size - bpp; i += bpp) { \
        dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
        dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
        dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
        dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
    }

#define UNROLL_FILTER(op) \
    if (bpp == 1) { \
        UNROLL1(1, op) \
    } else if (bpp == 2) { \
        UNROLL1(2, op) \
    } else if (bpp == 3) { \
        UNROLL1(3, op) \
    } else if (bpp == 4) { \
        UNROLL1(4, op) \
    } \
    for (; i < size; i++) { \
        dst[i] = op(dst[i - bpp], src[i], last[i]); \
    }
/* NOTE: 'dst' may be equal to 'last' */
void ff_png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
                       uint8_t *src, uint8_t *last, int size, int bpp)
{
    int i, p, r, g, b, a;

    switch (filter_type) {
    case PNG_FILTER_VALUE_NONE:
        memcpy(dst, src, size);
        break;
    case PNG_FILTER_VALUE_SUB:
        for (i = 0; i < bpp; i++)
            dst[i] = src[i];
        if (bpp == 4) {
            p = *(int *)dst;
            for (; i < size; i += bpp) {
                unsigned s = *(int *)(src + i);
                p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
                *(int *)(dst + i) = p;
            }
        } else {
#define OP_SUB(x, s, l) ((x) + (s))
            UNROLL_FILTER(OP_SUB);
        }
        break;
    case PNG_FILTER_VALUE_UP:
        dsp->add_bytes_l2(dst, src, last, size);
        break;
    case PNG_FILTER_VALUE_AVG:
        for (i = 0; i < bpp; i++) {
            p      = (last[i] >> 1);
            dst[i] = p + src[i];
        }
#define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
        UNROLL_FILTER(OP_AVG);
        break;
    case PNG_FILTER_VALUE_PAETH:
        for (i = 0; i < bpp; i++) {
            p      = last[i];
            dst[i] = p + src[i];
        }
        if (bpp > 2 && size > 4) {
            /* ... hand the bulk of the row to dsp->add_paeth_prediction(),
             *     leaving the tail for the C fallback below ... */
        }
        ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
        break;
    }
}
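As a reading aid, each PNG filter type reconstructs a byte from the filtered value plus a prediction built from the already-reconstructed left (a), up (b), and up-left (c) neighbours (0 outside the image). The compact sketch below is illustrative only; the decoder's own path is the unrolled, DSP-accelerated code above. It reuses paeth_predict() from the sketch further up.

/* Illustrative per-byte reconstruction of the five PNG filter types.
 * raw = filtered byte from the stream; arithmetic is modulo 256. */
static uint8_t recon_byte(int filter, uint8_t raw, uint8_t a, uint8_t b, uint8_t c)
{
    switch (filter) {
    case 0: return raw;                              /* None    */
    case 1: return raw + a;                          /* Sub     */
    case 2: return raw + b;                          /* Up      */
    case 3: return raw + ((a + b) >> 1);             /* Average */
    case 4: return raw + paeth_predict(a, b, c);     /* Paeth   */
    default: return raw;
    }
}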
/* Reverse the PNG_FILTER_TYPE_LOCO (MNG) colour transform: add green back
 * into the red and blue components. */
#define YUV2RGB(NAME, TYPE) \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
{ \
    int i; \
    for (i = 0; i < size; i += 3 + alpha) { \
        int g = dst[i + 1]; \
        dst[i + 0] += g; \
        dst[i + 2] += g; \
    } \
}

YUV2RGB(rgb8,  uint8_t)
YUV2RGB(rgb16, uint16_t)
/* percent_missing(): how much of the picture is still undecoded */
static int percent_missing(PNGDecContext *s)
{
    if (s->interlace_type) {
        return 100 - 100 * s->pass / (NB_PASSES - 1);
    } else {
        return 100 - 100 * s->y / s->cur_h;
    }
}
/* png_handle_row(): un-filter one decompressed row and store it in the frame */
static void png_handle_row(PNGDecContext *s, uint8_t *dst, ptrdiff_t dst_stride)
{
    uint8_t *ptr, *last_row;

    if (!s->interlace_type) {
        ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
        if (s->y == 0)
            last_row = s->last_row;
        else
            last_row = ptr - dst_stride;

        ff_png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
                          last_row, s->row_size, s->bpp);
        /* the LOCO de-transform lags one row so it cannot disturb UP prediction */
        if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
            if (s->bit_depth == 16)
                deloco_rgb16((uint16_t *)(ptr - dst_stride), s->row_size / 2,
                             s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            else
                deloco_rgb8(ptr - dst_stride, s->row_size,
                            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
        }
        s->y++;
        if (s->y == s->cur_h) {
            s->pic_state |= PNG_ALLIMAGE;
            if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
                if (s->bit_depth == 16)
                    deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
                                 s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
                else
                    deloco_rgb8(ptr, s->row_size,
                                s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            }
        }
    } else {
        /* Adam7: filter into last_row, then scatter the pass into the picture
         * (one filtered row per call; loop-exit bookkeeping elided) */
        for (;;) {
            ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
            if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
                ff_png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
                                  s->last_row, s->pass_row_size, s->bpp);
                FFSWAP(uint8_t *, s->last_row, s->tmp_row);
                FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
            }
            if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80)
                png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
                                       s->color_type, s->last_row);
            s->y++;
            if (s->y == s->cur_h) {
                memset(s->last_row, 0, s->row_size);
                /* ... advance to the next pass with a non-empty row, or finish ... */
                s->crow_size = s->pass_row_size + 1;
                if (s->pass_row_size != 0)
                    break;
            }
        }
    }
}
/* png_decode_idat(): run the zlib stream over the chunk payload, handing each
 * completed row to png_handle_row() */
static int png_decode_idat(PNGDecContext *s, GetByteContext *gb,
                           uint8_t *dst, ptrdiff_t dst_stride)
{
    int ret;

    s->zstream.avail_in = bytestream2_get_bytes_left(gb);
    s->zstream.next_in  = gb->buffer;
    bytestream2_skip(gb, s->zstream.avail_in);

    /* decode one line if possible */
    while (s->zstream.avail_in > 0) {
        ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
            return AVERROR_EXTERNAL;
        }
        if (s->zstream.avail_out == 0) {
            if (!(s->pic_state & PNG_ALLIMAGE))
                png_handle_row(s, dst, dst_stride);
            s->zstream.avail_out = s->crow_size;
            s->zstream.next_out  = s->crow_buf;
        }
        if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
            return 0;
        }
    }
    return 0;
}
/* decode_zbuf(): inflate a zlib-compressed chunk payload into an AVBPrint */
static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
                       const uint8_t *data_end)
{
    z_stream zstream;
    unsigned char *buf;
    unsigned buf_size;
    int ret;

    zstream.zalloc = ff_png_zalloc;
    zstream.zfree  = ff_png_zfree;
    zstream.opaque = NULL;
    if (inflateInit(&zstream) != Z_OK)
        return AVERROR_EXTERNAL;
    zstream.next_in  = data;
    zstream.avail_in = data_end - data;
    av_bprint_init(bp, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (zstream.avail_in > 0) {
        av_bprint_get_buffer(bp, 2, &buf, &buf_size);
        if (buf_size < 2) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        zstream.next_out  = buf;
        zstream.avail_out = buf_size - 1;
        ret = inflate(&zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            ret = AVERROR_EXTERNAL;
            goto fail;
        }
        bp->len += zstream.next_out - buf;
        if (ret == Z_STREAM_END)
            break;
    }
    inflateEnd(&zstream);
    bp->str[bp->len] = 0;
    return 0;

fail:
    inflateEnd(&zstream);
    av_bprint_finalize(bp, NULL);
    return ret;
}
/* iso88591_to_utf8(): re-encode a Latin-1 string; bytes >= 0x80 become two bytes */
static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
{
    size_t extra = 0, i;
    uint8_t *out, *q;

    for (i = 0; i < size_in; i++)
        extra += in[i] >= 0x80;
    if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
        return NULL;
    q = out = av_malloc(size_in + extra + 1);
    if (!out)
        return NULL;
    for (i = 0; i < size_in; i++) {
        if (in[i] >= 0x80) {
            *(q++) = 0xC0 | (in[i] >> 6);
            *(q++) = 0x80 | (in[i] & 0x3F);
        } else {
            *(q++) = in[i];
        }
    }
    *(q++) = 0;
    return out;
}
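Because ISO-8859-1 code points coincide with the first 256 Unicode code points, every byte at or above 0x80 expands to exactly two UTF-8 bytes, which is why only the two-byte 0xC0/0x80 form is needed above. As a worked example, 0xE9 ('e' with acute accent) becomes 0xC3 0xA9: 0xC0 | (0xE9 >> 6) = 0xC3 and 0x80 | (0xE9 & 0x3F) = 0xA9.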
/* decode_text_chunk(): store a tEXt (compressed = 0) or zTXt (compressed = 1)
 * keyword/value pair in the frame metadata, converted to UTF-8. */
static int decode_text_chunk(PNGDecContext *s, GetByteContext *gb, int compressed)
{
    const uint8_t *data        = gb->buffer;
    const uint8_t *data_end    = gb->buffer_end;
    const uint8_t *keyword     = data;
    const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
    uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
    unsigned text_len;

    if (!keyword_end)
        return AVERROR_INVALIDDATA;
    data = keyword_end + 1;

    if (compressed) {
        if (data == data_end)
            return AVERROR_INVALIDDATA;
        /* ... check the compression method byte, decode_zbuf() the value ... */
    } else {
        text     = (uint8_t *)data;
        text_len = data_end - text;
    }

    kw_utf8  = iso88591_to_utf8(keyword, keyword_end - keyword);
    txt_utf8 = iso88591_to_utf8(text, text_len);
    if (!(kw_utf8 && txt_utf8)) {
        /* ... free both and return AVERROR(ENOMEM) ... */
    }
    /* ... av_dict_set(&s->frame_metadata, kw_utf8, txt_utf8,
     *                 AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL) ... */
}
/* decode_ihdr_chunk(): parse the 13-byte image header */
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    if (bytestream2_get_bytes_left(gb) != 13)
        return AVERROR_INVALIDDATA;

    if (s->hdr_state & PNG_IHDR) {
        av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    s->width  = s->cur_w = bytestream2_get_be32(gb);
    s->height = s->cur_h = bytestream2_get_be32(gb);
    if (av_image_check_size(s->width, s->height, 0, avctx)) {
        s->cur_w = s->cur_h = s->width = s->height = 0;
        av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->bit_depth = bytestream2_get_byte(gb);
    if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
        s->bit_depth != 8 && s->bit_depth != 16) {
        av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
        goto error;
    }
    s->color_type       = bytestream2_get_byte(gb);
    s->compression_type = bytestream2_get_byte(gb);
    if (s->compression_type) {
        av_log(avctx, AV_LOG_ERROR, "Invalid compression method %d\n",
               s->compression_type);
        goto error;
    }
    s->filter_type    = bytestream2_get_byte(gb);
    s->interlace_type = bytestream2_get_byte(gb);
    s->hdr_state |= PNG_IHDR;
    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
               "compression_type=%d filter_type=%d interlace_type=%d\n",
               s->width, s->height, s->bit_depth, s->color_type,
               s->compression_type, s->filter_type, s->interlace_type);
    return 0;

error:
    s->cur_w = s->cur_h = s->width = s->height = 0;
    return AVERROR_INVALIDDATA;
}
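The 13-byte IHDR payload parsed above consists of fixed big-endian fields defined by the PNG specification. A minimal standalone parser is sketched below for context; the struct and function names are illustrative, not part of the decoder.

#include <stdint.h>
#include <stddef.h>

/* Illustrative IHDR layout per the PNG spec (13 bytes, big-endian). */
struct png_ihdr {
    uint32_t width, height;
    uint8_t  bit_depth, color_type, compression, filter, interlace;
};

static uint32_t rd_be32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static int parse_ihdr(const uint8_t *buf, size_t len, struct png_ihdr *h)
{
    if (len != 13)
        return -1;
    h->width       = rd_be32(buf);
    h->height      = rd_be32(buf + 4);
    h->bit_depth   = buf[8];
    h->color_type  = buf[9];
    h->compression = buf[10];   /* must be 0 */
    h->filter      = buf[11];   /* 0 in standard PNG */
    h->interlace   = buf[12];   /* 0 = none, 1 = Adam7 */
    return 0;
}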
/* decode_idat_chunk(): on the first IDAT/fdAT of a frame pick the output
 * pixel format and allocate the row buffers, then feed the payload to zlib */
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb, AVFrame *p)
{
    int ret;
    size_t byte_depth = s->bit_depth > 8 ? 2 : 1;

    if (!(s->hdr_state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
        return AVERROR_INVALIDDATA;
    }
    if (!(s->pic_state & PNG_IDAT)) {
        /* first IDAT: derive the per-pixel layout */
        s->channels       = ff_png_get_nb_channels(s->color_type);
        s->bits_per_pixel = s->bit_depth * s->channels;
        s->bpp            = (s->bits_per_pixel + 7) >> 3;
        s->row_size       = (s->cur_w * s->bits_per_pixel + 7) >> 3;

        if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
            s->color_type == PNG_COLOR_TYPE_RGB) {
            avctx->pix_fmt = AV_PIX_FMT_RGB24;
        } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
                   s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_RGBA;
        } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
                   s->color_type == PNG_COLOR_TYPE_GRAY) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_RGB) {
            avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
        } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 ||
                    s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
                   s->color_type == PNG_COLOR_TYPE_PALETTE) {
            avctx->pix_fmt = AV_PIX_FMT_PAL8;
        } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 &&
                   avctx->codec_tag != MKTAG('A', 'P', 'N', 'G')) {
            avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
        } else if (s->bit_depth == 8 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_YA16BE;
        } else {
            avpriv_report_missing_feature(avctx,
                                          "Bit depth %d color type %d",
                                          s->bit_depth, s->color_type);
            return AVERROR_PATCHWELCOME;
        }

        if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
            /* a tRNS colour key is expanded into a real alpha channel later */
            switch (avctx->pix_fmt) {
            case AV_PIX_FMT_RGB24:    avctx->pix_fmt = AV_PIX_FMT_RGBA;     break;
            case AV_PIX_FMT_RGB48BE:  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE; break;
            case AV_PIX_FMT_GRAY8:    avctx->pix_fmt = AV_PIX_FMT_YA8;      break;
            case AV_PIX_FMT_GRAY16BE: avctx->pix_fmt = AV_PIX_FMT_YA16BE;   break;
            default:
                avpriv_request_sample(avctx, "bit depth %d "
                                      "and color type %d with TRNS",
                                      s->bit_depth, s->color_type);
                return AVERROR_INVALIDDATA;
            }
            s->bpp += byte_depth;
        }

        /* ... ff_thread_get_buffer() and per-frame properties ... */

        /* compressed row: one filter-type byte followed by the row data */
        if (!s->interlace_type) {
            s->crow_size = s->row_size + 1;
        } else {
            s->pass          = 0;
            s->pass_row_size = ff_png_pass_row_size(s->pass, s->bits_per_pixel, s->cur_w);
            s->crow_size     = s->pass_row_size + 1;
        }
        ff_dlog(avctx, "row_size=%d crow_size =%d\n",
                s->row_size, s->crow_size);

        if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
            memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));

        /* allocate the previous-row and temporary row buffers */
        av_fast_padded_mallocz(&s->last_row, &s->last_row_size, s->row_size);
        if (!s->last_row)
            return AVERROR(ENOMEM);
        if (s->interlace_type ||
            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            av_fast_padded_malloc(&s->tmp_row, &s->tmp_row_size, s->row_size);
            if (!s->tmp_row)
                return AVERROR(ENOMEM);
        }
        /* ... allocate s->buffer; the filter byte sits at crow_buf[0] so the
         *     row data at crow_buf + 1 stays 16-byte aligned ... */
        s->crow_buf          = s->buffer + 15;
        s->zstream.avail_out = s->crow_size;
        s->zstream.next_out  = s->crow_buf;
    }

    s->pic_state |= PNG_IDAT;

    /* decode at the non-alpha bpp while the colour key has not been expanded */
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
        s->bpp -= byte_depth;

    ret = png_decode_idat(s, gb, p->data[0], p->linesize[0]);

    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
        s->bpp += byte_depth;

    return ret;
}
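As a concrete check of the arithmetic above: a 16-bit RGBA image has 4 channels, so bits_per_pixel = 16 * 4 = 64 and bpp = (64 + 7) >> 3 = 8 bytes per pixel; a 1-bit palette image of width 5 gives row_size = (5 * 1 + 7) >> 3 = 1 byte. The compressed row (crow_size) is always one byte larger than row_size because every row in the zlib stream starts with its filter-type byte.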
/* decode_plte_chunk(): read up to 256 RGB palette entries, opaque by default */
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    int length = bytestream2_get_bytes_left(gb);
    int n, i, r, g, b;

    if ((length % 3) != 0 || length > 256 * 3)
        return AVERROR_INVALIDDATA;
    /* read the palette */
    n = length / 3;
    for (i = 0; i < n; i++) {
        r = bytestream2_get_byte(gb);
        g = bytestream2_get_byte(gb);
        b = bytestream2_get_byte(gb);
        s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
    }
    for (; i < 256; i++)
        s->palette[i] = (0xFFU << 24);
    s->hdr_state |= PNG_PLTE;

    return 0;
}
/* decode_trns_chunk(): palette alpha, or the colour key for gray/RGB images */
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    int length = bytestream2_get_bytes_left(gb);
    int i;

    /* ... reject tRNS before IHDR or after IDAT ... */

    if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
        if (length > 256 || !(s->hdr_state & PNG_PLTE))
            return AVERROR_INVALIDDATA;
        for (i = 0; i < length; i++) {
            unsigned v = bytestream2_get_byte(gb);
            s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
        }
    } else if (s->color_type == PNG_COLOR_TYPE_GRAY ||
               s->color_type == PNG_COLOR_TYPE_RGB) {
        if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
            (s->color_type == PNG_COLOR_TYPE_RGB  && length != 6))
            return AVERROR_INVALIDDATA;
        for (i = 0; i < length / 2; i++) {
            unsigned v = bytestream2_get_be16(gb);
            if (s->bit_depth > 8)
                AV_WB16(&s->transparent_color_be[2 * i], v);
            else
                s->transparent_color_be[i] = v;
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    s->has_trns = 1;
    return 0;
}
/* decode_iccp_chunk(): read the NUL-terminated profile name (at most 80 bytes),
 * then inflate the zlib-compressed ICC payload (excerpt) */
    while ((s->iccp_name[cnt++] = bytestream2_get_byte(gb)) && cnt < 81);
    /* only compression method 0 (zlib) is defined */
    if (bytestream2_get_byte(gb) != 0) {
        return AVERROR_INVALIDDATA;
    }
    /* ... decode_zbuf() into bp, move the bytes to s->iccp_data ... */
    s->iccp_data_len = bp.len;
/* handle_small_bpp(): expand 1/2/4-bit rows to one byte per sample, in place,
 * walking right to left so nothing is overwritten before it is read. Palette
 * indices are copied as-is; grayscale samples are replicated to full range. */
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
{
    if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
        int i, j, k;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 8;
            for (k = 7; k >= 1; k--)
                if ((s->width & 7) >= k)
                    pd[8 * i + k - 1] = (pd[i] >> (8 - k)) & 1;
            for (i--; i >= 0; i--) {
                pd[8 * i + 7] =  pd[i]       & 1;
                pd[8 * i + 6] = (pd[i] >> 1) & 1;
                pd[8 * i + 5] = (pd[i] >> 2) & 1;
                pd[8 * i + 4] = (pd[i] >> 3) & 1;
                pd[8 * i + 3] = (pd[i] >> 4) & 1;
                pd[8 * i + 2] = (pd[i] >> 5) & 1;
                pd[8 * i + 1] = (pd[i] >> 6) & 1;
                pd[8 * i + 0] =  pd[i] >> 7;
            }
            pd += p->linesize[0];
        }
    } else if (s->bits_per_pixel == 2) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 4;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if ((s->width & 3) >= 3) pd[4 * i + 2] = (pd[i] >> 2) & 3;
                if ((s->width & 3) >= 2) pd[4 * i + 1] = (pd[i] >> 4) & 3;
                if ((s->width & 3) >= 1) pd[4 * i + 0] =  pd[i] >> 6;
                for (i--; i >= 0; i--) {
                    pd[4 * i + 3] =  pd[i]       & 3;
                    pd[4 * i + 2] = (pd[i] >> 2) & 3;
                    pd[4 * i + 1] = (pd[i] >> 4) & 3;
                    pd[4 * i + 0] =  pd[i] >> 6;
                }
            } else {
                if ((s->width & 3) >= 3) pd[4 * i + 2] = ((pd[i] >> 2) & 3) * 0x55;
                if ((s->width & 3) >= 2) pd[4 * i + 1] = ((pd[i] >> 4) & 3) * 0x55;
                if ((s->width & 3) >= 1) pd[4 * i + 0] = ( pd[i] >> 6     ) * 0x55;
                for (i--; i >= 0; i--) {
                    pd[4 * i + 3] = ( pd[i]       & 3) * 0x55;
                    pd[4 * i + 2] = ((pd[i] >> 2) & 3) * 0x55;
                    pd[4 * i + 1] = ((pd[i] >> 4) & 3) * 0x55;
                    pd[4 * i + 0] = ( pd[i] >> 6     ) * 0x55;
                }
            }
            pd += p->linesize[0];
        }
    } else if (s->bits_per_pixel == 4) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 2;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if (s->width & 1) pd[2 * i + 0] = pd[i] >> 4;
                for (i--; i >= 0; i--) {
                    pd[2 * i + 1] = pd[i] & 15;
                    pd[2 * i + 0] = pd[i] >> 4;
                }
            } else {
                if (s->width & 1) pd[2 * i + 0] = (pd[i] >> 4) * 0x11;
                for (i--; i >= 0; i--) {
                    pd[2 * i + 1] = (pd[i] & 15) * 0x11;
                    pd[2 * i + 0] = (pd[i] >> 4) * 0x11;
                }
            }
            pd += p->linesize[0];
        }
    }
}
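The 0x55 and 0x11 multipliers above replicate a small grayscale sample across the full 8-bit range rather than merely shifting it: a 2-bit value v in 0..3 maps to v * 0x55, so 3 becomes 0xFF and 1 becomes 0x55, and a 4-bit value maps to v * 0x11, so 0xF becomes 0xFF. Palette indices, by contrast, are copied unscaled because they index s->palette rather than encode intensity.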
/* decode_fctl_chunk(): parse an APNG frame-control chunk and validate the
 * frame rectangle against the canvas */
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             GetByteContext *gb)
{
    uint32_t sequence_number;
    int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;

    /* ... length and header-state validation ... */

    s->last_w          = s->cur_w;
    s->last_h          = s->cur_h;
    s->last_x_offset   = s->x_offset;
    s->last_y_offset   = s->y_offset;
    s->last_dispose_op = s->dispose_op;

    sequence_number = bytestream2_get_be32(gb);
    cur_w           = bytestream2_get_be32(gb);
    cur_h           = bytestream2_get_be32(gb);
    x_offset        = bytestream2_get_be32(gb);
    y_offset        = bytestream2_get_be32(gb);
    bytestream2_skip(gb, 4); /* delay_num (2 bytes) + delay_den (2 bytes) */
    dispose_op      = bytestream2_get_byte(gb);
    blend_op        = bytestream2_get_byte(gb);

    if (sequence_number == 0 &&
        (cur_w != s->width ||
         cur_h != s->height ||
         x_offset != 0 ||
         y_offset != 0) ||
        cur_w <= 0 || cur_h <= 0 ||
        x_offset < 0 || y_offset < 0 ||
        cur_w > s->width - x_offset || cur_h > s->height - y_offset)
        return AVERROR_INVALIDDATA;

    if ((sequence_number == 0 || !s->last_picture.f->data[0]) &&
        dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
        /* no previous frame to revert to: the spec says to treat this
         * as APNG_DISPOSE_OP_BACKGROUND */
        dispose_op = APNG_DISPOSE_OP_BACKGROUND;
    }

    s->cur_w      = cur_w;
    s->cur_h      = cur_h;
    s->x_offset   = x_offset;
    s->y_offset   = y_offset;
    s->dispose_op = dispose_op;
    s->blend_op   = blend_op;

    return 0;
}
/* handle_p_frame_png(): MNG-style P-frame, add the previous picture back in */
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
{
    int i, j;
    uint8_t *pd      = p->data[0];
    uint8_t *pd_last = s->last_picture.f->data[0];
    int ls = av_image_get_linesize(p->format, s->width, 0);

    ls = FFMIN(ls, s->width * s->bpp);

    for (j = 0; j < s->height; j++) {
        for (i = 0; i < ls; i++)
            pd[i] += pd_last[i];
        pd      += p->linesize[0];
        pd_last += s->last_picture.f->linesize[0];
    }
}
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

/* handle_p_frame_apng(): composite the current APNG region onto (or over)
 * the previous output picture according to dispose_op / blend_op */
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p)
{
    uint8_t       *dst        = p->data[0];
    ptrdiff_t      dst_stride = p->linesize[0];
    const uint8_t *src        = s->last_picture.f->data[0];
    ptrdiff_t      src_stride = s->last_picture.f->linesize[0];
    size_t x, y;

    /* ... APNG_BLEND_OP_OVER is rejected for pixel formats without alpha ... */

    if (s->last_dispose_op == APNG_DISPOSE_OP_BACKGROUND) {
        /* the previous frame's rectangle must be treated as fully transparent:
         * work on a copy of the reference with that region cleared */
        av_fast_malloc(&s->background_buf, &s->background_buf_allocated,
                       src_stride * p->height);
        if (!s->background_buf)
            return AVERROR(ENOMEM);

        memcpy(s->background_buf, src, src_stride * p->height);

        for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; y++) {
            memset(s->background_buf + src_stride * y +
                   s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);
        }

        src = s->background_buf;
    }

    /* copy everything outside the fcTL rectangle straight from the reference */
    for (y = 0; y < s->y_offset; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * s->bpp);
    for (y = s->y_offset; y < s->y_offset + s->cur_h; y++) {
        memcpy(dst + y * dst_stride, src + y * src_stride, s->x_offset * s->bpp);
        memcpy(dst + y * dst_stride + (s->x_offset + s->cur_w) * s->bpp,
               src + y * src_stride + (s->x_offset + s->cur_w) * s->bpp,
               (p->width - s->cur_w - s->x_offset) * s->bpp);
    }
    for (y = s->y_offset + s->cur_h; y < p->height; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * s->bpp);

    if (s->blend_op == APNG_BLEND_OP_OVER) {
        for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
            uint8_t       *foreground = dst + dst_stride * y + s->bpp * s->x_offset;
            const uint8_t *background = src + src_stride * y + s->bpp * s->x_offset;
            for (x = s->x_offset; x < s->x_offset + s->cur_w;
                 ++x, foreground += s->bpp, background += s->bpp) {
                size_t b;
                uint8_t foreground_alpha, background_alpha, output_alpha;
                uint8_t output[10];

                switch (avctx->pix_fmt) {
                case AV_PIX_FMT_RGBA:
                    foreground_alpha = foreground[3];
                    background_alpha = background[3];
                    break;
                case AV_PIX_FMT_GRAY8A:
                    foreground_alpha = foreground[1];
                    background_alpha = background[1];
                    break;
                case AV_PIX_FMT_PAL8:
                    foreground_alpha = s->palette[foreground[0]] >> 24;
                    background_alpha = s->palette[background[0]] >> 24;
                    break;
                }

                if (foreground_alpha == 255)
                    continue;

                if (foreground_alpha == 0) {
                    memcpy(foreground, background, s->bpp);
                    continue;
                }

                /* ... blending onto PAL8 is not supported and is rejected ... */

                output_alpha = foreground_alpha +
                               FAST_DIV255((255 - foreground_alpha) * background_alpha);

                for (b = 0; b < s->bpp - 1; ++b) {
                    if (output_alpha == 0) {
                        output[b] = 0;
                    } else if (background_alpha == 255) {
                        output[b] = FAST_DIV255(foreground_alpha * foreground[b] +
                                                (255 - foreground_alpha) * background[b]);
                    } else {
                        output[b] = (255 * foreground_alpha * foreground[b] +
                                     (255 - foreground_alpha) * background_alpha * background[b]) /
                                    (255 * output_alpha);
                    }
                }
                output[b] = output_alpha;
                memcpy(foreground, output, s->bpp);
            }
        }
    }

    return 0;
}
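Two notes on the blend loop above. FAST_DIV255(x) approximates a rounded division by 255 without an actual divide, since 257 / 65536 is very close to 1 / 255; for the products that occur here (at most 255 * 255) the result matches x / 255 with rounding. The blend itself is the standard "over" operator in straight (non-premultiplied) alpha: out_a = fg_a + (1 - fg_a) * bg_a and out = (fg_a * fg + (1 - fg_a) * bg_a * bg) / out_a, which the three branches compute in 0..255 fixed point, with the background_alpha == 255 branch as a cheaper special case.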
/* decode_frame_common(): walk the chunk stream and dispatch each chunk */
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p, const AVPacket *avpkt)
{
    const AVCRC *crc_tab = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t tag, length;
    int decode_next_dat = 0;
    int i, ret;

    for (;;) {
        GetByteContext gb_chunk;

        length = bytestream2_get_be32(&s->gb);
        /* ... validate length against the remaining input ... */

        if (avctx->err_recognition & AV_EF_CRCCHECK) {
            uint32_t crc_sig = AV_RB32(s->gb.buffer + length + 4);
            uint32_t crc_cal = ~av_crc(crc_tab, UINT32_MAX, s->gb.buffer, length + 4);
            if (crc_sig ^ crc_cal) {
                av_log(avctx, AV_LOG_ERROR, "CRC mismatch in chunk");
                if (avctx->err_recognition & AV_EF_EXPLODE) {
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
                /* ... otherwise skip the damaged chunk and continue ... */
            }
        }
        tag = bytestream2_get_le32(&s->gb);
        if (avctx->debug & FF_DEBUG_STARTCODE)
            av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
                   av_fourcc2str(tag), length);
        bytestream2_init(&gb_chunk, s->gb.buffer, length);
        bytestream2_skip(&s->gb, length + 4);

        if (avctx->codec_id == AV_CODEC_ID_PNG &&
            avctx->skip_frame == AVDISCARD_ALL) {
            switch (tag) {
            case MKTAG('I', 'H', 'D', 'R'):
            case MKTAG('p', 'H', 'Y', 's'):
            case MKTAG('t', 'E', 'X', 't'):
            case MKTAG('I', 'D', 'A', 'T'):
            case MKTAG('t', 'R', 'N', 'S'):
                break;
            default:
                continue;
            }
        }

        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if ((ret = decode_ihdr_chunk(avctx, s, &gb_chunk)) < 0)
                goto fail;
            break;
        case MKTAG('p', 'H', 'Y', 's'):
            if ((ret = decode_phys_chunk(avctx, s, &gb_chunk)) < 0)
                goto fail;
            break;
        case MKTAG('f', 'c', 'T', 'L'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if ((ret = decode_fctl_chunk(avctx, s, &gb_chunk)) < 0)
                goto fail;
            decode_next_dat = 1;
            break;
        case MKTAG('f', 'd', 'A', 'T'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if (!decode_next_dat) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            bytestream2_get_be32(&gb_chunk); /* sequence number */
            /* fallthrough */
        case MKTAG('I', 'D', 'A', 'T'):
            if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG &&
                !decode_next_dat)
                goto skip_tag;
            if ((ret = decode_idat_chunk(avctx, s, &gb_chunk, p)) < 0)
                goto fail;
            break;
        case MKTAG('P', 'L', 'T', 'E'):
            decode_plte_chunk(avctx, s, &gb_chunk);
            break;
        case MKTAG('t', 'R', 'N', 'S'):
            decode_trns_chunk(avctx, s, &gb_chunk);
            break;
        case MKTAG('t', 'E', 'X', 't'):
            decode_text_chunk(s, &gb_chunk, 0);
            break;
        case MKTAG('z', 'T', 'X', 't'):
            decode_text_chunk(s, &gb_chunk, 1);
            break;
        case MKTAG('s', 'T', 'E', 'R'): {
            int mode = bytestream2_get_byte(&gb_chunk);
            if (mode == 0 || mode == 1)
                s->stereo_mode = mode;
            else
                av_log(avctx, AV_LOG_WARNING,
                       "Unknown value in sTER chunk (%d)\n", mode);
            break;
        }
        case MKTAG('i', 'C', 'C', 'P'):
            if ((ret = decode_iccp_chunk(s, &gb_chunk, p)) < 0)
                goto fail;
            break;
        case MKTAG('c', 'H', 'R', 'M'):
            s->white_point[0] = bytestream2_get_be32(&gb_chunk);
            s->white_point[1] = bytestream2_get_be32(&gb_chunk);
            for (i = 0; i < 3; i++) {
                s->display_primaries[i][0] = bytestream2_get_be32(&gb_chunk);
                s->display_primaries[i][1] = bytestream2_get_be32(&gb_chunk);
            }
            break;
        case MKTAG('g', 'A', 'M', 'A'): {
            int num = bytestream2_get_be32(&gb_chunk);
            /* ... store the gamma value as frame metadata ... */
            break;
        }
        case MKTAG('I', 'E', 'N', 'D'):
            if (!(s->pic_state & PNG_ALLIMAGE))
                av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
            goto exit_loop;
        default:
skip_tag:
            /* unknown or skipped chunk: its payload was already consumed */
            break;
        }
    }

exit_loop:
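For context, every PNG chunk is a 4-byte big-endian length, a 4-byte type, the data, and a CRC-32 computed over type + data, which is what the av_crc() check above verifies (the `length + 4` covers the type bytes that precede the data). A standalone check using zlib's crc32(), shown purely as an illustration rather than the decoder's own av_crc() path, could look like:

#include <stdint.h>
#include <zlib.h>

/* Illustrative check: 'chunk' points at the 4-byte type field, followed by
 * 'length' data bytes and a 4-byte big-endian CRC. Returns 1 on a match. */
static int png_chunk_crc_ok(const uint8_t *chunk, uint32_t length)
{
    const uint8_t *crc_bytes = chunk + 4 + length;
    uint32_t stored   = (uint32_t)crc_bytes[0] << 24 | crc_bytes[1] << 16 |
                        crc_bytes[2] << 8 | crc_bytes[3];
    uint32_t computed = crc32(0L, chunk, 4 + length);
    return stored == computed;
}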
    if (s->bits_per_pixel <= 4)
        handle_small_bpp(s, p);

    /* expand the tRNS colour key on non-palette images: widen each pixel in
     * place (right to left) and append an alpha sample */
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
        size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
        size_t raw_bpp = s->bpp - byte_depth;
        unsigned x, y;

        for (y = 0; y < s->height; ++y) {
            uint8_t *row = p->data[0] + p->linesize[0] * y;

            if (s->bpp == 2 && byte_depth == 1) {
                uint8_t *pixel = &row[2 * s->width - 1];
                uint8_t *rowp  = &row[1 * s->width - 1];
                int tcolor = s->transparent_color_be[0];
                for (x = s->width; x > 0; --x) {
                    *pixel-- = *rowp == tcolor ? 0 : 0xff;
                    *pixel-- = *rowp--;
                }
            } else if (s->bpp == 4 && byte_depth == 1) {
                uint8_t *pixel = &row[4 * s->width - 1];
                uint8_t *rowp  = &row[3 * s->width - 1];
                int tcolor = AV_RL24(s->transparent_color_be);
                for (x = s->width; x > 0; --x) {
                    *pixel-- = AV_RL24(rowp - 2) == tcolor ? 0 : 0xff;
                    *pixel-- = *rowp--;
                    *pixel-- = *rowp--;
                    *pixel-- = *rowp--;
                }
            } else {
                /* general case: move the raw sample, then write the alpha */
                for (x = s->width; x > 0; --x) {
                    uint8_t *pixel = &row[s->bpp * (x - 1)];
                    memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
                    if (!memcmp(pixel, s->transparent_color_be, raw_bpp))
                        memset(&pixel[raw_bpp], 0, byte_depth);
                    else
                        memset(&pixel[raw_bpp], 0xff, byte_depth);
                }
            }
        }
    }
    /* P-frame handling at the end of decode_frame_common(): reuse the previous
     * picture only when its geometry and format match the current frame */
    if (s->last_picture.f->data[0]) {
        if (   s->last_picture.f->width  == p->width
            && s->last_picture.f->height == p->height
            && s->last_picture.f->format == p->format) {
            if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
                handle_p_frame_png(s, p);
            else if (CONFIG_APNG_DECODER &&
                     avctx->codec_id == AV_CODEC_ID_APNG &&
                     (ret = handle_p_frame_apng(avctx, s, p)) < 0)
                goto fail;
        }
    }

/* clear_frame_metadata(): reset the per-frame text/ICC/stereo state (excerpt) */
    s->iccp_data_len = 0;
    s->iccp_name[0]  = 0;
    s->stereo_mode   = -1;

/* output_frame(): attach the collected metadata to the output frame (excerpt) */
    memcpy(sd->data, s->iccp_data, s->iccp_data_len);  /* AV_FRAME_DATA_ICC_PROFILE side data */
    if (s->stereo_mode >= 0) {
        /* ... av_stereo3d_create_side_data(): AV_STEREO3D_SIDEBYSIDE,
         *     with AV_STEREO3D_FLAG_INVERT when stereo_mode == 1 ... */
    }
    for (int i = 0; i < 3; i++) {
        /* ... cHRM display_primaries[i][0..1] stored via av_make_q() ... */
    }
#if CONFIG_PNG_DECODER
static int decode_frame_png(AVCodecContext *avctx, void *data, int *got_frame,
                            AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    const uint8_t *buf     = avpkt->data;
    int buf_size           = avpkt->size;
    AVFrame *p             = data;
    int64_t sig;
    int ret;

    bytestream2_init(&s->gb, buf, buf_size);

    /* check signature */
    sig = bytestream2_get_be64(&s->gb);
    if (sig != PNGSIG && sig != MNGSIG) {
        av_log(avctx, AV_LOG_ERROR, "Missing png signature\n");
        return AVERROR_INVALIDDATA;
    }

    s->y = s->has_trns = 0;

    /* init the zlib stream */
    s->zstream.zalloc = ff_png_zalloc;
    s->zstream.zfree  = ff_png_zfree;
    s->zstream.opaque = NULL;
    ret = inflateInit(&s->zstream);
    if (ret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
        return AVERROR_EXTERNAL;
    }

    ret = decode_frame_common(avctx, s, p, avpkt);
    /* ... on success set *got_frame = 1 ... */

    inflateEnd(&s->zstream);
    return ret < 0 ? ret : buf_size;
}
#endif
#if CONFIG_APNG_DECODER
static int decode_frame_apng(AVCodecContext *avctx, void *data, int *got_frame,
                             AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    int ret;

    /* ... on the first frame, parse the header chunks carried in extradata ... */
    if ((ret = inflateInit(&s->zstream)) != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
        return AVERROR_EXTERNAL;
    }
    /* ... bytestream2_init() on the packet, then decode_frame_common() ... */
    inflateEnd(&s->zstream);
    return ret;
}
#endif

#if HAVE_THREADS
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    /* ... copy the APNG state, then re-reference the previous picture ... */
    if (src_frame && src_frame->f->data[0]) {
        /* ... ff_thread_ref_frame() into the destination context ... */
    }
    /* ... */
}
#endif

static av_cold int png_dec_init(AVCodecContext *avctx)
{
    PNGDecContext *s = avctx->priv_data;

    /* ... allocate s->last_picture.f and s->picture.f with av_frame_alloc() ... */
    if (!s->last_picture.f || !s->picture.f) {
        return AVERROR(ENOMEM);
    }
    /* ... ff_pngdsp_init(&s->dsp) ... */
}

static av_cold int png_dec_end(AVCodecContext *avctx)
{
    PNGDecContext *s = avctx->priv_data;

    /* ... free the row buffers ... */
    s->last_row_size = 0;
    s->tmp_row_size  = 0;
    /* ... */
}
#if CONFIG_APNG_DECODER
const AVCodec ff_apng_decoder = {
    .name   = "apng",
    /* ... */
    .decode = decode_frame_apng,
    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};
#endif

#if CONFIG_PNG_DECODER
const AVCodec ff_png_decoder = {
    .name   = "png",
    /* ... */
    .decode = decode_frame_png,
    .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};
#endif