#include "config_components.h"

/* ... (license header, remaining includes and the PNGDecContext definition
 *      are elided in this listing) ... */

/* Mask to determine which pixels are valid in a pass */
static const uint8_t png_pass_mask[NB_PASSES] = {
    0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
};

/* Mask to determine which y pixels can be written in a pass */
static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
    0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
};

/* Mask to determine which pixels to overwrite while displaying */
static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
    0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
};
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
{
    int x, mask, dsp_mask, j, src_x, b, bpp;

    mask     = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];

    switch (bits_per_pixel) {
    case 1:
        src_x = 0;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                dst[x >> 3] &= 0xFF7F >> j;
                dst[x >> 3] |= b << (7 - j);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 2:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2 * (src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F >> j2;
                dst[x >> 2] |= b << (6 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 4:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 4 * (x & 1);
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4 * (src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F >> j2;
                dst[x >> 1] |= b << (4 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    default:
        bpp = bits_per_pixel >> 3;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                /* copy one bpp-byte pixel into the destination (elided) */
            }
            if ((mask << j) & 0x80) {
                /* advance the source by one pixel (elided) */
            }
        }
        break;
    }
}
/* core loop of ff_add_png_paeth_prediction(dst, src, top, w, bpp) */
    for (i = 0; i < w; i++) {
        int a, b, c, p, pa, pb, pc;

        /* a = left, b = above, c = above-left; compute the Paeth estimate
         * p = a + b - c and its distance to each neighbour (elided) */
        if (pa <= pb && pa <= pc)
            p = a;
        /* otherwise pick b, then c; finally dst[i] = p + src[i] (elided) */
    }
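/* For reference, here is a minimal, self-contained sketch of the Paeth
 * predictor as specified by the PNG standard. It only illustrates the
 * selection rule used in the loop above; it is not the FFmpeg routine
 * (ff_add_png_paeth_prediction works in place on interleaved rows) and all
 * names below are hypothetical. */
#include <stdint.h>
#include <stdlib.h>

static uint8_t paeth_predict(uint8_t a, uint8_t b, uint8_t c)
{
    int p  = a + b - c;       /* initial estimate */
    int pa = abs(p - a);      /* distance to left, up, up-left */
    int pb = abs(p - b);
    int pc = abs(p - c);

    if (pa <= pb && pa <= pc) /* ties resolve in the order a, b, c */
        return a;
    if (pb <= pc)
        return b;
    return c;
}

/* Undo PNG filter type 4 (Paeth) on one row: prev is the reconstructed
 * previous row (or NULL for the first row), cur is rewritten in place. */
static void unfilter_paeth_row(uint8_t *cur, const uint8_t *prev,
                               int size, int bpp)
{
    for (int i = 0; i < size; i++) {
        uint8_t a = i >= bpp           ? cur[i - bpp]  : 0;
        uint8_t b = prev               ? prev[i]       : 0;
        uint8_t c = (prev && i >= bpp) ? prev[i - bpp] : 0;
        cur[i] = (uint8_t)(cur[i] + paeth_predict(a, b, c));
    }
}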
#define UNROLL1(bpp, op)                                                    \
    {                                                                       \
        r = dst[0];                                                         \
        if (bpp >= 2) g = dst[1];                                           \
        if (bpp >= 3) b = dst[2];                                           \
        if (bpp >= 4) a = dst[3];                                           \
        for (; i <= size - bpp; i += bpp) {                                 \
            dst[i + 0] = r = op(r, src[i + 0], last[i + 0]);                \
            if (bpp == 1) continue;                                         \
            dst[i + 1] = g = op(g, src[i + 1], last[i + 1]);                \
            if (bpp == 2) continue;                                         \
            dst[i + 2] = b = op(b, src[i + 2], last[i + 2]);                \
            if (bpp == 3) continue;                                         \
            dst[i + 3] = a = op(a, src[i + 3], last[i + 3]);                \
        }                                                                   \
    }

#define UNROLL_FILTER(op)                                                   \
    if (bpp == 1) {                                                         \
        UNROLL1(1, op)                                                      \
    } else if (bpp == 2) {                                                  \
        UNROLL1(2, op)                                                      \
    } else if (bpp == 3) {                                                  \
        UNROLL1(3, op)                                                      \
    } else if (bpp == 4) {                                                  \
        UNROLL1(4, op)                                                      \
    }                                                                       \
    for (; i < size; i++) {                                                 \
        dst[i] = op(dst[i - bpp], src[i], last[i]);                         \
    }
/* NOTE: 'dst' may be equal to 'last' */
void ff_png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
                       uint8_t *src, uint8_t *last, int size, int bpp)
{
    int i, p, r, g, b, a;

    switch (filter_type) {
    case PNG_FILTER_VALUE_NONE:
        memcpy(dst, src, size);
        break;
    case PNG_FILTER_VALUE_SUB:
        for (i = 0; i < bpp; i++)
            dst[i] = src[i];
        if (bpp == 4) {
            p = *(int *)dst;
            for (; i < size; i += bpp) {
                unsigned s = *(int *)(src + i);
                p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
                *(int *)(dst + i) = p;
            }
        } else {
#define OP_SUB(x, s, l) ((x) + (s))
            UNROLL_FILTER(OP_SUB);
        }
        break;
    case PNG_FILTER_VALUE_UP:
        dsp->add_bytes_l2(dst, src, last, size);
        break;
    case PNG_FILTER_VALUE_AVG:
        for (i = 0; i < bpp; i++) {
            p      = (last[i] >> 1);
            dst[i] = p + src[i];
        }
#define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
        UNROLL_FILTER(OP_AVG);
        break;
    case PNG_FILTER_VALUE_PAETH:
        for (i = 0; i < bpp; i++) {
            p      = last[i];
            dst[i] = p + src[i];
        }
        if (bpp > 2 && size > 4) {
            /* hand the bulk of the row to the (possibly SIMD) DSP Paeth
             * routine, leaving the tail for the scalar version (elided) */
        }
        /* ff_add_png_paeth_prediction() on the remaining bytes (elided) */
        break;
    }
}
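/* As a toy illustration of the Sub and Up recurrences handled above, here is
 * a hedged standalone sketch that undoes those two filters on a raw row;
 * these helpers are not FFmpeg APIs, only the textbook per-byte formulas. */
#include <stdint.h>

/* PNG filter type 1 (Sub): the encoder subtracted the byte bpp positions to
 * the left, so reconstruction adds it back, scanning left to right. */
static void unfilter_sub(uint8_t *row, int size, int bpp)
{
    for (int i = bpp; i < size; i++)
        row[i] = (uint8_t)(row[i] + row[i - bpp]);
}

/* PNG filter type 2 (Up): add the byte directly above (0 for the first row). */
static void unfilter_up(uint8_t *row, const uint8_t *prev, int size)
{
    for (int i = 0; i < size; i++)
        row[i] = (uint8_t)(row[i] + (prev ? prev[i] : 0));
}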
/* undo the MNG / LOCO-I colour decorrelation: red and blue were stored as
 * differences against green */
#define YUV2RGB(NAME, TYPE) \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
{ \
    int i; \
    for (i = 0; i < size - 2; i += 3 + alpha) { \
        int g = dst [i + 1]; \
        dst[i + 0] += g; \
        dst[i + 2] += g; \
    } \
}

YUV2RGB(rgb8,  uint8_t)
YUV2RGB(rgb16, uint16_t)
static int percent_missing(PNGDecContext *s)
{
    if (s->interlace_type) {
        /* interlaced: progress is measured in completed Adam7 passes */
        return 100 - 100 * s->pass / (NB_PASSES - 1);
    } else {
        return 100 - 100 * s->y / s->cur_h;
    }
}
static void png_handle_row(PNGDecContext *s, uint8_t *dst, ptrdiff_t dst_stride)
{
    uint8_t *ptr, *last_row;

    if (!s->interlace_type) {
        ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
        if (s->y == 0)
            last_row = s->last_row;
        else
            last_row = ptr - dst_stride;

        ff_png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
                          last_row, s->row_size, s->bpp);
        /* when the MNG LOCO colour transform is in use, undo it on the
         * previous row (it lags one row behind the filter) */
        if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
            if (s->bit_depth == 16) {
                deloco_rgb16((uint16_t *)(ptr - dst_stride), s->row_size / 2,
                             s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            } else {
                deloco_rgb8(ptr - dst_stride, s->row_size,
                            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            }
        }
        s->y++;
        if (s->y == s->cur_h) {
            /* picture complete: also undo the transform on the final row */
            if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
                if (s->bit_depth == 16) {
                    deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
                                 s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
                } else {
                    deloco_rgb8(ptr, s->row_size,
                                s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
                }
            }
        }
    } else {
        /* interlaced (Adam7): filter into a temporary row, then scatter it
         * (per-pass row masking via the ymask tables elided) */
        ptr = dst + dst_stride * (s->y + s->y_offset) + s->x_offset * s->bpp;
        ff_png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
                          s->last_row, s->pass_row_size, s->bpp);
        FFSWAP(uint8_t *, s->last_row, s->tmp_row);
        FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
        png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
                               s->color_type, s->last_row);
        s->y++;
        if (s->y == s->cur_h) {
            memset(s->last_row, 0, s->row_size);
            /* step to the next non-empty Adam7 pass */
            for (;;) {
                /* ... advance s->pass, recompute s->pass_row_size ... */
                s->crow_size = s->pass_row_size + 1;
                if (s->pass_row_size != 0)
                    break;
            }
        }
    }
}
static int png_decode_idat(PNGDecContext *s, GetByteContext *gb,
                           uint8_t *dst, ptrdiff_t dst_stride)
{
    z_stream *const zstream = &s->zstream.zstream;
    int ret;

    zstream->avail_in = bytestream2_get_bytes_left(gb);
    zstream->next_in  = gb->buffer;

    /* decode one line if possible */
    while (zstream->avail_in > 0) {
        ret = inflate(zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            return AVERROR_EXTERNAL;   /* zlib error */
        }
        if (zstream->avail_out == 0) {
            if (!(s->pic_state & PNG_ALLIMAGE))
                png_handle_row(s, dst, dst_stride);
            zstream->avail_out = s->crow_size;
            zstream->next_out  = s->crow_buf;
        }
        if (ret == Z_STREAM_END && zstream->avail_in > 0) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "%d undecompressed bytes left in buffer\n", zstream->avail_in);
            return 0;
        }
    }
    return 0;
}
static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
                       const uint8_t *data_end, void *logctx)
{
    FFZStream z;
    z_stream *const zstream = &z.zstream;
    unsigned char *buf;
    unsigned buf_size;
    int ret = ff_inflate_init(&z, logctx);

    if (ret < 0)
        return ret;

    zstream->next_in  = data;
    zstream->avail_in = data_end - data;
    av_bprint_init(bp, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (zstream->avail_in > 0) {
        av_bprint_get_buffer(bp, 1, &buf, &buf_size);
        if (buf_size < 2) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        zstream->next_out  = buf;
        zstream->avail_out = buf_size - 1;
        ret = inflate(zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            ret = AVERROR_EXTERNAL;
            goto fail;
        }
        bp->len += zstream->next_out - buf;
        if (ret == Z_STREAM_END)
            break;
    }
    ret = 0;
fail:
    ff_inflate_end(&z);
    bp->str[bp->len] = 0;
    return ret;
}
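/* decode_zbuf() drives zlib in the usual streaming fashion: refill the output
 * buffer, call inflate(), account for the bytes produced. A minimal,
 * self-contained loop with the same shape (plain zlib, no AVBPrint) might
 * look like this; the fixed-size output buffer and all names are assumptions
 * of this sketch, not FFmpeg code. */
#include <zlib.h>
#include <string.h>

/* Inflate in/in_len into out (capacity out_cap); returns bytes produced or
 * -1 on error. A real implementation would grow the output buffer. */
static long inflate_buffer(const unsigned char *in, size_t in_len,
                           unsigned char *out, size_t out_cap)
{
    z_stream zs;
    long produced = 0;
    int ret;

    memset(&zs, 0, sizeof(zs));
    if (inflateInit(&zs) != Z_OK)
        return -1;

    zs.next_in  = (unsigned char *)in;
    zs.avail_in = in_len;

    do {
        zs.next_out  = out + produced;
        zs.avail_out = out_cap - produced;
        ret = inflate(&zs, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            inflateEnd(&zs);
            return -1;
        }
        produced = out_cap - zs.avail_out;
    } while (ret != Z_STREAM_END && zs.avail_in > 0 && produced < (long)out_cap);

    inflateEnd(&zs);
    return produced;
}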
static char *iso88591_to_utf8(const char *in, size_t size_in)
{
    size_t extra = 0, i;
    char *out, *q;

    for (i = 0; i < size_in; i++)
        extra += !!(in[i] & 0x80);
    if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
        return NULL;
    q = out = av_malloc(size_in + extra + 1);
    if (!out)
        return NULL;
    for (i = 0; i < size_in; i++) {
        if (in[i] & 0x80) {
            *(q++) = 0xC0 | (in[i] >> 6);
            *(q++) = 0x80 | (in[i] & 0x3F);
        } else {
            *(q++) = in[i];
        }
    }
    *(q++) = 0;
    return out;
}
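/* Worked example of the Latin-1 -> UTF-8 expansion above: bytes below 0x80
 * copy through, bytes >= 0x80 become the two bytes 0xC0|(v>>6), 0x80|(v&0x3F).
 * "café" (63 61 66 E9 in ISO-8859-1) therefore becomes 63 61 66 C3 A9:
 * 0xE9>>6 = 3 -> 0xC3, 0xE9&0x3F = 0x29 -> 0xA9. Hedged standalone sketch
 * operating on unsigned bytes; not an FFmpeg API. */
#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>

static char *latin1_to_utf8(const uint8_t *in, size_t n)
{
    size_t extra = 0, i;
    char *out, *q;

    for (i = 0; i < n; i++)
        extra += in[i] >= 0x80;            /* each high byte needs 2 bytes */
    q = out = malloc(n + extra + 1);
    if (!out)
        return NULL;
    for (i = 0; i < n; i++) {
        if (in[i] >= 0x80) {
            *q++ = (char)(0xC0 | (in[i] >> 6));
            *q++ = (char)(0x80 | (in[i] & 0x3F));
        } else {
            *q++ = (char)in[i];
        }
    }
    *q = 0;
    return out;
}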
static int decode_text_chunk(PNGDecContext *s, GetByteContext *gb, int compressed)
{
    /* data / data_end delimit the chunk payload (setup elided) */
    const char *keyword     = data;
    const char *keyword_end = memchr(keyword, 0, data_end - data);
    char *kw_utf8 = NULL, *txt_utf8 = NULL;

    if (!keyword_end)
        return AVERROR_INVALIDDATA;
    data = keyword_end + 1;

    if (compressed) {
        /* zTXt: a compression-method byte followed by a deflate stream,
         * inflated via decode_zbuf() (elided) */
        if (data == data_end)
            return AVERROR_INVALIDDATA;
    } else {
        text     = data;
        text_len = data_end - data;
    }

    /* keyword and text are converted with iso88591_to_utf8() and stored in
     * s->frame_metadata via av_dict_set() (elided) */
}
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
{
    /* (chunk length / duplicate-IHDR validation elided) */
    s->width  = s->cur_w = bytestream2_get_be32(gb);
    s->height = s->cur_h = bytestream2_get_be32(gb);
    if (av_image_check_size(s->width, s->height, 0, avctx)) {
        s->cur_w = s->cur_h = s->width = s->height = 0;
        return AVERROR_INVALIDDATA;           /* invalid image size */
    }
    s->bit_depth = bytestream2_get_byte(gb);
    if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
        s->bit_depth != 8 && s->bit_depth != 16) {
        goto error;                           /* invalid bit depth */
    }
    s->color_type       = bytestream2_get_byte(gb);
    s->compression_type = bytestream2_get_byte(gb);
    if (s->compression_type) {
        goto error;                           /* only method 0 is defined */
    }
    s->filter_type    = bytestream2_get_byte(gb);
    s->interlace_type = bytestream2_get_byte(gb);
    s->hdr_state |= PNG_IHDR;
    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
               "compression_type=%d filter_type=%d interlace_type=%d\n",
               s->width, s->height, s->bit_depth, s->color_type,
               s->compression_type, s->filter_type, s->interlace_type);

    return 0;
error:
    s->cur_w = s->cur_h = s->width = s->height = 0;
    return AVERROR_INVALIDDATA;
}
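/* The 13-byte IHDR payload parsed above (width, height, bit depth, colour
 * type, compression, filter, interlace) can also be read without any FFmpeg
 * helpers. A hedged standalone sketch using plain big-endian reads; struct
 * and function names here are invented for illustration only. */
#include <stdint.h>
#include <stddef.h>

struct png_ihdr {
    uint32_t width, height;
    uint8_t  bit_depth, color_type, compression, filter, interlace;
};

static uint32_t rd_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

/* Parse the IHDR payload; returns 0 on success, -1 on a value the spec
 * forbids. Sketch only, not the decoder's own validation. */
static int parse_ihdr(const uint8_t *p, size_t len, struct png_ihdr *h)
{
    if (len < 13)
        return -1;
    h->width       = rd_be32(p);
    h->height      = rd_be32(p + 4);
    h->bit_depth   = p[8];
    h->color_type  = p[9];
    h->compression = p[10];
    h->filter      = p[11];
    h->interlace   = p[12];

    if (h->width == 0 || h->height == 0 || h->compression != 0)
        return -1;
    if (h->bit_depth != 1 && h->bit_depth != 2 && h->bit_depth != 4 &&
        h->bit_depth != 8 && h->bit_depth != 16)
        return -1;
    return 0;
}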
/* from populate_avctx_color_fields(avctx, frame): translate the colour
 * chunks (cICP, iCCP, sRGB, cHRM, gAMA, sBIT, cLLi, mDCv) into the
 * AVCodecContext/AVFrame colour fields and side data */
    if (s->have_cicp) {
        /* primaries and transfer were validated while parsing the chunk */
        if (s->cicp_range == 0) {
            /* narrow-range cICP is not supported by this decoder */
        } else if (s->cicp_range != 1) {
            /* invalid value: a warning was already printed while parsing */
        }
    } else if (s->iccp_data) {
        AVFrameSideData *sd;
        ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_ICC_PROFILE,
                                     s->iccp_data_len, &sd);
        if (ret < 0)
            return ret;
        if (sd)
            memcpy(sd->data, s->iccp_data, s->iccp_data_len);
    } else if (s->have_srgb) {
        /* sRGB: BT.709 primaries, IEC 61966-2-1 transfer */
        avctx->color_primaries = frame->color_primaries = AVCOL_PRI_BT709;
        avctx->color_trc       = frame->color_trc       = AVCOL_TRC_IEC61966_2_1;
    } else if (s->have_chrm) {
        /* cHRM: build a primaries description from the stored white point and
         * display primaries, then av_csp_primaries_id_from_desc() (elided) */
    }

    /* these chunks override gAMA */
    if (s->iccp_data || s->have_srgb || s->have_cicp) {
        av_dict_set(&s->frame_metadata, "gamma", NULL, 0);
    } else if (s->gamma) {
        /* map well-known gAMA values (with ~0.001 gamma of tolerance) to a
         * transfer characteristic: 1/2.2, 1/2.8, 1/2.6 and 1.0 respectively */
        if (s->gamma > 45355 && s->gamma < 45555)
            avctx->color_trc = frame->color_trc = AVCOL_TRC_GAMMA22;
        else if (s->gamma > 35614 && s->gamma < 35814)
            avctx->color_trc = frame->color_trc = AVCOL_TRC_GAMMA28;
        else if (s->gamma > 38362 && s->gamma < 38562)
            avctx->color_trc = frame->color_trc = AVCOL_TRC_SMPTE428;
        else if (s->gamma > 99900 && s->gamma < 100100)
            avctx->color_trc = frame->color_trc = AVCOL_TRC_LINEAR;
    }

    /* PNG is always full-range RGB */
    avctx->colorspace = frame->colorspace = AVCOL_SPC_RGB;
    if (!s->have_cicp || s->cicp_range == 1)
        avctx->color_range = frame->color_range = AVCOL_RANGE_JPEG;

    if (!s->has_trns && s->significant_bits > 0)
        avctx->bits_per_raw_sample = s->significant_bits;

    /* cLLi: content light level, stored in 0.0001 cd/m^2 units */
    if (s->clli_max || s->clli_avg) {
        AVContentLightMetadata *clli;
        ret = ff_decode_content_light_new(avctx, frame, &clli);
        if (ret < 0)
            return ret;
        if (clli) {
            clli->MaxCLL  = s->clli_max / 10000;
            clli->MaxFALL = s->clli_avg / 10000;
        }
    }

    /* mDCv: mastering display metadata, converted with av_make_q() */
    {
        AVMasteringDisplayMetadata *mdcv;
        ret = ff_decode_mastering_display_new(avctx, frame, &mdcv);
        if (ret < 0)
            return ret;
        if (mdcv) {
            for (int i = 0; i < 3; i++) {
                /* primaries and white point are stored as chromaticities
                 * scaled by 50000, luminance in 0.0001 cd/m^2 (elided) */
            }
        }
    }
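/* Where the gAMA windows above come from: the chunk stores
 * round(100000 / display gamma), and each branch accepts that value within
 * roughly +/-0.001 gamma of the nominal curve. A quick, hedged check of the
 * arithmetic (illustrative only): */
#include <assert.h>

static void check_gama_windows(void)
{
    assert(100000 / 2.2 > 45355 && 100000 / 2.2 < 45555);   /* ~45455 -> GAMMA22  */
    assert(100000 / 2.8 > 35614 && 100000 / 2.8 < 35814);   /* ~35714 -> GAMMA28  */
    assert(100000 / 2.6 > 38362 && 100000 / 2.6 < 38562);   /* ~38462 -> SMPTE428 */
    assert(100000 / 1.0 > 99900 && 100000 / 1.0 < 100100);  /* 100000 -> LINEAR   */
}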
/* from decode_idat_chunk(): on the first image data chunk, derive the output
 * pixel format and the per-row buffers from the IHDR values */
    size_t byte_depth = s->bit_depth > 8 ? 2 : 1;

    /* ... geometry / header sanity checks elided ... */

    s->bits_per_pixel = s->bit_depth * s->channels;
    s->bpp            = (s->bits_per_pixel + 7) >> 3;
    s->row_size       = (s->cur_w * s->bits_per_pixel + 7) >> 3;

    if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
        s->color_type == PNG_COLOR_TYPE_RGB) {
        avctx->pix_fmt = AV_PIX_FMT_RGB24;
    } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
               s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_RGBA;
    } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
               s->color_type == PNG_COLOR_TYPE_GRAY) {
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_GRAY) {
        avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_RGB) {
        avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
    } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 ||
                s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
               s->color_type == PNG_COLOR_TYPE_PALETTE) {
        avctx->pix_fmt = AV_PIX_FMT_PAL8; /* the APNG path later expands to RGBA */
    } else if (s->bit_depth == 1 && s->bits_per_pixel == 1) {
        avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
    } else if (s->bit_depth == 8 &&
               s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_YA8;
    } else if (s->bit_depth == 16 &&
               s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
        avctx->pix_fmt = AV_PIX_FMT_YA16BE;
    } else {
        avpriv_report_missing_feature(avctx,
                                      "Bit depth %d color type %d",
                                      s->bit_depth, s->color_type);
        return AVERROR_PATCHWELCOME;
    }

    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
        /* promote the format so the tRNS colour can become a real alpha
         * channel: RGB24->RGBA, RGB48->RGBA64, GRAY8->YA8, GRAY16->YA16 */
        switch (avctx->pix_fmt) {
        /* ... */
        default:
            avpriv_request_sample(avctx, "bit depth %d "
                                  "and color type %d with TRNS",
                                  s->bit_depth, s->color_type);
            return AVERROR_INVALIDDATA;
        }
        s->bpp += byte_depth;
    }

    /* ... frame allocation via ff_thread_get_ext_buffer() elided ... */

    /* compressed row size */
    if (!s->interlace_type) {
        s->crow_size = s->row_size + 1;
    } else {
        s->pass          = 0;
        s->pass_row_size = ff_png_pass_row_size(s->pass, s->bits_per_pixel, s->cur_w);
        s->crow_size     = s->pass_row_size + 1;
    }
    ff_dlog(avctx, "row_size=%d crow_size =%d\n",
            s->row_size, s->crow_size);

    /* copy the palette if needed */
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));

    /* an empty last_row is used when differencing against the first row; a
     * tmp_row is additionally needed for interlaced images */
    if (s->interlace_type || /* ... */) {
        /* av_fast_padded_mallocz()/av_fast_padded_malloc() of the row
         * buffers (elided) */
    }

    /* we want crow_buf + 1 to be 16-byte aligned */
    s->crow_buf                  = s->buffer + 15;
    s->zstream.zstream.avail_out = s->crow_size;
    s->zstream.zstream.next_out  = s->crow_buf;

    /* while a synthetic alpha channel is pending (tRNS on a non-palette
     * image), decode with the raw bpp and re-add the alpha byte afterwards */
    s->bpp -= byte_depth;
    /* ... png_decode_idat(s, gb, p->data[0], p->linesize[0]) ... */
    s->bpp += byte_depth;
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
{
    int length = bytestream2_get_bytes_left(gb);
    int n, i, r, g, b;

    if ((length % 3) != 0 || length > 256 * 3)
        return AVERROR_INVALIDDATA;
    /* read the palette */
    n = length / 3;
    for (i = 0; i < n; i++) {
        r = bytestream2_get_byte(gb);
        g = bytestream2_get_byte(gb);
        b = bytestream2_get_byte(gb);
        s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
    }
    for (; i < 256; i++)
        s->palette[i] = (0xFFU << 24);
    s->hdr_state |= PNG_PLTE;

    return 0;
}
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
{
    int length = bytestream2_get_bytes_left(gb);
    int i;

    if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
        /* alpha values for the existing palette entries */
        if (length > 256 || !(s->hdr_state & PNG_PLTE))
            return AVERROR_INVALIDDATA;
        for (i = 0; i < length; i++) {
            unsigned v = bytestream2_get_byte(gb);
            s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
        }
    } else if (s->color_type == PNG_COLOR_TYPE_GRAY ||
               s->color_type == PNG_COLOR_TYPE_RGB) {
        /* one transparent colour, stored big-endian per channel
         * (length / bit-depth validation elided) */
        for (i = 0; i < length / 2; i++) {
            unsigned v = bytestream2_get_be16(gb); /* clipped to bit_depth in the source */
            if (s->bit_depth > 8)
                AV_WB16(&s->transparent_color_be[2 * i], v);
            else
                s->transparent_color_be[i] = v;
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    s->has_trns = 1;
    return 0;
}
static int decode_iccp_chunk(PNGDecContext *s, GetByteContext *gb)
{
    int ret, cnt = 0;
    AVBPrint bp;

    /* NUL-terminated profile name, at most 79 characters */
    while ((s->iccp_name[cnt++] = bytestream2_get_byte(gb)) && cnt < 81);
    if (cnt > 80) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* only compression method 0 (deflate) is defined */
    if (bytestream2_get_byte(gb) != 0) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    /* inflate the profile into s->iccp_data via decode_zbuf() (elided) */
    s->iccp_data_len = bp.len;

    return 0;
fail:
    s->iccp_name[0] = 0;
    return ret;
}
/* from decode_sbit_chunk(): every channel must declare the same number of
 * significant bits, which is later used for bits_per_raw_sample */
        int b = bytestream2_get_byteu(gb);
        /* ... all channel values must match and lie within the bit depth ... */
    s->significant_bits = bits;
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
{
    if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
        int i, j, k;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 8;
            for (k = 7; k >= 1; k--)
                if ((s->width&7) >= k)
                    pd[8*i + k - 1] = (pd[i]>>8-k) & 1;
            for (i--; i >= 0; i--) {
                pd[8*i + 7]=  pd[i]     & 1;
                pd[8*i + 6]= (pd[i]>>1) & 1;
                pd[8*i + 5]= (pd[i]>>2) & 1;
                pd[8*i + 4]= (pd[i]>>3) & 1;
                pd[8*i + 3]= (pd[i]>>4) & 1;
                pd[8*i + 2]= (pd[i]>>5) & 1;
                pd[8*i + 1]= (pd[i]>>6) & 1;
                pd[8*i + 0]=  pd[i]>>7;
            }
            pd += p->linesize[0];
        }
    } else if (s->bits_per_pixel == 2) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 4;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
                if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
                if ((s->width&3) >= 1) pd[4*i + 0]=  pd[i] >> 6;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]=  pd[i]     & 3;
                    pd[4*i + 2]= (pd[i]>>2) & 3;
                    pd[4*i + 1]= (pd[i]>>4) & 3;
                    pd[4*i + 0]=  pd[i]>>6;
                }
            } else {
                if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6     )*0x55;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]= ( pd[i]     & 3)*0x55;
                    pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                    pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                    pd[4*i + 0]= ( pd[i]>>6     )*0x55;
                }
            }
            pd += p->linesize[0];
        }
    } else if (s->bits_per_pixel == 4) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 2;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if (s->width&1) pd[2*i+0]= pd[i]>>4;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = pd[i] & 15;
                    pd[2*i + 0] = pd[i] >> 4;
                }
            } else {
                if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = (pd[i] & 15) * 0x11;
                    pd[2*i + 0] = (pd[i] >> 4) * 0x11;
                }
            }
            pd += p->linesize[0];
        }
    }
}
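/* Why the 0x55 / 0x11 factors above: replicating a 2-bit value v into the
 * byte vvvv (v * 0x55), or a 4-bit value into vv (v * 0x11), rescales it to
 * the full 8-bit range, so 3 -> 0xFF and 15 -> 0xFF. A hedged standalone
 * sketch for the 4-bit grayscale case (names invented for illustration): */
#include <stdint.h>

static void expand_gray4_to_gray8(const uint8_t *src, uint8_t *dst, int width)
{
    for (int x = 0; x < width; x++) {
        /* PNG packs the left pixel in the high nibble */
        uint8_t nibble = (x & 1) ? (src[x >> 1] & 15) : (src[x >> 1] >> 4);
        dst[x] = nibble * 0x11;   /* 0..15 -> 0, 17, 34, ..., 255 */
    }
}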
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, GetByteContext *gb)
{
    uint32_t sequence_number;
    int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;

    /* (requires IHDR to have been seen; that check is elided) */
    if (bytestream2_get_bytes_left(gb) != APNG_FCTL_CHUNK_SIZE)
        return AVERROR_INVALIDDATA;

    sequence_number = bytestream2_get_be32(gb);
    cur_w           = bytestream2_get_be32(gb);
    cur_h           = bytestream2_get_be32(gb);
    x_offset        = bytestream2_get_be32(gb);
    y_offset        = bytestream2_get_be32(gb);
    bytestream2_skip(gb, 4); /* delay_num (2 bytes) + delay_den (2 bytes) */
    dispose_op      = bytestream2_get_byte(gb);
    blend_op        = bytestream2_get_byte(gb);

    if (sequence_number == 0 &&
        (cur_w != s->width ||
         cur_h != s->height ||
         x_offset != 0 ||
         y_offset != 0) ||
        cur_w <= 0 || cur_h <= 0 ||
        x_offset < 0 || y_offset < 0 ||
        cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
        return AVERROR_INVALIDDATA;

    if ((sequence_number == 0 || !s->last_picture.f) &&
        dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
        /* there is no previous frame to restore, so fall back to clearing
         * the region to the background */
        dispose_op = APNG_DISPOSE_OP_BACKGROUND;
    }

    s->cur_w      = cur_w;
    s->cur_h      = cur_h;
    s->x_offset   = x_offset;
    s->y_offset   = y_offset;
    s->dispose_op = dispose_op;
    s->blend_op   = blend_op;

    return 0;
}
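/* The geometry rules checked above can be expressed as a small standalone
 * predicate: the first frame (sequence number 0) must cover the canvas
 * exactly, and every frame's rectangle must lie inside it. Hedged sketch,
 * independent of the decoder's context struct; names are illustrative. */
#include <stdint.h>

static int fctl_geometry_ok(uint32_t seq, int w, int h, int x, int y,
                            int canvas_w, int canvas_h)
{
    if (seq == 0 && (w != canvas_w || h != canvas_h || x != 0 || y != 0))
        return 0;                      /* first frame must be the full canvas */
    if (w <= 0 || h <= 0 || x < 0 || y < 0)
        return 0;
    if (w > canvas_w - x || h > canvas_h - y)
        return 0;                      /* must not overflow the canvas */
    return 1;
}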
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
{
    int i, j;
    uint8_t *pd      = p->data[0];
    uint8_t *pd_last = s->last_picture.f->data[0];
    int ls = av_image_get_linesize(p->format, s->width, 0);

    ls = FFMIN(ls, s->width * s->bpp);

    /* P-frames in MNG-style PNG streams carry per-byte deltas against the
     * previous picture */
    for (j = 0; j < s->height; j++) {
        for (i = 0; i < ls; i++)
            pd[i] += pd_last[i];
        pd      += p->linesize[0];
        pd_last += s->last_picture.f->linesize[0];
    }
}
/* divide by 255 and round to nearest: (x + 128) * 257 >> 16 */
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
{
    uint8_t       *dst        = p->data[0];
    ptrdiff_t      dst_stride = p->linesize[0];
    const uint8_t *src        = s->last_picture.f->data[0];
    ptrdiff_t      src_stride = s->last_picture.f->linesize[0];
    const int      bpp        = s->color_type == PNG_COLOR_TYPE_PALETTE ? 4 : s->bpp;
    int x, y;

    /* copy the parts of the canvas that this frame does not touch from the
     * previous output frame */
    for (y = 0; y < s->y_offset; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * bpp);
    for (y = s->y_offset; y < s->y_offset + s->cur_h; y++) {
        memcpy(dst + y * dst_stride, src + y * src_stride, s->x_offset * bpp);
        memcpy(dst + y * dst_stride + (s->x_offset + s->cur_w) * bpp,
               src + y * src_stride + (s->x_offset + s->cur_w) * bpp,
               (p->width - s->cur_w - s->x_offset) * bpp);
    }
    for (y = s->y_offset + s->cur_h; y < p->height; y++)
        memcpy(dst + y * dst_stride, src + y * src_stride, p->width * bpp);

    if (s->blend_op == APNG_BLEND_OP_OVER) {
        /* blend the new rectangle over the previous contents */
        for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
            uint8_t       *foreground = dst + dst_stride * y + bpp * s->x_offset;
            const uint8_t *background = src + src_stride * y + bpp * s->x_offset;
            for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += bpp, background += bpp) {
                size_t b;
                uint8_t foreground_alpha, background_alpha, output_alpha;
                uint8_t output[10];

                /* Since we might be blending alpha onto alpha:
                 *   output_alpha = fg_a + (1 - fg_a) * bg_a
                 *   output       = (fg_a * fg + (1 - fg_a) * bg_a * bg) / output_alpha
                 */
                switch (avctx->pix_fmt) {
                case AV_PIX_FMT_RGBA:
                    foreground_alpha = foreground[3];
                    background_alpha = background[3];
                    break;
                case AV_PIX_FMT_GRAY8A:
                    foreground_alpha = foreground[1];
                    background_alpha = background[1];
                    break;
                }

                if (foreground_alpha == 255)
                    continue;

                if (foreground_alpha == 0) {
                    memcpy(foreground, background, bpp);
                    continue;
                }

                output_alpha = foreground_alpha +
                               FAST_DIV255((255 - foreground_alpha) * background_alpha);

                for (b = 0; b < bpp - 1; ++b) {
                    if (output_alpha == 0) {
                        output[b] = 0;
                    } else if (background_alpha == 255) {
                        output[b] = FAST_DIV255(foreground_alpha * foreground[b] +
                                                (255 - foreground_alpha) * background[b]);
                    } else {
                        output[b] = (255 * foreground_alpha * foreground[b] +
                                     (255 - foreground_alpha) * background_alpha * background[b]) /
                                    (255 * output_alpha);
                    }
                }
                output[b] = output_alpha;
                memcpy(foreground, output, bpp);
            }
        }
    }

    return 0;
}
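/* The loop above is straight (non-premultiplied) "over" compositing. A
 * compact standalone version of the same per-pixel maths for RGBA, assuming
 * the FAST_DIV255 definition above; this helper is an illustration, not an
 * FFmpeg function. */
#include <stdint.h>

static void blend_over_rgba(uint8_t fg[4], const uint8_t bg[4])
{
    unsigned fa = fg[3], ba = bg[3];
    unsigned oa = fa + FAST_DIV255((255 - fa) * ba);   /* output alpha */

    if (oa == 0) {
        fg[0] = fg[1] = fg[2] = fg[3] = 0;             /* fully transparent */
        return;
    }
    for (int i = 0; i < 3; i++)
        fg[i] = (uint8_t)((255 * fa * fg[i] + (255 - fa) * ba * bg[i]) /
                          (255 * oa));
    fg[3] = (uint8_t)oa;
}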
static void apng_reset_background(PNGDecContext *s, const AVFrame *p)
{
    /* APNG_DISPOSE_OP_BACKGROUND: clear this frame's rectangle to fully
     * transparent black before the next frame is rendered */
    unsigned int bpp = s->color_type == PNG_COLOR_TYPE_PALETTE ? 4 : s->bpp;
    const ptrdiff_t dst_stride = s->picture.f->linesize[0];
    uint8_t *dst = s->picture.f->data[0] + s->y_offset * dst_stride + bpp * s->x_offset;

    for (size_t y = 0; y < s->cur_h; y++) {
        memset(dst, 0, bpp * s->cur_w);
        dst += dst_stride;
    }
}
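/* decode_frame_common(), reconstructed below, verifies each chunk's CRC
 * before dispatching on its tag. The stored CRC covers the 4-byte chunk type
 * plus the payload and sits right after the payload, big-endian. The same
 * check can be reproduced with zlib's crc32(); this is a hedged standalone
 * sketch, not the FFmpeg code path (which uses av_crc()). */
#include <zlib.h>
#include <stdint.h>

/* buf points at the 4-byte chunk type, followed by `length` payload bytes
 * and the 4-byte CRC. Returns 1 when the chunk is intact. */
static int png_chunk_crc_ok(const uint8_t *buf, uint32_t length)
{
    const uint8_t *stored = buf + 4 + length;
    uint32_t crc_sig = ((uint32_t)stored[0] << 24) | (stored[1] << 16) |
                       (stored[2] << 8)  |  stored[3];
    uint32_t crc_cal = crc32(0L, buf, length + 4);  /* type + payload */

    return crc_sig == crc_cal;
}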
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p, const AVPacket *avpkt)
{
    const AVCRC *crc_tab = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t tag, length;
    int decode_next_dat = 0;
    int i, ret;

    for (;;) {
        GetByteContext gb_chunk;

        length = bytestream2_get_be32(&s->gb);
        /* (chunk length sanity check elided) */

        if (avctx->err_recognition & AV_EF_CRCCHECK) {
            uint32_t crc_sig = AV_RB32(s->gb.buffer + length + 4);
            uint32_t crc_cal = ~av_crc(crc_tab, UINT32_MAX, s->gb.buffer, length + 4);
            if (crc_sig ^ crc_cal) {
                /* CRC mismatch: abort with AV_EF_EXPLODE, otherwise skip the
                 * damaged chunk and continue (elided) */
            }
        }
        tag = bytestream2_get_le32(&s->gb);

        /* the payload is parsed from its own GetByteContext so a malformed
         * chunk cannot read past its end */
        bytestream2_init(&gb_chunk, s->gb.buffer, length);
        bytestream2_skip(&s->gb, length + 4); /* payload + CRC */

        if (avctx->codec_id == AV_CODEC_ID_PNG &&
            avctx->skip_frame == AVDISCARD_ALL) {
            /* when decoding is skipped, only parse the chunks needed to fill
             * the stream parameters */
            switch (tag) {
            case MKTAG('I', 'H', 'D', 'R'):
            case MKTAG('p', 'H', 'Y', 's'):
            case MKTAG('t', 'E', 'X', 't'):
            case MKTAG('I', 'D', 'A', 'T'):
            case MKTAG('t', 'R', 'N', 'S'):
            case MKTAG('s', 'R', 'G', 'B'):
            case MKTAG('c', 'I', 'C', 'P'):
            case MKTAG('c', 'H', 'R', 'M'):
            case MKTAG('g', 'A', 'M', 'A'):
                break;
            default:
                continue;
            }
        }

        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if ((ret = decode_ihdr_chunk(avctx, s, &gb_chunk)) < 0)
                goto fail;
            break;
        case MKTAG('p', 'H', 'Y', 's'):
            if ((ret = decode_phys_chunk(avctx, s, &gb_chunk)) < 0)
                goto fail;
            break;
        case MKTAG('f', 'c', 'T', 'L'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if ((ret = decode_fctl_chunk(avctx, s, &gb_chunk)) < 0)
                goto fail;
            decode_next_dat = 1;
            break;
        case MKTAG('f', 'd', 'A', 'T'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if (!decode_next_dat) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            bytestream2_get_be32(&gb_chunk); /* sequence number */
            /* fallthrough */
        case MKTAG('I', 'D', 'A', 'T'):
            if ((ret = decode_idat_chunk(avctx, s, &gb_chunk, p)) < 0)
                goto fail;
            break;
        case MKTAG('P', 'L', 'T', 'E'):
            decode_plte_chunk(avctx, s, &gb_chunk);
            break;
        case MKTAG('t', 'R', 'N', 'S'):
            decode_trns_chunk(avctx, s, &gb_chunk);
            break;
        case MKTAG('t', 'E', 'X', 't'):
            decode_text_chunk(s, &gb_chunk, 0);
            break;
        case MKTAG('z', 'T', 'X', 't'):
            decode_text_chunk(s, &gb_chunk, 1);
            break;
        case MKTAG('s', 'T', 'E', 'R'): {
            int mode = bytestream2_get_byte(&gb_chunk);

            if (mode == 0 || mode == 1)
                s->stereo_mode = mode;
            else
                av_log(avctx, AV_LOG_WARNING,
                       "Unknown value in sTER chunk (%d)\n", mode);
            break;
        }
        case MKTAG('c', 'I', 'C', 'P'):
            s->cicp_primaries = bytestream2_get_byte(&gb_chunk);
            s->cicp_trc       = bytestream2_get_byte(&gb_chunk);
            if (bytestream2_get_byte(&gb_chunk) != 0) {
                /* only matrix coefficient 0 (identity/RGB) is valid: warn */
            }
            s->cicp_range = bytestream2_get_byte(&gb_chunk);
            if (s->cicp_range != 0 && s->cicp_range != 1) {
                /* invalid full/narrow-range flag: warn */
            }
            s->have_cicp = 1;
            break;
        case MKTAG('s', 'R', 'G', 'B'):
            /* the rendering-intent byte is ignored */
            s->have_srgb = 1;
            break;
        case MKTAG('i', 'C', 'C', 'P'): {
            if ((ret = decode_iccp_chunk(s, &gb_chunk)) < 0)
                goto fail;
            break;
        }
        case MKTAG('c', 'H', 'R', 'M'): {
            s->have_chrm = 1;
            s->white_point[0] = bytestream2_get_be32(&gb_chunk);
            s->white_point[1] = bytestream2_get_be32(&gb_chunk);
            /* RGB primaries, x then y, as chromaticities scaled by 100000 */
            for (i = 0; i < 3; i++) {
                s->display_primaries[i][0] = bytestream2_get_be32(&gb_chunk);
                s->display_primaries[i][1] = bytestream2_get_be32(&gb_chunk);
            }
            break;
        }
        case MKTAG('s', 'B', 'I', 'T'):
            if ((ret = decode_sbit_chunk(avctx, s, &gb_chunk)) < 0)
                goto fail;
            break;
        case MKTAG('g', 'A', 'M', 'A'): {
            s->gamma = bytestream2_get_be32(&gb_chunk);
            break;
        }
        case MKTAG('c', 'L', 'L', 'i'):
            if (bytestream2_get_bytes_left(&gb_chunk) != 8)
                break; /* wrong payload size: warn and ignore */
            s->clli_max = bytestream2_get_be32u(&gb_chunk);
            s->clli_avg = bytestream2_get_be32u(&gb_chunk);
            break;
        case MKTAG('m', 'D', 'C', 'v'):
            if (bytestream2_get_bytes_left(&gb_chunk) != 24)
                break; /* wrong payload size: warn and ignore */
            for (int i = 0; i < 3; i++) {
                s->mdcv_primaries[i][0] = bytestream2_get_be16u(&gb_chunk);
                s->mdcv_primaries[i][1] = bytestream2_get_be16u(&gb_chunk);
            }
            s->mdcv_white_point[0] = bytestream2_get_be16u(&gb_chunk);
            s->mdcv_white_point[1] = bytestream2_get_be16u(&gb_chunk);
            s->mdcv_max_lum = bytestream2_get_be32u(&gb_chunk);
            s->mdcv_min_lum = bytestream2_get_be32u(&gb_chunk);
            break;
        case MKTAG('I', 'E', 'N', 'D'):
            /* (completeness check elided) */
            goto exit_loop;
        default:
            /* unknown chunks are skipped */
skip_tag:
            break;
        }
    }
exit_loop:
    if (avctx->codec_id == AV_CODEC_ID_APNG &&
        s->color_type == PNG_COLOR_TYPE_PALETTE) {
        /* APNG frames are blended in RGBA: expand the palette indices in
         * place, right to left so nothing is overwritten too early */
        if (s->bits_per_pixel <= 4)
            handle_small_bpp(s, p);

        for (int y = 0; y < s->height; y++) {
            uint8_t *row = &p->data[0][p->linesize[0] * y];

            for (int x = s->width - 1; x >= 0; x--) {
                const uint8_t idx = row[x];

                row[4*x+2] =  s->palette[idx]        & 0xFF;
                row[4*x+1] = (s->palette[idx] >> 8 ) & 0xFF;
                row[4*x+0] = (s->palette[idx] >> 16) & 0xFF;
                row[4*x+3] =  s->palette[idx] >> 24;
            }
        }
    }
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
        /* synthesize an alpha channel from the tRNS colour */
        size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
        size_t raw_bpp = s->bpp - byte_depth;
        ptrdiff_t x, y;

        for (y = 0; y < s->height; ++y) {
            uint8_t *row = &p->data[0][p->linesize[0] * y];

            if (s->bpp == 2 && byte_depth == 1) {
                /* gray8 -> ya8 fast path */
                uint8_t *pixel = &row[2 * s->width - 1];
                uint8_t *rowp  = &row[1 * s->width - 1];
                int tcolor = s->transparent_color_be[0];
                for (x = s->width; x > 0; --x) {
                    *pixel-- = *rowp == tcolor ? 0 : 0xff;
                    *pixel-- = *rowp--;
                }
            } else if (s->bpp == 4 && byte_depth == 1) {
                /* rgb24 -> rgba fast path */
                uint8_t *pixel = &row[4 * s->width - 1];
                uint8_t *rowp  = &row[3 * s->width - 1];
                int tcolor = AV_RL24(s->transparent_color_be);
                for (x = s->width; x > 0; --x) {
                    *pixel-- = AV_RL24(rowp - 2) == tcolor ? 0 : 0xff;
                    *pixel-- = *rowp--;
                    *pixel-- = *rowp--;
                    *pixel-- = *rowp--;
                }
            } else {
                /* generic path, working right to left so the colour bytes
                 * are read before they are overwritten */
                for (x = s->width; x > 0; --x) {
                    uint8_t *pixel = &row[s->bpp * (x - 1)];
                    memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);

                    if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
                        memset(&pixel[raw_bpp], 0, byte_depth);
                    } else {
                        memset(&pixel[raw_bpp], 0xff, byte_depth);
                    }
                }
            }
        }
    }
    /* propagate or blend the previous picture for non-keyframes */
    if (s->last_picture.f->data[0]) {
        if (   !(avpkt->flags & AV_PKT_FLAG_KEY) /* ... */
            && s->last_picture.f->width  == p->width
            && s->last_picture.f->height == p->height
            && s->last_picture.f->format == p->format) {
            if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
                handle_p_frame_png(s, p);
            else if (CONFIG_APNG_DECODER &&
                     avctx->codec_id == AV_CODEC_ID_APNG &&
                     (ret = handle_p_frame_apng(avctx, s, p)) < 0)
                goto fail;
        }
    }

/* clear_frame_metadata(s): reset the per-frame side information */
    s->iccp_data_len = 0;
    s->iccp_name[0]  = 0;
    s->stereo_mode   = -1;
    av_dict_free(&s->frame_metadata);

/* from output_frame(s, f): attach stereo-3D side data when an sTER chunk
 * was present */
    if (s->stereo_mode >= 0) {
        AVStereo3D *stereo3d = av_stereo3d_create_side_data(f);
        if (!stereo3d)
            return AVERROR(ENOMEM);
        stereo3d->type  = AV_STEREO3D_SIDEBYSIDE;
        stereo3d->flags = s->stereo_mode ? 0 : AV_STEREO3D_FLAG_INVERT;
    }
#if CONFIG_PNG_DECODER
static int decode_frame_png(AVCodecContext *avctx, AVFrame *p,
                            int *got_frame, AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    int64_t sig;
    int ret;

    clear_frame_metadata(s);

    bytestream2_init(&s->gb, buf, buf_size);

    /* check the 8-byte PNG (or MNG) signature */
    sig = bytestream2_get_be64(&s->gb);
    if (sig != PNGSIG && sig != MNGSIG)
        return AVERROR_INVALIDDATA;    /* missing signature */

    s->y = s->has_trns = 0;
    /* (remaining per-frame state reset elided) */

    /* reset the zlib stream */
    ret = inflateReset(&s->zstream.zstream);
    if (ret != Z_OK)
        return AVERROR_EXTERNAL;

    if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
        goto the_end;
    /* ... output_frame(), reference bookkeeping ... */
}
#endif

#if CONFIG_APNG_DECODER
static int decode_frame_apng(AVCodecContext *avctx, AVFrame *p,
                             int *got_frame, AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    int ret;

    clear_frame_metadata(s);

    if (!(s->hdr_state & PNG_IHDR)) {
        /* the IHDR and the other header chunks travel in extradata */
        if (!avctx->extradata_size)
            return AVERROR_INVALIDDATA;
        if ((ret = inflateReset(&s->zstream.zstream)) != Z_OK)
            return AVERROR_EXTERNAL;
        bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
        if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
            return ret;
    }

    /* reset state for a new frame */
    if ((ret = inflateReset(&s->zstream.zstream)) != Z_OK)
        return AVERROR_EXTERNAL;
    /* ... decode the packet's fcTL/fdAT chunks via decode_frame_common() ... */
}
#endif
/* update_thread_context(): carry the previous-picture reference over to the
 * next frame thread */
    if (src_frame && src_frame->f->data[0]) {
        if ((ret = ff_thread_ref_frame(&pdst->last_picture, src_frame)) < 0)
            return ret;
    }

/* png_dec_init(): allocate the frame containers */
    if (!s->last_picture.f || !s->picture.f)
        return AVERROR(ENOMEM);

/* png_dec_end(): free the row buffers */
    s->last_row_size = 0;
    s->tmp_row_size  = 0;
#if CONFIG_APNG_DECODER
const FFCodec ff_apng_decoder = {
    /* ... codec registration: name/id, priv_data_size, init = png_dec_init,
     *     close = png_dec_end, FF_CODEC_DECODE_CB(decode_frame_apng),
     *     UPDATE_THREAD_CONTEXT(...), caps incl. AV_CODEC_CAP_DR1 and
     *     AV_CODEC_CAP_FRAME_THREADS, FF_CODEC_CAP_ICC_PROFILES ... */
};
#endif

#if CONFIG_PNG_DECODER
const FFCodec ff_png_decoder = {
    /* ... same shape, using FF_CODEC_DECODE_CB(decode_frame_png) and adding
     *     FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM ... */
};
#endif