202     unsigned long dest_len = uncompressed_size;
204     if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK ||
205         dest_len != uncompressed_size)
210     s->dsp.predictor(td->tmp, uncompressed_size);
211     s->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size);
216 static int rle(uint8_t *dst, const uint8_t *src,
217                int compressed_size, int uncompressed_size)
220     const int8_t *s = src;
221     int ssize = compressed_size;
222     int dsize = uncompressed_size;
223     uint8_t *dend = d + dsize;
232         if ((dsize -= count) < 0 ||
233             (ssize -= count + 1) < 0)
241         if ((dsize -= count) < 0 ||
261     rle(td->tmp, src, compressed_size, uncompressed_size);
265     ctx->dsp.predictor(td->tmp, uncompressed_size);
266     ctx->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size);
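The rle() helper above follows the usual OpenEXR run-length convention: each run starts with a signed count byte, a negative value means "copy -count literal bytes", and a non-negative value means "repeat the next byte count + 1 times". A minimal standalone sketch of that scheme (names illustrative, not part of exr.c):

/* Decode EXR-style RLE into dst; returns bytes produced or -1 on overrun. */
#include <stddef.h>
#include <stdint.h>

static ptrdiff_t exr_rle_decode_sketch(uint8_t *dst, size_t dst_size,
                                       const uint8_t *src, size_t src_size)
{
    uint8_t *d = dst, *dend = dst + dst_size;
    const uint8_t *s = src, *send = src + src_size;

    while (s < send) {
        int count = (int8_t)*s++;

        if (count < 0) {                       /* literal run */
            count = -count;
            if (send - s < count || dend - d < count)
                return -1;
            while (count--)
                *d++ = *s++;
        } else {                               /* repeat run */
            count++;
            if (s >= send || dend - d < count)
                return -1;
            while (count--)
                *d++ = *s;
            s++;
        }
    }
    return d - dst;
}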
271 #define USHORT_RANGE (1 << 16)
272 #define BITMAP_SIZE (1 << 13)
279         if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7))))
289 static void apply_lut(const uint16_t *lut, uint16_t *dst, int dsize)
293     for (i = 0; i < dsize; ++i)
294         dst[i] = lut[dst[i]];
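PIZ first records, in a BITMAP_SIZE bitmap, which of the 65536 possible 16-bit values actually occur, then maps them to a dense index range; reverse_lut() builds the index-to-value table that apply_lut() uses to expand decoded indices back to pixel values. A hedged sketch of both directions of that mapping (the forward table is added here only for illustration; exr.c needs just the reverse one for decoding):

#include <stdint.h>
#include <string.h>

#define USHORT_RANGE (1 << 16)
#define BITMAP_SIZE  (1 << 13)   /* 65536 bits / 8 */

static uint16_t build_luts_sketch(const uint8_t *bitmap,
                                  uint16_t *fwd, uint16_t *rev)
{
    int k = 0;

    memset(fwd, 0, USHORT_RANGE * sizeof(*fwd));
    for (int i = 0; i < USHORT_RANGE; i++) {
        if (i == 0 || (bitmap[i >> 3] & (1 << (i & 7)))) {
            fwd[i]   = k;        /* value -> compact index */
            rev[k++] = i;        /* compact index -> value */
        }
    }
    memset(rev + k, 0, (USHORT_RANGE - k) * sizeof(*rev));
    return k - 1;                /* largest index in use ("maxval") */
}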
297 #define HUF_ENCBITS 16 // literal (value) bit length
298 #define HUF_ENCSIZE ((1 << HUF_ENCBITS) + 1) // encoding table size
302     uint64_t c, n[59] = { 0 };
309     for (i = 58; i > 0; --i) {
310         uint64_t nc = ((c + n[i]) >> 1);
319         freq[i] = l | (n[l]++ << 6);
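huf_canonical_code_table() turns per-symbol code lengths into canonical codes and packs each result as (code << 6) | length into the same 64-bit slot, which is why later code masks with & 63 and shifts by 6. For orientation, here is the conventional canonical construction from code lengths (DEFLATE-style numbering; OpenEXR's ordering differs in detail but the idea is the same):

#include <stdint.h>

#define MAX_CODE_LEN 58

static void canonical_codes_sketch(const uint8_t *len, uint64_t *code, int nsyms)
{
    unsigned count[MAX_CODE_LEN + 1] = { 0 };
    uint64_t next[MAX_CODE_LEN + 1];
    uint64_t c = 0;

    for (int i = 0; i < nsyms; i++)
        count[len[i]]++;

    count[0] = 0;
    for (int l = 1; l <= MAX_CODE_LEN; l++) {
        c = (c + count[l - 1]) << 1;   /* first code of this length */
        next[l] = c;
    }

    for (int i = 0; i < nsyms; i++)
        if (len[i] > 0)
            code[i] = next[len[i]]++;  /* exr.c would store (code << 6) | len */
}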
323 #define SHORT_ZEROCODE_RUN 59
324 #define LONG_ZEROCODE_RUN 63
325 #define SHORTEST_LONG_RUN (2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN)
326 #define LONGEST_LONG_RUN (255 + SHORTEST_LONG_RUN)
336     for (; im <= iM; im++) {
342             if (im + zerun > iM + 1)
352             if (im + zerun > iM + 1)
374     for (int i = im; i < iM; i++) {
376         td->he[j].len  = td->freq[i] & 63;
377         td->he[j].code = td->freq[i] >> 6;
378         if (td->he[j].len > 32) {
382         if (td->he[j].len > 0)
393     if (td->run_sym == -1) {
398     td->he[j].sym  = td->run_sym;
399     td->he[j].len  = td->freq[iM] & 63;
400     if (td->he[j].len > 32) {
404     td->he[j].code = td->freq[iM] >> 6;
409                              &td->he[0].len,  sizeof(td->he[0]), sizeof(td->he[0].len),
410                              &td->he[0].code, sizeof(td->he[0]), sizeof(td->he[0].code),
411                              &td->he[0].sym,  sizeof(td->he[0]), sizeof(td->he[0].sym), 0);
415                       int no, uint16_t *out)
428             if (oe == 0 || oe + run > no)
446                           uint16_t *dst, int dst_size)
452     im = bytestream2_get_le32(gb);
453     iM = bytestream2_get_le32(gb);
455     nBits = bytestream2_get_le32(gb);
466     if (!td->freq || !td->he) {
482     return huf_decode(&td->vlc, gb, nBits, td->run_sym, dst_size, dst);
485 static inline void wdec14(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
490     int ai = ls + (hi & 1) + (hi >> 1);
492     int16_t bs = ai - hi;
499 #define A_OFFSET (1 << (NBITS - 1))
500 #define MOD_MASK ((1 << NBITS) - 1)
502 static inline void wdec16(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
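wdec14() undoes one lifting step of the PIZ wavelet: the encoder stores the average and the difference of a pair of samples, and the decoder rebuilds the pair exactly. A small round-trip check, where the forward step is written for illustration only (it is not part of exr.c) and assumes 14-bit input values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* forward 14-bit lifting step (illustrative): store average and difference */
static void wenc14_sketch(uint16_t a, uint16_t b, uint16_t *l, uint16_t *h)
{
    int16_t as = a, bs = b;
    int16_t ms = (as + bs) >> 1;   /* average */
    int16_t ds = as - bs;          /* difference */
    *l = ms;
    *h = ds;
}

/* inverse step, mirroring wdec14() above */
static void wdec14_sketch(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
{
    int16_t ls = l, hs = h;
    int hi = hs;
    int ai = ls + (hi & 1) + (hi >> 1);
    int16_t as = ai;
    int16_t bs = ai - hi;
    *a = as;
    *b = bs;
}

int main(void)
{
    uint16_t l, h, a, b;
    wenc14_sketch(1234, 567, &l, &h);
    wdec14_sketch(l, h, &a, &b);
    assert(a == 1234 && b == 567);   /* lossless round trip for 14-bit data */
    printf("l=%u h=%u -> a=%u b=%u\n", l, h, a, b);
    return 0;
}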
513                        int ny, int oy, uint16_t mx)
515     int w14 = (mx < (1 << 14));
516     int n   = (nx > ny) ? ny : nx;
529         uint16_t *ey = in + oy * (ny - p2);
530         uint16_t i00, i01, i10, i11;
536         for (; py <= ey; py += oy2) {
538             uint16_t *ex = py + ox * (nx - p2);
540             for (; px <= ex; px += ox2) {
541                 uint16_t *p01 = px + ox1;
542                 uint16_t *p10 = px + oy1;
543                 uint16_t *p11 = p10 + ox1;
546                     wdec14(*px, *p10, &i00, &i10);
547                     wdec14(*p01, *p11, &i01, &i11);
548                     wdec14(i00, i01, px, p01);
549                     wdec14(i10, i11, p10, p11);
551                     wdec16(*px, *p10, &i00, &i10);
552                     wdec16(*p01, *p11, &i01, &i11);
553                     wdec16(i00, i01, px, p01);
554                     wdec16(i10, i11, p10, p11);
559                 uint16_t *p10 = px + oy1;
562                     wdec14(*px, *p10, &i00, p10);
564                     wdec16(*px, *p10, &i00, p10);
572             uint16_t *ex = py + ox * (nx - p2);
574             for (; px <= ex; px += ox2) {
575                 uint16_t *p01 = px + ox1;
578                     wdec14(*px, *p01, &i00, p01);
580                     wdec16(*px, *p01, &i00, p01);
595     uint16_t maxval, min_non_zero, max_non_zero;
597     uint16_t *tmp = (uint16_t *)td->tmp;
609     if (!td->bitmap || !td->lut) {
616     min_non_zero = bytestream2_get_le16(&gb);
617     max_non_zero = bytestream2_get_le16(&gb);
623     if (min_non_zero <= max_non_zero)
625                                max_non_zero - min_non_zero + 1);
626     memset(td->bitmap + max_non_zero + 1, 0, BITMAP_SIZE - max_non_zero - 1);
636     for (i = 0; i < s->nb_channels; i++) {
644         for (j = 0; j < pixel_half_size; j++)
646                        td->xsize * pixel_half_size, maxval);
647         ptr += td->xsize * td->ysize * pixel_half_size;
652     out = (uint16_t *)td->uncompressed_data;
655         for (j = 0; j < s->nb_channels; j++) {
662             in = tmp + tmp_offset * td->xsize * td->ysize + i * td->xsize * pixel_half_size;
663             tmp_offset += pixel_half_size;
666             s->bbdsp.bswap16_buf(out, in, td->xsize * pixel_half_size);
668             memcpy(out, in, td->xsize * 2 * pixel_half_size);
670             out += td->xsize * pixel_half_size;
678                             int compressed_size, int uncompressed_size,
681     unsigned long dest_len, expected_len = 0;
682     const uint8_t *in = td->tmp;
686     for (i = 0; i < s->nb_channels; i++) {
688             expected_len += (td->xsize * td->ysize * 3);
689         } else if (s->channels[i].pixel_type == EXR_HALF) {
690             expected_len += (td->xsize * td->ysize * 2);
692             expected_len += (td->xsize * td->ysize * 4);
696     dest_len = expected_len;
698     if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK) {
700     } else if (dest_len != expected_len) {
704     out = td->uncompressed_data;
705     for (i = 0; i < td->ysize; i++)
706         for (c = 0; c < s->nb_channels; c++) {
708             const uint8_t *ptr[4];
714                 ptr[1] = ptr[0] + td->xsize;
715                 ptr[2] = ptr[1] + td->xsize;
716                 in     = ptr[2] + td->xsize;
718                 for (j = 0; j < td->xsize; ++j) {
719                     uint32_t diff = ((unsigned)*(ptr[0]++) << 24) |
720                                     (*(ptr[1]++) << 16) |
728                 ptr[1] = ptr[0] + td->xsize;
729                 in     = ptr[1] + td->xsize;
730                 for (j = 0; j < td->xsize; j++) {
731                     uint32_t diff = (*(ptr[0]++) << 8) | *(ptr[1]++);
739                 ptr[1] = ptr[0] + s->xdelta;
740                 ptr[2] = ptr[1] + s->xdelta;
741                 ptr[3] = ptr[2] + s->xdelta;
742                 in     = ptr[3] + s->xdelta;
744                 for (j = 0; j < s->xdelta; ++j) {
745                     uint32_t diff = ((uint32_t)*(ptr[0]++) << 24) |
746                                     (*(ptr[1]++) << 16) |
747                                     (*(ptr[2]++) << 8 ) |
763     unsigned short shift = (b[ 2] >> 2) & 15;
767     s[ 0] = (b[0] << 8) | b[1];
769     s[ 4] = s[ 0] + ((((b[ 2] << 4) | (b[ 3] >> 4)) & 0x3f) << shift) - bias;
770     s[ 8] = s[ 4] + ((((b[ 3] << 2) | (b[ 4] >> 6)) & 0x3f) << shift) - bias;
774     s[ 5] = s[ 4] + ((((b[ 5] << 4) | (b[ 6] >> 4)) & 0x3f) << shift) - bias;
775     s[ 9] = s[ 8] + ((((b[ 6] << 2) | (b[ 7] >> 6)) & 0x3f) << shift) - bias;
779     s[ 6] = s[ 5] + ((((b[ 8] << 4) | (b[ 9] >> 4)) & 0x3f) << shift) - bias;
780     s[10] = s[ 9] + ((((b[ 9] << 2) | (b[10] >> 6)) & 0x3f) << shift) - bias;
784     s[ 7] = s[ 6] + ((((b[11] << 4) | (b[12] >> 4)) & 0x3f) << shift) - bias;
785     s[11] = s[10] + ((((b[12] << 2) | (b[13] >> 6)) & 0x3f) << shift) - bias;
788     for (i = 0; i < 16; ++i) {
800     s[0] = (b[0] << 8) | b[1];
807     for (i = 1; i < 16; i++)
814     const int8_t *sr = src;
815     int stay_to_uncompress = compressed_size;
816     int nb_b44_block_w, nb_b44_block_h;
817     int index_tl_x, index_tl_y, index_out, index_tmp;
818     uint16_t tmp_buffer[16];
820     int target_channel_offset = 0;
823     nb_b44_block_w = td->xsize / 4;
824     if ((td->xsize % 4) != 0)
827     nb_b44_block_h = td->ysize / 4;
828     if ((td->ysize % 4) != 0)
831     for (c = 0; c < s->nb_channels; c++) {
833             for (iY = 0; iY < nb_b44_block_h; iY++) {
834                 for (iX = 0; iX < nb_b44_block_w; iX++) {
835                     if (stay_to_uncompress < 3)
838                     if (src[compressed_size - stay_to_uncompress + 2] == 0xfc) {
841                         stay_to_uncompress -= 3;
843                         if (stay_to_uncompress < 14)
847                         stay_to_uncompress -= 14;
854                     for (y = index_tl_y; y < FFMIN(index_tl_y + 4, td->ysize); y++) {
855                         for (x = index_tl_x; x < FFMIN(index_tl_x + 4, td->xsize); x++) {
856                             index_out = target_channel_offset * td->xsize + y * td->channel_line_size + 2 * x;
857                             index_tmp = (y - index_tl_y) * 4 + (x - index_tl_x);
858                             td->uncompressed_data[index_out]     = tmp_buffer[index_tmp] & 0xff;
859                             td->uncompressed_data[index_out + 1] = tmp_buffer[index_tmp] >> 8;
864             target_channel_offset += 2;
866             if (stay_to_uncompress < td->ysize * td->xsize * 4)
869             for (y = 0; y < td->ysize; y++) {
870                 index_out = target_channel_offset * td->xsize + y * td->channel_line_size;
871                 memcpy(&td->uncompressed_data[index_out], sr, td->xsize * 4);
874             target_channel_offset += 4;
876             stay_to_uncompress -= td->ysize * td->xsize * 4;
892     } else if ((val >> 8) == 0xff) {
914     float alpha[4], beta[4], theta[4], gamma[4];
932     gamma[0] = theta[0] + theta[1];
933     gamma[1] = theta[3] + theta[2];
934     gamma[2] = theta[3] - theta[2];
935     gamma[3] = theta[0] - theta[1];
937     blk[0 * step] = gamma[0] + beta[0];
938     blk[1 * step] = gamma[1] + beta[1];
939     blk[2 * step] = gamma[2] + beta[2];
940     blk[3 * step] = gamma[3] + beta[3];
942     blk[4 * step] = gamma[3] - beta[3];
943     blk[5 * step] = gamma[2] - beta[2];
944     blk[6 * step] = gamma[1] - beta[1];
945     blk[7 * step] = gamma[0] - beta[0];
950     for (int i = 0; i < 8; i++)
953     for (int i = 0; i < 8; i++) {
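idct_1d() is a factored 8-point inverse DCT and dct_inverse() applies it separably, once across the rows and once across the columns of the 8x8 DWA block (the step argument selects the stride). As an orientation aid only, a naive O(N^2) inverse DCT-II (unscaled, so its output differs from the decoder's internally scaled butterflies by a constant normalization factor) looks like this:

#include <math.h>

static void idct8_reference(const float *in, float *out)
{
    const double pi = 3.14159265358979323846;

    for (int n = 0; n < 8; n++) {
        double sum = in[0] * 0.5;              /* DC term gets half weight */
        for (int k = 1; k < 8; k++)
            sum += in[k] * cos(pi * (n + 0.5) * k / 8.0);
        out[n] = (float)sum;
    }
}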
960                     float *b, float *g, float *r)
962     *r = y + 1.5747f * v;
963     *g = y - 0.1873f * u - 0.4682f * v;
964     *b = y + 1.8556f * u;
983     int64_t version, lo_usize, lo_size;
984     int64_t ac_size, dc_size, rle_usize, rle_csize, rle_raw_size;
985     int64_t ac_count, dc_count, ac_compression;
986     const int dc_w = td->xsize >> 3;
987     const int dc_h = td->ysize >> 3;
991     if (compressed_size <= 88)
1009     if (compressed_size < (uint64_t)(lo_size | ac_size | dc_size | rle_csize) || compressed_size < 88LL + lo_size + ac_size + dc_size + rle_csize
1010         || ac_count > (uint64_t)INT_MAX/2
1015     skip = bytestream2_get_le16(&gb);
1022     if (lo_usize > uncompressed_size)
1028         unsigned long dest_len;
1031         if (ac_count > 3LL * td->xsize * s->scan_lines_per_block)
1034         dest_len = ac_count * 2LL;
1040         switch (ac_compression) {
1047             if (uncompress(td->ac_data, &dest_len, agb.buffer, ac_size) != Z_OK ||
1048                 dest_len != ac_count * 2LL)
1059         unsigned long dest_len;
1062         if (dc_count != dc_w * dc_h * 3)
1065         dest_len = dc_count * 2LL;
1071         if (uncompress(td->dc_data + FFALIGN(dest_len, 64), &dest_len, agb.buffer, dc_size) != Z_OK ||
1072             (dest_len != dc_count * 2LL))
1075         s->dsp.predictor(td->dc_data + FFALIGN(dest_len, 64), dest_len);
1076         s->dsp.reorder_pixels(td->dc_data, td->dc_data + FFALIGN(dest_len, 64), dest_len);
1081     if (rle_raw_size > 0 && rle_csize > 0 && rle_usize > 0) {
1082         unsigned long dest_len = rle_usize;
1089         if (!td->rle_raw_data)
1092         if (uncompress(td->rle_data, &dest_len, gb.buffer, rle_csize) != Z_OK ||
1093             (dest_len != rle_usize))
1096         ret = rle(td->rle_raw_data, td->rle_data, rle_usize, rle_raw_size);
1104     for (int y = 0; y < td->ysize; y += 8) {
1105         for (int x = 0; x < td->xsize; x += 8) {
1106             memset(td->block, 0, sizeof(td->block));
1108             for (int j = 0; j < 3; j++) {
1110                 const int idx = (x >> 3) + (y >> 3) * dc_w + dc_w * dc_h * j;
1111                 uint16_t *dc = (uint16_t *)td->dc_data;
1116                 block[0] = dc_val.f;
1123             const int o = s->nb_channels == 4;
1124             float *bo = ((float *)td->uncompressed_data) +
1125                         y * td->xsize * s->nb_channels + td->xsize * (o + 0) + x;
1126             float *go = ((float *)td->uncompressed_data) +
1127                         y * td->xsize * s->nb_channels + td->xsize * (o + 1) + x;
1128             float *ro = ((float *)td->uncompressed_data) +
1129                         y * td->xsize * s->nb_channels + td->xsize * (o + 2) + x;
1130             float *yb = td->block[0];
1131             float *ub = td->block[1];
1132             float *vb = td->block[2];
1134             for (int yy = 0; yy < 8; yy++) {
1135                 for (int xx = 0; xx < 8; xx++) {
1136                     const int idx = xx + yy * 8;
1138                     convert(yb[idx], ub[idx], vb[idx], &bo[xx], &go[xx], &ro[xx]);
1145                 bo += td->xsize * s->nb_channels;
1146                 go += td->xsize * s->nb_channels;
1147                 ro += td->xsize * s->nb_channels;
1153     if (s->nb_channels < 4)
1156     for (int y = 0; y < td->ysize && td->rle_raw_data; y++) {
1157         uint32_t *ao = ((uint32_t *)td->uncompressed_data) + y * td->xsize * s->nb_channels;
1158         uint8_t *ai0 = td->rle_raw_data + y * td->xsize;
1159         uint8_t *ai1 = td->rle_raw_data + y * td->xsize + rle_raw_size / 2;
1161         for (int x = 0; x < td->xsize; x++) {
1162             uint16_t ha = ai0[x] | (ai1[x] << 8);
1172                         int jobnr, int threadnr)
1177     const uint8_t *channel_buffer[4] = { 0 };
1178     const uint8_t *buf = s->buf;
1179     uint64_t line_offset, uncompressed_size;
1183     uint64_t tile_x, tile_y, tile_level_x, tile_level_y;
1186     int bxmin = 0, axmax = 0, window_xoffset = 0;
1187     int window_xmin, window_xmax, window_ymin, window_ymax;
1188     int data_xoffset, data_yoffset, data_window_offset, xsize, ysize;
1189     int i, x, buf_size = s->buf_size;
1190     int c, rgb_channel_count;
1191     float one_gamma = 1.0f / s->gamma;
1195     line_offset = AV_RL64(s->gb.buffer + jobnr * 8);
1198         if (buf_size < 20 || line_offset > buf_size - 20)
1201         src = buf + line_offset + 20;
1202         if (s->is_multipart)
1211         if (data_size <= 0 || data_size > buf_size - line_offset - 20)
1214         if (tile_level_x || tile_level_y) {
1219         if (tile_x && s->tile_attr.xSize + (int64_t)FFMAX(s->xmin, 0) >= INT_MAX / tile_x)
1221         if (tile_y && s->tile_attr.ySize + (int64_t)FFMAX(s->ymin, 0) >= INT_MAX / tile_y)
1224         line = s->ymin + s->tile_attr.ySize * tile_y;
1225         col  = s->tile_attr.xSize * tile_x;
1228             s->xmin + col < s->xmin || s->xmin + col > s->xmax)
1231         td->ysize = FFMIN(s->tile_attr.ySize, s->ydelta - tile_y * s->tile_attr.ySize);
1232         td->xsize = FFMIN(s->tile_attr.xSize, s->xdelta - tile_x * s->tile_attr.xSize);
1234         if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX ||
1238         td->channel_line_size = td->xsize * s->current_channel_offset;
1239         uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;
1241         if (buf_size < 8 || line_offset > buf_size - 8)
1244         src = buf + line_offset + 8;
1245         if (s->is_multipart)
1253         if (data_size <= 0 || data_size > buf_size - line_offset - 8)
1256         td->ysize = FFMIN(s->scan_lines_per_block, s->ymax - line + 1);
1257         td->xsize = s->xdelta;
1259         if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX ||
1263         td->channel_line_size = td->xsize * s->current_channel_offset;
1264         uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;
1266         if ((s->compression == EXR_RAW && (data_size != uncompressed_size ||
1267                                            line_offset > buf_size - uncompressed_size)) ||
1268             (s->compression != EXR_RAW && (data_size > uncompressed_size ||
1269                                            line_offset > buf_size - data_size))) {
1278     xsize = window_xmax - window_xmin;
1279     ysize = window_ymax - window_ymin;
1282     if (xsize <= 0 || ysize <= 0)
1289     window_xoffset = FFMAX(0, s->xmin);
1291     bxmin = window_xoffset * step;
1295     if (col + td->xsize == s->xdelta) {
1296         window_xmax = avctx->width;
1304     if (data_size < uncompressed_size || s->is_tile) {
1310     if (data_size < uncompressed_size) {
1312                                &td->uncompressed_size, uncompressed_size + 64);
1314         if (!td->uncompressed_data)
1318         switch (s->compression) {
1345         src = td->uncompressed_data;
1351     data_window_offset = (data_yoffset * td->channel_line_size) + data_xoffset;
1354         channel_buffer[0] = src + (td->xsize * s->channel_offsets[0]) + data_window_offset;
1355         channel_buffer[1] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset;
1356         channel_buffer[2] = src + (td->xsize * s->channel_offsets[2]) + data_window_offset;
1357         rgb_channel_count = 3;
1359         channel_buffer[0] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset;
1360         rgb_channel_count = 1;
1362     if (s->channel_offsets[3] >= 0)
1363         channel_buffer[3] = src + (td->xsize * s->channel_offsets[3]) + data_window_offset;
1367         int channel_count = s->channel_offsets[3] >= 0 ? 4 : rgb_channel_count;
1369             channel_buffer[1] = channel_buffer[0];
1370             channel_buffer[2] = channel_buffer[0];
1373         for (c = 0; c < channel_count; c++) {
1374             int plane = s->desc->comp[c].plane;
1375             ptr = p->data[plane] + window_ymin * p->linesize[plane] + (window_xmin * 4);
1377             for (i = 0; i < ysize; i++, ptr += p->linesize[plane]) {
1381                 src = channel_buffer[c];
1385                 memset(ptr_x, 0, bxmin);
1386                 ptr_x += window_xoffset;
1393                     if (trc_func && c < 3) {
1394                         for (x = 0; x < xsize; x++) {
1395                             t.i = bytestream_get_le32(&src);
1396                             t.f = trc_func(t.f);
1399                     } else if (one_gamma != 1.f) {
1400                         for (x = 0; x < xsize; x++) {
1401                             t.i = bytestream_get_le32(&src);
1402                             if (t.f > 0.0f && c < 3)
1403                                 t.f = powf(t.f, one_gamma);
1407                         for (x = 0; x < xsize; x++) {
1408                             t.i = bytestream_get_le32(&src);
1414                     if (c < 3 || !trc_func) {
1415                         for (x = 0; x < xsize; x++) {
1416                             *ptr_x++ = s->gamma_table[bytestream_get_le16(&src)];
1419                         for (x = 0; x < xsize; x++) {
1427                 memset(ptr_x, 0, axmax);
1428                 channel_buffer[c] += td->channel_line_size;
1434         ptr = p->data[0] + window_ymin * p->linesize[0] + (window_xmin * s->desc->nb_components * 2);
1436         for (i = 0; i < ysize; i++, ptr += p->linesize[0]) {
1439             const uint8_t *rgb[3];
1442             for (c = 0; c < rgb_channel_count; c++) {
1443                 rgb[c] = channel_buffer[c];
1446             if (channel_buffer[3])
1447                 a = channel_buffer[3];
1449             ptr_x = (uint16_t *) ptr;
1452             memset(ptr_x, 0, bxmin);
1453             ptr_x += window_xoffset * s->desc->nb_components;
1455             for (x = 0; x < xsize; x++) {
1456                 for (c = 0; c < rgb_channel_count; c++) {
1457                     *ptr_x++ = bytestream_get_le32(&rgb[c]) >> 16;
1460                 if (channel_buffer[3])
1461                     *ptr_x++ = bytestream_get_le32(&a) >> 16;
1465             memset(ptr_x, 0, axmax);
1467             channel_buffer[0] += td->channel_line_size;
1468             channel_buffer[1] += td->channel_line_size;
1469             channel_buffer[2] += td->channel_line_size;
1470             if (channel_buffer[3])
1471                 channel_buffer[3] += td->channel_line_size;
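The offsets of +8 and +20 that decode_block() steps over correspond to the OpenEXR chunk headers: a scan-line chunk carries { y coordinate, data size } (8 bytes) and a tile chunk carries { tile x, tile y, level x, level y, data size } (20 bytes), each preceded by a 4-byte part number in multi-part files, all stored as little-endian 32-bit integers. A hedged sketch of that header layout (reader and struct names are illustrative):

#include <stdint.h>

static uint32_t rl32(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

typedef struct ChunkHead {
    int32_t  tile_x, tile_y, level_x, level_y; /* tiles only */
    int32_t  y;                                /* scan lines only */
    uint32_t data_size;
    const uint8_t *payload;
} ChunkHead;

static void parse_chunk_head_sketch(const uint8_t *chunk, int is_tile,
                                    int is_multipart, ChunkHead *h)
{
    if (is_multipart)
        chunk += 4;                 /* part number, skipped here */

    if (is_tile) {
        h->tile_x    = rl32(chunk +  0);
        h->tile_y    = rl32(chunk +  4);
        h->level_x   = rl32(chunk +  8);
        h->level_y   = rl32(chunk + 12);
        h->data_size = rl32(chunk + 16);
        h->payload   = chunk + 20;
    } else {
        h->y         = rl32(chunk + 0);
        h->data_size = rl32(chunk + 4);
        h->payload   = chunk + 8;
    }
}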
1483         if (!bytestream2_peek_byte(gb))
1487         for (int i = 0; i < 2; i++)
1488             while (bytestream2_get_byte(gb) != 0);
1508                                  const char *value_name,
1509                                  const char *value_type,
1510                                  unsigned int minimum_length)
1516         !strcmp(gb->buffer, value_name)) {
1518         gb->buffer += strlen(value_name) + 1;
1519         if (!strcmp(gb->buffer, value_type)) {
1520             gb->buffer += strlen(value_type) + 1;
1521             var_size = bytestream2_get_le32(gb);
1527         gb->buffer -= strlen(value_name) + 1;
1529                "Unknown data type %s for header variable %s.\n",
1530                value_type, value_name);
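Each EXR header attribute is stored as a NUL-terminated name, a NUL-terminated type string, a little-endian 32-bit payload size, and then the payload; an empty name terminates the header. That is the layout check_header_variable() matches against. A minimal standalone walker over it, assuming a well-formed buffer (function name is illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const uint8_t *next_attribute_sketch(const uint8_t *p, const uint8_t *end)
{
    const char *name, *type;
    uint32_t size;

    if (p >= end || *p == 0)
        return NULL;                              /* empty name ends the header */

    name = (const char *)p;
    p += strlen(name) + 1;
    type = (const char *)p;
    p += strlen(type) + 1;

    if (end - p < 4)
        return NULL;
    size = p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    p += 4;

    if ((uint32_t)(end - p) < size)
        return NULL;

    printf("attribute %s of type %s, %u bytes\n", name, type, size);
    return p + size;                              /* start of the next attribute */
}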
1542     int layer_match = 0;
1544     int dup_channels = 0;
1546     s->current_channel_offset = 0;
1553     s->channel_offsets[0] = -1;
1554     s->channel_offsets[1] = -1;
1555     s->channel_offsets[2] = -1;
1556     s->channel_offsets[3] = -1;
1562     s->tile_attr.xSize = -1;
1563     s->tile_attr.ySize = -1;
1565     s->is_multipart = 0;
1567     s->current_part = 0;
1574     magic_number = bytestream2_get_le32(gb);
1575     if (magic_number != 20000630) {
1582     version = bytestream2_get_byte(gb);
1588     flags = bytestream2_get_le24(gb);
1593         s->is_multipart = 1;
1603     while (s->is_multipart && s->current_part < s->selected_part &&
1605         if (bytestream2_peek_byte(gb)) {
1609             if (!bytestream2_peek_byte(gb))
1616         if (!bytestream2_peek_byte(gb)) {
1617             if (!s->is_multipart)
1620             if (s->current_part == s->selected_part) {
1622             if (bytestream2_peek_byte(gb)) {
1626                 if (!bytestream2_peek_byte(gb))
1631             if (!bytestream2_peek_byte(gb))
1637                                          "chlist", 38)) >= 0) {
1649             int channel_index = -1;
1652             if (strcmp(s->layer, "") != 0) {
1653                 if (strncmp(ch_gb.buffer, s->layer, strlen(s->layer)) == 0) {
1656                            "Channel match layer : %s.\n", ch_gb.buffer);
1657                     ch_gb.buffer += strlen(s->layer);
1658                     if (*ch_gb.buffer == '.')
1663                            "Channel doesn't match layer : %s.\n", ch_gb.buffer);
1691                            "Unsupported channel %.256s.\n", ch_gb.buffer);
1697                    bytestream2_get_byte(&ch_gb))
1706             current_pixel_type = bytestream2_get_le32(&ch_gb);
1709                        current_pixel_type);
1715             xsub = bytestream2_get_le32(&ch_gb);
1716             ysub = bytestream2_get_le32(&ch_gb);
1718             if (xsub != 1 || ysub != 1) {
1720                        "Subsampling %dx%d",
1726             if (channel_index >= 0 && s->channel_offsets[channel_index] == -1) {
1728                     s->pixel_type != current_pixel_type) {
1730                            "RGB channels not of the same depth.\n");
1734                 s->pixel_type                     = current_pixel_type;
1735                 s->channel_offsets[channel_index] = s->current_channel_offset;
1736             } else if (channel_index >= 0) {
1738                        "Multiple channels with index %d.\n", channel_index);
1739                 if (++dup_channels > 10) {
1751             channel = &s->channels[s->nb_channels - 1];
1752             channel->pixel_type = current_pixel_type;
1756             if (current_pixel_type == EXR_HALF) {
1757                 s->current_channel_offset += 2;
1759                 s->current_channel_offset += 4;
1766         if (FFMIN3(s->channel_offsets[0],
1767                    s->channel_offsets[1],
1768                    s->channel_offsets[2]) < 0) {
1769             if (s->channel_offsets[0] < 0)
1771             if (s->channel_offsets[1] < 0)
1773             if (s->channel_offsets[2] < 0)
1785             int xmin, ymin, xmax, ymax;
1791             xmin = bytestream2_get_le32(gb);
1792             ymin = bytestream2_get_le32(gb);
1793             xmax = bytestream2_get_le32(gb);
1794             ymax = bytestream2_get_le32(gb);
1796             if (xmin > xmax || ymin > ymax ||
1797                 ymax == INT_MAX || xmax == INT_MAX ||
1798                 (unsigned)xmax - xmin >= INT_MAX ||
1799                 (unsigned)ymax - ymin >= INT_MAX) {
1807             s->xdelta = (s->xmax - s->xmin) + 1;
1808             s->ydelta = (s->ymax - s->ymin) + 1;
1812                                              "box2i", 34)) >= 0) {
1820             sx = bytestream2_get_le32(gb);
1821             sy = bytestream2_get_le32(gb);
1822             dx = bytestream2_get_le32(gb);
1823             dy = bytestream2_get_le32(gb);
1825             s->w = (unsigned)dx - sx + 1;
1826             s->h = (unsigned)dy - sy + 1;
1830                                              "lineOrder", 25)) >= 0) {
1837             line_order = bytestream2_get_byte(gb);
1839             if (line_order > 2) {
1847                                              "float", 31)) >= 0) {
1853             s->sar = bytestream2_get_le32(gb);
1857                                              "compression", 29)) >= 0) {
1864                 s->compression = bytestream2_get_byte(gb);
1868                        "Found more than one compression attribute.\n");
1873                                              "tiledesc", 22)) >= 0) {
1878                        "Found tile attribute and scanline flags. Exr will be interpreted as scanline.\n");
1880             s->tile_attr.xSize = bytestream2_get_le32(gb);
1881             s->tile_attr.ySize = bytestream2_get_le32(gb);
1883             tileLevel = bytestream2_get_byte(gb);
1884             s->tile_attr.level_mode  = tileLevel & 0x0f;
1885             s->tile_attr.level_round = (tileLevel >> 4) & 0x0f;
1889                        s->tile_attr.level_mode);
1896                        s->tile_attr.level_round);
1903                                              "string", 1)) >= 0) {
1904             uint8_t key[256] = { 0 };
1911                                              "rational", 33)) >= 0) {
1917             s->avctx->framerate.num = bytestream2_get_le32(gb);
1918             s->avctx->framerate.den = bytestream2_get_le32(gb);
1924             s->chunk_count = bytestream2_get_le32(gb);
1928                                              "string", 16)) >= 0) {
1929             uint8_t key[256] = { 0 };
1932             if (strncmp("scanlineimage", key, var_size) &&
1933                 strncmp("tiledimage", key, var_size))
1938                                              "preview", 16)) >= 0) {
1939             uint32_t pw = bytestream2_get_le32(gb);
1940             uint32_t ph = bytestream2_get_le32(gb);
1941             uint64_t psize = pw * ph;
1942             if (psize > INT64_MAX / 4)
1963             uint8_t name[256] = { 0 };
1964             uint8_t type[256] = { 0 };
1965             uint8_t value[8192] = { 0 };
1969                    bytestream2_peek_byte(gb) && i < 255) {
1970                 name[i++] = bytestream2_get_byte(gb);
1976                    bytestream2_peek_byte(gb) && i < 255) {
1977                 type[i++] = bytestream2_get_byte(gb);
1980             size = bytestream2_get_le32(gb);
1985             if (!strcmp(type, "string"))
1997     if (s->tile_attr.xSize < 1 || s->tile_attr.ySize < 1) {
2010     frame->metadata = metadata;
2027     int i, y, ret, ymax;
2031     uint64_t start_offset_table;
2032     uint64_t start_next_scanline;
2041         s->current_channel_offset *= 2;
2042         for (int i = 0; i < 4; i++)
2043             s->channel_offsets[i] *= 2;
2046     switch (s->pixel_type) {
2049         if (s->channel_offsets[3] >= 0) {
2065         if (s->channel_offsets[3] >= 0) {
2087     switch (s->compression) {
2091         s->scan_lines_per_block = 1;
2095         s->scan_lines_per_block = 16;
2101         s->scan_lines_per_block = 32;
2104         s->scan_lines_per_block = 256;
2113     if (s->xmin > s->xmax || s->ymin > s->ymax ||
2114         s->ydelta == 0xFFFFFFFF || s->xdelta == 0xFFFFFFFF) {
2132         planes = s->desc->nb_components;
2133         out_line_size = avctx->width * 4;
2136         out_line_size = avctx->width * 2 * s->desc->nb_components;
2140         nb_blocks = ((s->xdelta + s->tile_attr.xSize - 1) / s->tile_attr.xSize) *
2141                     ((s->ydelta + s->tile_attr.ySize - 1) / s->tile_attr.ySize);
2143         nb_blocks = (s->ydelta + s->scan_lines_per_block - 1) /
2144                     s->scan_lines_per_block;
2154     if (!s->is_tile && bytestream2_peek_le64(gb) == 0) {
2160         if (!s->offset_table)
2164         start_next_scanline = start_offset_table + nb_blocks * 8;
2167         for (y = 0; y < nb_blocks; y++) {
2169             bytestream2_put_le64(&offset_table_writer, start_next_scanline);
2173             start_next_scanline += (bytestream2_get_le32(gb) + 8);
2179     s->buf      = avpkt->data;
2180     s->buf_size = avpkt->size;
2184         ptr = picture->data[i];
2185         for (y = 0; y < FFMIN(s->ymin, s->h); y++) {
2186             memset(ptr, 0, out_line_size);
2191     s->picture = picture;
2195     ymax = FFMAX(0, s->ymax + 1);
2197     if (ymax < avctx->height)
2200         for (y = ymax; y < avctx->height; y++) {
2201             memset(ptr, 0, out_line_size);
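Some writers leave the scan-line offset table zeroed out; the loop around lines 2154-2173 above rebuilds it by walking the chunks, where each next offset is the current one plus the 8-byte chunk header plus the stored data size. A hedged sketch of that rebuild, with bounds checks omitted and illustrative names:

#include <stdint.h>

static void rebuild_offsets_sketch(const uint8_t *buf, uint64_t table_end,
                                   uint64_t *offsets, int nb_blocks)
{
    uint64_t next = table_end;                /* first chunk follows the table */

    for (int i = 0; i < nb_blocks; i++) {
        const uint8_t *chunk = buf + next;
        uint32_t data_size = chunk[4] | (chunk[5] << 8) |
                             ((uint32_t)chunk[6] << 16) | ((uint32_t)chunk[7] << 24);
        offsets[i] = next;
        next += 8 + data_size;                /* y (4) + size (4) + payload */
    }
}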
2217     float one_gamma = 1.0f / s->gamma;
2232         for (i = 0; i < 65536; ++i) {
2234             t.f = trc_func(t.f);
2235             s->gamma_table[i] = t;
2238         if (one_gamma > 0.9999f && one_gamma < 1.0001f) {
2239             for (i = 0; i < 65536; ++i) {
2243             for (i = 0; i < 65536; ++i) {
2247                     s->gamma_table[i] = t;
2249                     t.f = powf(t.f, one_gamma);
2250                     s->gamma_table[i] = t;
2258     if (!s->thread_data)
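decode_init() precomputes, for all 65536 half-float bit patterns, the float result of applying either the requested transfer function or a plain power-law gamma, so the per-pixel half path in decode_block() is a single table lookup. A hedged sketch of the same idea; half_to_float() here is a generic conversion written for the example, not FFmpeg's Half2FloatTables helper:

#include <math.h>
#include <stdint.h>
#include <string.h>

static float half_to_float(uint16_t h)
{
    uint32_t sign = (uint32_t)(h >> 15) << 31;
    uint32_t exp  = (h >> 10) & 0x1f;
    uint32_t man  = h & 0x3ff;
    uint32_t bits;
    float f;

    if (exp == 0 && man == 0) {
        bits = sign;                                  /* signed zero */
    } else if (exp == 0) {                            /* subnormal: renormalize */
        int e = -1;
        do { man <<= 1; e++; } while (!(man & 0x400));
        bits = sign | ((uint32_t)(127 - 15 - e) << 23) | ((man & 0x3ff) << 13);
    } else if (exp == 31) {
        bits = sign | 0x7f800000u | (man << 13);      /* inf / NaN */
    } else {
        bits = sign | ((exp - 15 + 127) << 23) | (man << 13);
    }
    memcpy(&f, &bits, sizeof(f));
    return f;
}

static void build_gamma_lut_sketch(float lut[65536], float gamma)
{
    const float one_gamma = 1.0f / gamma;

    for (int i = 0; i < 65536; i++) {
        float v = half_to_float((uint16_t)i);
        lut[i] = (v > 0.0f) ? powf(v, one_gamma) : v;  /* leave <= 0 untouched */
    }
}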
2290 #define OFFSET(x) offsetof(EXRContext, x)
2291 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
2293     { "layer", "Set the decoding layer", OFFSET(layer),
2295     { "part", "Set the decoding part", OFFSET(selected_part),
2297     { "gamma", "Set the float gamma value when decoding", OFFSET(gamma),
2301     { "apply_trc", "color transfer characteristics to apply to EXR linear input", OFFSET(apply_trc_type),
2303     { "bt709",        "BT.709",           0,
2305     { "gamma",        "gamma",            0,
2307     { "gamma22",      "BT.470 M",         0,
2309     { "gamma28",      "BT.470 BG",        0,
2311     { "smpte170m",    "SMPTE 170 M",      0,
2313     { "smpte240m",    "SMPTE 240 M",      0,
2315     { "linear",       "Linear",           0,
2319     { "log_sqrt",     "Log square root",  0,
2321     { "iec61966_2_4", "IEC 61966-2-4",    0,
2323     { "bt1361",       "BT.1361",          0,
2325     { "iec61966_2_1", "IEC 61966-2-1",    0,
2327     { "bt2020_10bit", "BT.2020 - 10 bit", 0,
2329     { "bt2020_12bit", "BT.2020 - 12 bit", 0,
2331     { "smpte2084",    "SMPTE ST 2084",    0,
2333     { "smpte428_1",   "SMPTE ST 428-1",   0,
enum ExrTileLevelRound level_round
#define AV_LOG_WARNING
Something somehow does not look correct.
static const struct @346 planes[]
static int get_bits_left(GetBitContext *gb)
AVColorTransferCharacteristic
Color Transfer Characteristic.
#define u(width, name, range_min, range_max)
uint8_t * uncompressed_data
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
static int decode_header(EXRContext *s, AVFrame *frame)
int av_strcasecmp(const char *a, const char *b)
Locale-independent case-insensitive compare.
static int get_bits_count(const GetBitContext *s)
#define AV_PIX_FMT_FLAG_FLOAT
The pixel format contains IEEE-754 floating point values.
static int decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_frame, AVPacket *avpkt)
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
This structure describes decoded (raw) audio or video data.
@ AVCOL_TRC_NB
Not part of ABI.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
static av_cold int decode_init(AVCodecContext *avctx)
static uint16_t reverse_lut(const uint8_t *bitmap, uint16_t *lut)
@ AVCOL_TRC_BT2020_12
ITU-R BT2020 for 12-bit system.
static int piz_uncompress(const EXRContext *s, const uint8_t *src, int ssize, int dsize, EXRThreadData *td)
static const AVOption options[]
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static int b44_uncompress(const EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
static int rle(uint8_t *dst, const uint8_t *src, int compressed_size, int uncompressed_size)
static void convert(float y, float u, float v, float *b, float *g, float *r)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
av_csp_trc_function av_csp_trc_func_from_id(enum AVColorTransferCharacteristic trc)
Determine the function needed to apply the given AVColorTransferCharacteristic to linear input.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
EXRTileAttribute tile_attr
AVCodec p
The public AVCodec.
static void apply_lut(const uint16_t *lut, uint16_t *dst, int dsize)
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
enum AVDiscard skip_frame
Skip decoding for selected frames.
static av_always_inline float av_int2float(uint32_t i)
Reinterpret a 32-bit integer as a float.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
@ AVCOL_TRC_GAMMA28
also ITU-R BT470BG
static double val(void *priv, double ch)
static av_always_inline float scale(float x, float s)
#define AV_PIX_FMT_GRAY16
@ AVCOL_TRC_LOG_SQRT
"Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
static __device__ float fabsf(float a)
const FFCodec ff_exr_decoder
static int huf_build_dec_table(const EXRContext *s, EXRThreadData *td, int im, int iM)
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
static float to_linear(float x, float scale)
static av_cold int decode_end(AVCodecContext *avctx)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
enum ExrCompr compression
#define FF_CODEC_DECODE_CB(func)
static int check_header_variable(EXRContext *s, const char *value_name, const char *value_type, unsigned int minimum_length)
Check if the variable name corresponds to its data type.
static void huf_canonical_code_table(uint64_t *freq)
@ AVCOL_TRC_BT1361_ECG
ITU-R BT1361 Extended Colour Gamut.
int current_channel_offset
int(* init)(AVBSFContext *ctx)
static int decode_block(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
enum ExrPixelType pixel_type
int64_t max_pixels
The number of pixels per image to maximally accept.
#define SHORTEST_LONG_RUN
static void skip_header_chunk(EXRContext *s)
#define AV_PIX_FMT_GRAYF32
#define CODEC_LONG_NAME(str)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
const AVPixFmtDescriptor * desc
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
#define av_realloc_f(p, o, n)
#define AV_PIX_FMT_RGBA64
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static int bias(int x, int c)
#define LONG_ZEROCODE_RUN
#define SHORT_ZEROCODE_RUN
@ AVCOL_TRC_IEC61966_2_4
IEC 61966-2-4.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
av_cold void ff_exrdsp_init(ExrDSPContext *c)
@ AVCOL_TRC_BT2020_10
ITU-R BT2020 for 10-bit system.
static void unpack_14(const uint8_t b[14], uint16_t s[16])
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static av_always_inline int bytestream2_tell(GetByteContext *g)
enum ExrPixelType pixel_type
enum ExrTileLevelMode level_mode
EXRThreadData * thread_data
enum AVPictureType pict_type
Picture type of the frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
static void wdec14(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
static void wav_decode(uint16_t *in, int nx, int ox, int ny, int oy, uint16_t mx)
static int dwa_uncompress(const EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
static int shift(int a, int b)
@ AVCOL_TRC_LOG
"Logarithmic transfer characteristic (100:1 range)"
#define bytestream2_get_ne16
#define AV_PIX_FMT_GBRPF32
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static int ac_uncompress(const EXRContext *s, GetByteContext *gb, float *block)
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
static void idct_1d(float *blk, int step)
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
enum AVColorTransferCharacteristic apply_trc_type
static void unpack_3(const uint8_t b[3], uint16_t s[16])
#define AV_LOG_INFO
Standard information.
@ AVCOL_TRC_BT709
also ITU-R BT1361
static void dct_inverse(float *block)
double(* av_csp_trc_function)(double)
Function pointer representing a double -> double transfer function that performs an EOTF transfer inv...
Half2FloatTables h2f_tables
#define i(width, name, range_min, range_max)
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
#define av_malloc_array(a, b)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
const char * name
Name of the codec implementation.
static int huf_unpack_enc_table(GetByteContext *gb, int32_t im, int32_t iM, uint64_t *freq)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void * av_calloc(size_t nmemb, size_t size)
void ff_free_vlc(VLC *vlc)
const uint8_t ff_zigzag_direct[64]
static int huf_decode(VLC *vlc, GetByteContext *gb, int nbits, int run_sym, int no, uint16_t *out)
static uint32_t half2float(uint16_t h, const Half2FloatTables *t)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
void ff_init_half2float_tables(Half2FloatTables *t)
static int rle_uncompress(const EXRContext *ctx, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
union av_intfloat32 gamma_table[65536]
main external API structure.
static int pxr24_uncompress(const EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
static void wdec16(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b)
#define AV_PIX_FMT_GBRAPF32
@ AVCOL_TRC_SMPTE170M
also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
#define avpriv_request_sample(...)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static int huf_uncompress(const EXRContext *s, EXRThreadData *td, GetByteContext *gb, uint16_t *dst, int dst_size)
static const int16_t alpha[]
This structure stores compressed data.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
static int zip_uncompress(const EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td)
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static const AVClass exr_class
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
void * av_realloc(void *ptr, size_t size)
Allocate, reallocate, or free a block of memory.