tiff.c
1 /*
2  * Copyright (c) 2006 Konstantin Shishkov
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * TIFF image decoder
24  * @author Konstantin Shishkov
25  */
26 
27 #include "config.h"
28 #if CONFIG_ZLIB
29 #include <zlib.h>
30 #endif
31 #if CONFIG_LZMA
32 #define LZMA_API_STATIC
33 #include <lzma.h>
34 #endif
35 
36 #include <float.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/avstring.h"
41 #include "libavutil/error.h"
42 #include "libavutil/intreadwrite.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/reverse.h"
46 #include "avcodec.h"
47 #include "bytestream.h"
48 #include "codec_internal.h"
49 #include "decode.h"
50 #include "faxcompr.h"
51 #include "lzw.h"
52 #include "tiff.h"
53 #include "tiff_common.h"
54 #include "tiff_data.h"
55 #include "mjpegdec.h"
56 #include "thread.h"
57 #include "get_bits.h"
58 
59 typedef struct TiffContext {
60  AVClass *class;
61  AVCodecContext *avctx;
62  GetByteContext gb;
63 
64  /* JPEG decoding for DNG */
65  AVCodecContext *avctx_mjpeg; // wrapper context for MJPEG
66  AVPacket *jpkt; // encoded JPEG tile
67  AVFrame *jpgframe; // decoded JPEG tile
68 
69  int get_subimage;
70  uint16_t get_page;
71  int get_thumbnail;
72 
73  enum TiffType tiff_type;
74  int width, height;
75  unsigned int bpp, bppcount;
76  uint32_t palette[256];
77  int palette_is_set;
78  int le;
79  enum TiffCompr compr;
80  enum TiffPhotometric photometric;
81  int planar;
82  int subsampling[2];
83  int fax_opts;
84  int predictor;
85  int fill_order;
86  uint32_t res[4];
87  int is_thumbnail;
88  unsigned last_tag;
89 
90  int is_bayer;
91  int use_color_matrix;
92  uint8_t pattern[4];
93 
94  float analog_balance[4];
95  float as_shot_neutral[4];
96  float as_shot_white[4];
97  float color_matrix[3][4];
98  float camera_calibration[4][4];
99  float premultiply[4];
100  float black_level[4];
101 
102  unsigned white_level;
103  uint16_t dng_lut[65536];
104 
105  uint32_t sub_ifd;
106  uint16_t cur_page;
107 
108  int strips, rps, sstype;
109  int sot;
110  int stripsizesoff, stripsize, stripoff, strippos;
111  LZWState *lzw;
112 
113  /* Tile support */
114  int is_tiled;
115  int tile_byte_counts_offset, tile_offsets_offset;
116  int tile_width, tile_length;
117 
118  int is_jpeg;
119 
120  uint8_t *deinvert_buf;
121  int deinvert_buf_size;
122  uint8_t *yuv_line;
123  unsigned int yuv_line_size;
124 
125  int geotag_count;
126  TiffGeoTag *geotags;
127 } TiffContext;
128 
129 static const float d65_white[3] = { 0.950456f, 1.f, 1.088754f };
130 
131 static void tiff_set_type(TiffContext *s, enum TiffType tiff_type) {
132  if (s->tiff_type < tiff_type) // Prioritize higher-valued entries
133  s->tiff_type = tiff_type;
134 }
135 
136 static void free_geotags(TiffContext *const s)
137 {
138  for (int i = 0; i < s->geotag_count; i++)
139  av_freep(&s->geotags[i].val);
140  av_freep(&s->geotags);
141  s->geotag_count = 0;
142 }
143 
144 static const char *get_geokey_name(int key)
145 {
146 #define RET_GEOKEY_STR(TYPE, array)\
147  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
148  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
149  return tiff_##array##_name_type_string + tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].offset;
150 
151  RET_GEOKEY_STR(VERT, vert);
152  RET_GEOKEY_STR(PROJ, proj);
153  RET_GEOKEY_STR(GEOG, geog);
154  RET_GEOKEY_STR(CONF, conf);
155 
156  return NULL;
157 }
158 
159 static int get_geokey_type(int key)
160 {
161 #define RET_GEOKEY_TYPE(TYPE, array)\
162  if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
163  key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_name_type_map))\
164  return tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].type;
165  RET_GEOKEY_TYPE(VERT, vert);
166  RET_GEOKEY_TYPE(PROJ, proj);
167  RET_GEOKEY_TYPE(GEOG, geog);
168  RET_GEOKEY_TYPE(CONF, conf);
169 
170  return AVERROR_INVALIDDATA;
171 }
172 
173 static int cmp_id_key(const void *id, const void *k)
174 {
175  return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
176 }
177 
178 static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
179 {
180  const TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
181  if(r)
182  return r->name;
183 
184  return NULL;
185 }
186 
187 static const char *get_geokey_val(int key, uint16_t val)
188 {
189  if (val == TIFF_GEO_KEY_UNDEFINED)
190  return "undefined";
191  if (val == TIFF_GEO_KEY_USER_DEFINED)
192  return "User-Defined";
193 
194 #define RET_GEOKEY_VAL(TYPE, array)\
195  if (val >= TIFF_##TYPE##_OFFSET &&\
196  val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(tiff_##array##_codes))\
197  return tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET];
198 
199  switch (key) {
201  RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
202  break;
204  RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
205  break;
209  RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
210  break;
213  RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
214  break;
216  RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
217  RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
218  break;
220  RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
221  RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
222  break;
224  RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
225  break;
227  RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
228  break;
234  RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
235  break;
237  RET_GEOKEY_VAL(VERT_CS, vert_cs);
238  RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
239  break;
240 
241  }
242 
243  return NULL;
244 }
245 
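/* Helper for GeoTIFF metadata: joins 'count' doubles into one heap-allocated,
 * separator-delimited string. E.g. (illustrative) doubles2str(dp, 3, NULL) on
 * {1.0, 2.5, 3.0} yields "1, 2.5, 3" (default separator ", "); the caller owns
 * the returned buffer and must av_free() it. */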
246 static char *doubles2str(double *dp, int count, const char *sep)
247 {
248  int i;
249  char *ap, *ap0;
250  uint64_t component_len;
251  if (!sep) sep = ", ";
252  component_len = 24LL + strlen(sep);
253  if (count >= (INT_MAX - 1)/component_len)
254  return NULL;
255  ap = av_malloc(component_len * count + 1);
256  if (!ap)
257  return NULL;
258  ap0 = ap;
259  ap[0] = '\0';
260  for (i = 0; i < count; i++) {
261  unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
262  if(l >= component_len) {
263  av_free(ap0);
264  return NULL;
265  }
266  ap += l;
267  }
268  ap0[strlen(ap0) - strlen(sep)] = '\0';
269  return ap0;
270 }
271 
272 static int add_metadata(int count, int type,
273  const char *name, const char *sep, TiffContext *s, AVFrame *frame)
274 {
275  switch(type) {
276  case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
277  case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
278  case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
279  default : return AVERROR_INVALIDDATA;
280  };
281 }
282 
283 /**
284  * Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
285  */
286 static uint16_t av_always_inline dng_process_color16(uint16_t value,
287  const uint16_t *lut,
288  float black_level,
289  float scale_factor)
290 {
291  float value_norm;
292 
293  // Lookup table lookup
294  value = lut[value];
295 
296  // Black level subtraction
297  // Color scaling
298  value_norm = ((float)value - black_level) * scale_factor;
299 
300  value = av_clip_uint16(lrintf(value_norm));
301 
302  return value;
303 }
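/* Worked example for dng_process_color16() above (illustrative numbers): with an
 * identity LUT, black_level = 256 and white_level = 4096 the caller passes
 * scale_factor = 65535.f / (4096 - 256), so a raw sample of 2560 maps to
 * lrintf((2560 - 256) * 65535.f / 3840) = 39321, i.e. 60% of full scale. */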
304 
305 static uint16_t av_always_inline dng_process_color8(uint16_t value,
306  const uint16_t *lut,
307  float black_level,
308  float scale_factor)
309 {
310  return dng_process_color16(value, lut, black_level, scale_factor) >> 8;
311 }
312 
313 static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride,
314  const uint8_t *src, int src_stride, int width, int height,
315  int is_single_comp, int is_u16, int odd_line)
316 {
317  float scale_factor[4];
318  int line, col;
319 
320  if (s->is_bayer) {
321  for (int i = 0; i < 4; i++)
322  scale_factor[i] = s->premultiply[s->pattern[i]] * 65535.f / (s->white_level - s->black_level[i]);
323  } else {
324  for (int i = 0; i < 4; i++)
325  scale_factor[i] = s->premultiply[ i ] * 65535.f / (s->white_level - s->black_level[i]);
326  }
327 
328  if (is_single_comp) {
329  if (!is_u16)
330  return; /* <= 8bpp unsupported */
331 
332  /* The image is double the width and half the height of what we need; each input row
333  holds two rows of the output (split vertically in the middle). */
334  for (line = 0; line < height / 2; line++) {
335  uint16_t *dst_u16 = (uint16_t *)dst;
336  const uint16_t *src_u16 = (const uint16_t *)src;
337 
338  /* Blit the first half of the input row to the initial row of the output */
339  for (col = 0; col < width; col++)
340  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[col&1], scale_factor[col&1]);
341 
342  /* Advance the destination pointer by a row (source pointer remains in the same place) */
343  dst += dst_stride * sizeof(uint16_t);
344  dst_u16 = (uint16_t *)dst;
345 
346  /* Blit the second half of the input row to the next row of the output */
347  for (col = 0; col < width; col++)
348  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut, s->black_level[(col&1) + 2], scale_factor[(col&1) + 2]);
349 
350  dst += dst_stride * sizeof(uint16_t);
351  src += src_stride * sizeof(uint16_t);
352  }
353  } else {
354  /* Input and output image are the same size and the MJpeg decoder has done per-component
355  deinterleaving, so blitting here is straightforward. */
356  if (is_u16) {
357  for (line = 0; line < height; line++) {
358  uint16_t *dst_u16 = (uint16_t *)dst;
359  const uint16_t *src_u16 = (const uint16_t *)src;
360 
361  for (col = 0; col < width; col++)
362  *dst_u16++ = dng_process_color16(*src_u16++, s->dng_lut,
363  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
364  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
365 
366  dst += dst_stride * sizeof(uint16_t);
367  src += src_stride * sizeof(uint16_t);
368  }
369  } else {
370  for (line = 0; line < height; line++) {
371  uint8_t *dst_u8 = dst;
372  const uint8_t *src_u8 = src;
373 
374  for (col = 0; col < width; col++)
375  *dst_u8++ = dng_process_color8(*src_u8++, s->dng_lut,
376  s->black_level[(col&1) + 2 * ((line&1) + odd_line)],
377  scale_factor[(col&1) + 2 * ((line&1) + odd_line)]);
378 
379  dst += dst_stride;
380  src += src_stride;
381  }
382  }
383  }
384 }
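/* Illustration of the is_single_comp layout handled above: for a 4x4 output
 * tile the MJPEG decoder delivers an 8x2 GRAY16 plane; the left half of input
 * row 0 becomes output row 0, its right half becomes output row 1, and input
 * row 1 likewise fills output rows 2 and 3. */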
385 
386 static void av_always_inline horizontal_fill(TiffContext *s,
387  unsigned int bpp, uint8_t* dst,
388  int usePtr, const uint8_t *src,
389  uint8_t c, int width, int offset)
390 {
391  switch (bpp) {
392  case 1:
393  while (--width >= 0) {
394  dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
395  dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
396  dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
397  dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
398  dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
399  dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
400  dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
401  dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
402  }
403  break;
404  case 2:
405  while (--width >= 0) {
406  dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
407  dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
408  dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
409  dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
410  }
411  break;
412  case 4:
413  while (--width >= 0) {
414  dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
415  dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
416  }
417  break;
418  case 10:
419  case 12:
420  case 14: {
421  uint16_t *dst16 = (uint16_t *)dst;
422  int is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
423  uint8_t shift = is_dng ? 0 : 16 - bpp;
424  GetBitContext gb;
425 
426  av_unused int ret = init_get_bits8(&gb, src, width);
427  av_assert1(ret >= 0);
428  for (int i = 0; i < s->width; i++) {
429  dst16[i] = get_bits(&gb, bpp) << shift;
430  }
431  }
432  break;
433  default:
434  if (usePtr) {
435  memcpy(dst + offset, src, width);
436  } else {
437  memset(dst + offset, c, width);
438  }
439  }
440 }
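/* Examples of the expansion done by horizontal_fill() above (illustrative):
 * bpp = 4 turns the source byte 0xA3 into the two bytes 0x0A, 0x03;
 * bpp = 1 turns 0xA3 into the eight bytes 1,0,1,0,0,0,1,1 (MSB first). */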
441 
442 static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
443 {
444  int i;
445 
446  av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
447  if (!s->deinvert_buf)
448  return AVERROR(ENOMEM);
449  for (i = 0; i < size; i++)
450  s->deinvert_buf[i] = ff_reverse[src[i]];
451 
452  return 0;
453 }
454 
455 static void unpack_gray(TiffContext *s, AVFrame *p,
456  const uint8_t *src, int lnum, int width, int bpp)
457 {
458  GetBitContext gb;
459  uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
460 
461  av_unused int ret = init_get_bits8(&gb, src, width);
462  av_assert1(ret >= 0);
463 
464  for (int i = 0; i < s->width; i++) {
465  dst[i] = get_bits(&gb, bpp);
466  }
467 }
468 
469 static void unpack_yuv(TiffContext *s, AVFrame *p,
470  const uint8_t *src, int lnum)
471 {
472  int i, j, k;
473  int w = (s->width - 1) / s->subsampling[0] + 1;
474  uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
475  uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
476  if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
477  for (i = 0; i < w; i++) {
478  for (j = 0; j < s->subsampling[1]; j++)
479  for (k = 0; k < s->subsampling[0]; k++)
480  p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
481  FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
482  *pu++ = *src++;
483  *pv++ = *src++;
484  }
485  }else{
486  for (i = 0; i < w; i++) {
487  for (j = 0; j < s->subsampling[1]; j++)
488  for (k = 0; k < s->subsampling[0]; k++)
489  p->data[0][(lnum + j) * p->linesize[0] +
490  i * s->subsampling[0] + k] = *src++;
491  *pu++ = *src++;
492  *pv++ = *src++;
493  }
494  }
495 }
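/* unpack_yuv() above consumes TIFF's interleaved ("chunky") YCbCr layout: for
 * each horizontal group it reads subsampling[0] * subsampling[1] luma samples
 * (row by row) followed by one Cb and one Cr sample, e.g. Y00 Y01 Y10 Y11 Cb Cr
 * for 2x2 subsampling. */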
496 
497 #if CONFIG_ZLIB
498 static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
499  int size)
500 {
501  z_stream zstream = { 0 };
502  int zret;
503 
504  zstream.next_in = src;
505  zstream.avail_in = size;
506  zstream.next_out = dst;
507  zstream.avail_out = *len;
508  zret = inflateInit(&zstream);
509  if (zret != Z_OK) {
510  av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
511  return zret;
512  }
513  zret = inflate(&zstream, Z_SYNC_FLUSH);
514  inflateEnd(&zstream);
515  *len = zstream.total_out;
516  return zret == Z_STREAM_END ? Z_OK : zret;
517 }
518 
519 static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
520  const uint8_t *src, int size, int width, int lines,
521  int strip_start, int is_yuv)
522 {
523  uint8_t *zbuf;
524  unsigned long outlen;
525  int ret, line;
526  outlen = width * lines;
527  zbuf = av_malloc(outlen);
528  if (!zbuf)
529  return AVERROR(ENOMEM);
530  if (s->fill_order) {
531  if ((ret = deinvert_buffer(s, src, size)) < 0) {
532  av_free(zbuf);
533  return ret;
534  }
535  src = s->deinvert_buf;
536  }
537  ret = tiff_uncompress(zbuf, &outlen, src, size);
538  if (ret != Z_OK) {
539  av_log(s->avctx, AV_LOG_ERROR,
540  "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
541  (unsigned long)width * lines, ret);
542  av_free(zbuf);
543  return AVERROR_UNKNOWN;
544  }
545  src = zbuf;
546  for (line = 0; line < lines; line++) {
547  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
548  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
549  } else {
550  memcpy(dst, src, width);
551  }
552  if (is_yuv) {
553  unpack_yuv(s, p, dst, strip_start + line);
554  line += s->subsampling[1] - 1;
555  }
556  dst += stride;
557  src += width;
558  }
559  av_free(zbuf);
560  return 0;
561 }
562 #endif
563 
564 #if CONFIG_LZMA
565 static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
566  int size)
567 {
568  lzma_stream stream = LZMA_STREAM_INIT;
569  lzma_ret ret;
570 
571  stream.next_in = src;
572  stream.avail_in = size;
573  stream.next_out = dst;
574  stream.avail_out = *len;
575  ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
576  if (ret != LZMA_OK) {
577  av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
578  return ret;
579  }
580  ret = lzma_code(&stream, LZMA_RUN);
581  lzma_end(&stream);
582  *len = stream.total_out;
583  return ret == LZMA_STREAM_END ? LZMA_OK : ret;
584 }
585 
586 static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
587  const uint8_t *src, int size, int width, int lines,
588  int strip_start, int is_yuv)
589 {
590  uint64_t outlen = width * (uint64_t)lines;
591  int ret, line;
592  uint8_t *buf = av_malloc(outlen);
593  if (!buf)
594  return AVERROR(ENOMEM);
595  if (s->fill_order) {
596  if ((ret = deinvert_buffer(s, src, size)) < 0) {
597  av_free(buf);
598  return ret;
599  }
600  src = s->deinvert_buf;
601  }
602  ret = tiff_uncompress_lzma(buf, &outlen, src, size);
603  if (ret != LZMA_OK) {
604  av_log(s->avctx, AV_LOG_ERROR,
605  "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
606  (uint64_t)width * lines, ret);
607  av_free(buf);
608  return AVERROR_UNKNOWN;
609  }
610  src = buf;
611  for (line = 0; line < lines; line++) {
612  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
613  horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
614  } else {
615  memcpy(dst, src, width);
616  }
617  if (is_yuv) {
618  unpack_yuv(s, p, dst, strip_start + line);
619  line += s->subsampling[1] - 1;
620  }
621  dst += stride;
622  src += width;
623  }
624  av_free(buf);
625  return 0;
626 }
627 #endif
628 
629 static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
630  const uint8_t *src, int size, int width, int lines)
631 {
632  int line;
633  int ret;
634 
635  if (s->fill_order) {
636  if ((ret = deinvert_buffer(s, src, size)) < 0)
637  return ret;
638  src = s->deinvert_buf;
639  }
640  ret = ff_ccitt_unpack(s->avctx, src, size, dst, lines, stride,
641  s->compr, s->fax_opts);
642  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
643  for (line = 0; line < lines; line++) {
644  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
645  dst += stride;
646  }
647  return ret;
648 }
649 
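/* Decode one JPEG-compressed DNG tile/strip: the compressed bytes are wrapped
 * in a packet, run through the auxiliary MJPEG decoder context, and the
 * resulting GRAY8/GRAY16 plane is remapped into the output frame at
 * (dst_x, dst_y) by dng_blit() with the linearization LUT applied. */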
650 static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame,
651  int tile_byte_count, int dst_x, int dst_y, int w, int h)
652 {
653  TiffContext *s = avctx->priv_data;
654  uint8_t *dst_data, *src_data;
655  uint32_t dst_offset; /* offset from dst buffer in pixels */
656  int is_single_comp, is_u16, pixel_size;
657  int ret;
658 
659  if (tile_byte_count < 0 || tile_byte_count > bytestream2_get_bytes_left(&s->gb))
660  return AVERROR_INVALIDDATA;
661 
662  /* Prepare a packet and send to the MJPEG decoder */
663  av_packet_unref(s->jpkt);
664  s->jpkt->data = (uint8_t*)s->gb.buffer;
665  s->jpkt->size = tile_byte_count;
666 
667  if (s->is_bayer) {
668  MJpegDecodeContext *mjpegdecctx = s->avctx_mjpeg->priv_data;
669  /* We have to set this information here; there is no way to tell from the JPEG data alone
670  whether it is a DNG-embedded image (and we need that information when decoding it). */
671  mjpegdecctx->bayer = 1;
672  }
673 
674  ret = avcodec_send_packet(s->avctx_mjpeg, s->jpkt);
675  if (ret < 0) {
676  av_log(avctx, AV_LOG_ERROR, "Error submitting a packet for decoding\n");
677  return ret;
678  }
679 
680  ret = avcodec_receive_frame(s->avctx_mjpeg, s->jpgframe);
681  if (ret < 0) {
682  av_log(avctx, AV_LOG_ERROR, "JPEG decoding error: %s.\n", av_err2str(ret));
683 
684  /* Normally skip, error if explode */
685  if (avctx->err_recognition & AV_EF_EXPLODE)
686  return AVERROR_INVALIDDATA;
687  else
688  return 0;
689  }
690 
691  is_u16 = (s->bpp > 8);
692 
694  /* Copy the decoded tile's pixels from 'jpgframe' to 'frame' (the final buffer) */
694 
695  if (s->jpgframe->width != s->avctx_mjpeg->width ||
696  s->jpgframe->height != s->avctx_mjpeg->height ||
697  s->jpgframe->format != s->avctx_mjpeg->pix_fmt)
698  return AVERROR_INVALIDDATA;
699 
700  /* See dng_blit for explanation */
701  if (s->avctx_mjpeg->width == w * 2 &&
702  s->avctx_mjpeg->height == h / 2 &&
703  s->avctx_mjpeg->pix_fmt == AV_PIX_FMT_GRAY16LE) {
704  is_single_comp = 1;
705  } else if (s->avctx_mjpeg->width >= w &&
706  s->avctx_mjpeg->height >= h &&
707  s->avctx_mjpeg->pix_fmt == (is_u16 ? AV_PIX_FMT_GRAY16 : AV_PIX_FMT_GRAY8)
708  ) {
709  is_single_comp = 0;
710  } else
711  return AVERROR_INVALIDDATA;
712 
713  pixel_size = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
714 
715  if (is_single_comp && !is_u16) {
716  av_log(s->avctx, AV_LOG_ERROR, "DNGs with bpp <= 8 and 1 component are unsupported\n");
717  av_frame_unref(s->jpgframe);
718  return AVERROR_PATCHWELCOME;
719  }
720 
721  dst_offset = dst_x + frame->linesize[0] * dst_y / pixel_size;
722  dst_data = frame->data[0] + dst_offset * pixel_size;
723  src_data = s->jpgframe->data[0];
724 
725  dng_blit(s,
726  dst_data,
727  frame->linesize[0] / pixel_size,
728  src_data,
729  s->jpgframe->linesize[0] / pixel_size,
730  w,
731  h,
732  is_single_comp,
733  is_u16, 0);
734 
735  av_frame_unref(s->jpgframe);
736 
737  return 0;
738 }
739 
740 static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
741  const uint8_t *src, int size, int strip_start, int lines)
742 {
743  PutByteContext pb;
744  int c, line, pixels, code, ret;
745  const uint8_t *ssrc = src;
746  int width = ((s->width * s->bpp) + 7) >> 3;
747  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
748  int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
749  (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
750  desc->nb_components >= 3;
751  int is_dng;
752 
753  if (s->planar)
754  width /= s->bppcount;
755 
756  if (size <= 0)
757  return AVERROR_INVALIDDATA;
758 
759  if (is_yuv) {
760  int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
761  s->subsampling[0] * s->subsampling[1] + 7) >> 3;
762  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
763  if (s->yuv_line == NULL) {
764  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
765  return AVERROR(ENOMEM);
766  }
767  dst = s->yuv_line;
768  stride = 0;
769 
770  width = (s->width - 1) / s->subsampling[0] + 1;
771  width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
772  av_assert0(width <= bytes_per_row);
773  av_assert0(s->bpp == 24);
774  }
775  if (s->is_bayer) {
776  av_assert0(width == (s->bpp * s->width + 7) >> 3);
777  }
778  av_assert0(!(s->is_bayer && is_yuv));
779  if (p->format == AV_PIX_FMT_GRAY12) {
780  av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
781  if (s->yuv_line == NULL) {
782  av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
783  return AVERROR(ENOMEM);
784  }
785  dst = s->yuv_line;
786  stride = 0;
787  }
788 
789  if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
790 #if CONFIG_ZLIB
791  return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
792  strip_start, is_yuv);
793 #else
794  av_log(s->avctx, AV_LOG_ERROR,
795  "zlib support not enabled, "
796  "deflate compression not supported\n");
797  return AVERROR(ENOSYS);
798 #endif
799  }
800  if (s->compr == TIFF_LZMA) {
801 #if CONFIG_LZMA
802  return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
803  strip_start, is_yuv);
804 #else
805  av_log(s->avctx, AV_LOG_ERROR,
806  "LZMA support not enabled\n");
807  return AVERROR(ENOSYS);
808 #endif
809  }
810  if (s->compr == TIFF_LZW) {
811  if (s->fill_order) {
812  if ((ret = deinvert_buffer(s, src, size)) < 0)
813  return ret;
814  ssrc = src = s->deinvert_buf;
815  }
816  if (size > 1 && !src[0] && (src[1]&1)) {
817  av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
818  }
819  if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
820  av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
821  return ret;
822  }
823  for (line = 0; line < lines; line++) {
824  pixels = ff_lzw_decode(s->lzw, dst, width);
825  if (pixels < width) {
826  av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
827  pixels, width);
828  return AVERROR_INVALIDDATA;
829  }
830  if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
831  horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
832  if (is_yuv) {
833  unpack_yuv(s, p, dst, strip_start + line);
834  line += s->subsampling[1] - 1;
835  } else if (p->format == AV_PIX_FMT_GRAY12) {
836  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
837  }
838  dst += stride;
839  }
840  return 0;
841  }
842  if (s->compr == TIFF_CCITT_RLE ||
843  s->compr == TIFF_G3 ||
844  s->compr == TIFF_G4) {
845  if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
846  return AVERROR_INVALIDDATA;
847 
848  return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
849  }
850 
851  bytestream2_init(&s->gb, src, size);
852  bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
853 
854  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
855 
856  /* Decode JPEG-encoded DNGs with strips */
857  if (s->compr == TIFF_NEWJPEG && is_dng) {
858  if (s->strips > 1) {
859  av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n");
860  return AVERROR_PATCHWELCOME;
861  }
862  if (!s->is_bayer)
863  return AVERROR_PATCHWELCOME;
864  if ((ret = dng_decode_jpeg(s->avctx, p, s->stripsize, 0, 0, s->width, s->height)) < 0)
865  return ret;
866  return 0;
867  }
868 
869  if (is_dng && stride == 0)
870  return AVERROR_INVALIDDATA;
871 
872  for (line = 0; line < lines; line++) {
873  if (src - ssrc > size) {
874  av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
875  return AVERROR_INVALIDDATA;
876  }
877 
878  if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
879  break;
880  bytestream2_seek_p(&pb, stride * line, SEEK_SET);
881  switch (s->compr) {
882  case TIFF_RAW:
883  if (ssrc + size - src < width)
884  return AVERROR_INVALIDDATA;
885 
886  if (!s->fill_order) {
887  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
888  dst, 1, src, 0, width, 0);
889  } else {
890  int i;
891  for (i = 0; i < width; i++)
892  dst[i] = ff_reverse[src[i]];
893  }
894 
895  /* Color processing for DNG images with uncompressed strips (non-tiled) */
896  if (is_dng) {
897  int is_u16, pixel_size_bytes, pixel_size_bits, elements;
898 
899  is_u16 = (s->bpp / s->bppcount > 8);
900  pixel_size_bits = (is_u16 ? 16 : 8);
901  pixel_size_bytes = (is_u16 ? sizeof(uint16_t) : sizeof(uint8_t));
902 
903  elements = width / pixel_size_bytes * pixel_size_bits / s->bpp * s->bppcount; // need to account for [1, 16] bpp
904  av_assert0 (elements * pixel_size_bytes <= FFABS(stride));
905  dng_blit(s,
906  dst,
907  0, // no stride, only 1 line
908  dst,
909  0, // no stride, only 1 line
910  elements,
911  1,
912  0, // single-component variation is only present in JPEG-encoded DNGs
913  is_u16,
914  (line + strip_start)&1);
915  }
916 
917  src += width;
918  break;
919  case TIFF_PACKBITS:
920  for (pixels = 0; pixels < width;) {
921  if (ssrc + size - src < 2) {
922  av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
923  return AVERROR_INVALIDDATA;
924  }
925  code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
926  if (code >= 0) {
927  code++;
928  if (pixels + code > width ||
929  ssrc + size - src < code) {
930  av_log(s->avctx, AV_LOG_ERROR,
931  "Copy went out of bounds\n");
932  return AVERROR_INVALIDDATA;
933  }
934  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
935  dst, 1, src, 0, code, pixels);
936  src += code;
937  pixels += code;
938  } else if (code != -128) { // -127..-1
939  code = (-code) + 1;
940  if (pixels + code > width) {
941  av_log(s->avctx, AV_LOG_ERROR,
942  "Run went out of bounds\n");
943  return AVERROR_INVALIDDATA;
944  }
945  c = *src++;
946  horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
947  dst, 0, NULL, c, code, pixels);
948  pixels += code;
949  }
950  }
951  if (s->fill_order) {
952  int i;
953  for (i = 0; i < width; i++)
954  dst[i] = ff_reverse[dst[i]];
955  }
956  break;
957  }
958  if (is_yuv) {
959  unpack_yuv(s, p, dst, strip_start + line);
960  line += s->subsampling[1] - 1;
961  } else if (p->format == AV_PIX_FMT_GRAY12) {
962  unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
963  }
964  dst += stride;
965  }
966  return 0;
967 }
968 
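/* Decode a tiled DNG: tiles are visited in row-major order, each tile's offset
 * and byte count are read from the IFD arrays, and dng_decode_jpeg() places the
 * decoded pixels at (pos_x, pos_y); pos_x wraps to 0 and pos_y advances after
 * the right-most tile of each row. */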
969 static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
970  const AVPacket *avpkt)
971 {
972  TiffContext *s = avctx->priv_data;
973  int tile_idx;
974  int tile_offset_offset, tile_offset;
975  int tile_byte_count_offset, tile_byte_count;
976  int tile_count_x, tile_count_y;
977  int tile_width, tile_length;
978  int has_width_leftover, has_height_leftover;
979  int tile_x = 0, tile_y = 0;
980  int pos_x = 0, pos_y = 0;
981  int ret;
982 
983  if (s->tile_width <= 0 || s->tile_length <= 0)
984  return AVERROR_INVALIDDATA;
985 
986  has_width_leftover = (s->width % s->tile_width != 0);
987  has_height_leftover = (s->height % s->tile_length != 0);
988 
989  /* Calculate tile counts (round up) */
990  tile_count_x = (s->width + s->tile_width - 1) / s->tile_width;
991  tile_count_y = (s->height + s->tile_length - 1) / s->tile_length;
992 
993  /* Iterate over the number of tiles */
994  for (tile_idx = 0; tile_idx < tile_count_x * tile_count_y; tile_idx++) {
995  tile_x = tile_idx % tile_count_x;
996  tile_y = tile_idx / tile_count_x;
997 
998  if (has_width_leftover && tile_x == tile_count_x - 1) // If on the right-most tile
999  tile_width = s->width % s->tile_width;
1000  else
1001  tile_width = s->tile_width;
1002 
1003  if (has_height_leftover && tile_y == tile_count_y - 1) // If on the bottom-most tile
1004  tile_length = s->height % s->tile_length;
1005  else
1006  tile_length = s->tile_length;
1007 
1008  /* Read tile offset */
1009  tile_offset_offset = s->tile_offsets_offset + tile_idx * sizeof(int);
1010  bytestream2_seek(&s->gb, tile_offset_offset, SEEK_SET);
1011  tile_offset = ff_tget_long(&s->gb, s->le);
1012 
1013  /* Read tile byte size */
1014  tile_byte_count_offset = s->tile_byte_counts_offset + tile_idx * sizeof(int);
1015  bytestream2_seek(&s->gb, tile_byte_count_offset, SEEK_SET);
1016  tile_byte_count = ff_tget_long(&s->gb, s->le);
1017 
1018  /* Seek to tile data */
1019  bytestream2_seek(&s->gb, tile_offset, SEEK_SET);
1020 
1021  /* Decode JPEG tile and copy it in the reference frame */
1022  ret = dng_decode_jpeg(avctx, frame, tile_byte_count, pos_x, pos_y, tile_width, tile_length);
1023 
1024  if (ret < 0)
1025  return ret;
1026 
1027  /* Advance current positions */
1028  pos_x += tile_width;
1029  if (tile_x == tile_count_x - 1) { // If on the right edge
1030  pos_x = 0;
1031  pos_y += tile_length;
1032  }
1033  }
1034 
1035  /* Frame is ready to be output */
1036  frame->pict_type = AV_PICTURE_TYPE_I;
1037  frame->flags |= AV_FRAME_FLAG_KEY;
1038 
1039  return avpkt->size;
1040 }
1041 
1042 static int init_image(TiffContext *s, AVFrame *frame)
1043 {
1044  int ret;
1045  int create_gray_palette = 0;
1046 
1047  // make sure there is no aliasing in the following switch
1048  if (s->bpp > 128 || s->bppcount >= 10) {
1049  av_log(s->avctx, AV_LOG_ERROR,
1050  "Unsupported image parameters: bpp=%d, bppcount=%d\n",
1051  s->bpp, s->bppcount);
1052  return AVERROR_INVALIDDATA;
1053  }
1054 
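 /* The switch key below packs the sample layout into one integer:
  * is_bayer * 100000 + planar * 10000 + bpp * 10 + bppcount.
  * E.g. case 483 is 48 bits per pixel in 3 components (RGB48), and
  * case 10483 is the same layout stored planar (GBRP16). */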
1055  switch (s->planar * 10000 + s->bpp * 10 + s->bppcount + s->is_bayer * 100000) {
1056  case 11:
1057  if (!s->palette_is_set) {
1058  s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
1059  break;
1060  }
1061  case 21:
1062  case 41:
1063  s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
1064  if (!s->palette_is_set) {
1065  create_gray_palette = 1;
1066  }
1067  break;
1068  case 81:
1069  s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
1070  break;
1071  case 121:
1072  s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
1073  break;
1074  case 100081:
1075  switch (AV_RL32(s->pattern)) {
1076  case 0x02010100:
1077  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
1078  break;
1079  case 0x00010102:
1080  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
1081  break;
1082  case 0x01000201:
1083  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
1084  break;
1085  case 0x01020001:
1086  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
1087  break;
1088  default:
1089  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1090  AV_RL32(s->pattern));
1091  return AVERROR_PATCHWELCOME;
1092  }
1093  break;
1094  case 100101:
1095  case 100121:
1096  case 100141:
1097  case 100161:
1098  switch (AV_RL32(s->pattern)) {
1099  case 0x02010100:
1100  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
1101  break;
1102  case 0x00010102:
1103  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR16;
1104  break;
1105  case 0x01000201:
1106  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG16;
1107  break;
1108  case 0x01020001:
1109  s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG16;
1110  break;
1111  default:
1112  av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
1113  AV_RL32(s->pattern));
1114  return AVERROR_PATCHWELCOME;
1115  }
1116  break;
1117  case 243:
1118  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1119  if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
1120  s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1121  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
1122  s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1123  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
1124  s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
1125  } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
1126  s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
1127  } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
1128  s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
1129  } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
1130  s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
1131  } else {
1132  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
1133  return AVERROR_PATCHWELCOME;
1134  }
1135  } else
1136  s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
1137  break;
1138  case 161:
1139  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
1140  break;
1141  case 162:
1142  s->avctx->pix_fmt = AV_PIX_FMT_YA8;
1143  break;
1144  case 322:
1145  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
1146  break;
1147  case 324:
1148  s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
1149  break;
1150  case 405:
1151  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
1152  s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
1153  else {
1154  av_log(s->avctx, AV_LOG_ERROR,
1155  "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
1156  return AVERROR_PATCHWELCOME;
1157  }
1158  break;
1159  case 483:
1160  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
1161  break;
1162  case 644:
1163  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
1164  break;
1165  case 10243:
1166  s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
1167  break;
1168  case 10324:
1169  s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1170  break;
1171  case 10483:
1172  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
1173  break;
1174  case 10644:
1175  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
1176  break;
1177  case 963:
1178  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBF32LE : AV_PIX_FMT_RGBF32BE;
1179  break;
1180  case 1284:
1181  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBAF32LE : AV_PIX_FMT_RGBAF32BE;
1182  break;
1183  case 10963:
1184  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRPF32LE : AV_PIX_FMT_GBRPF32BE;
1185  break;
1186  case 11284:
1187  s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAPF32LE : AV_PIX_FMT_GBRAPF32BE;
1188  break;
1189  default:
1190  av_log(s->avctx, AV_LOG_ERROR,
1191  "This format is not supported (bpp=%d, bppcount=%d)\n",
1192  s->bpp, s->bppcount);
1193  return AVERROR_INVALIDDATA;
1194  }
1195 
1196  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
1197  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1198  if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
1199  !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
1200  desc->nb_components < 3) {
1201  av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
1202  return AVERROR_INVALIDDATA;
1203  }
1204  }
1205 
1206  if (s->width != s->avctx->width || s->height != s->avctx->height) {
1207  ret = ff_set_dimensions(s->avctx, s->width, s->height);
1208  if (ret < 0)
1209  return ret;
1210  }
1211 
1212  if (s->avctx->skip_frame >= AVDISCARD_ALL)
1213  return 0;
1214 
1215  if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
1216  return ret;
1217  if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1218  if (!create_gray_palette)
1219  memcpy(frame->data[1], s->palette, sizeof(s->palette));
1220  else {
1221  /* make default grayscale pal */
1222  int i;
1223  uint32_t *pal = (uint32_t *)frame->data[1];
1224  for (i = 0; i < 1<<s->bpp; i++)
1225  pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
1226  }
1227  }
1228  return 1;
1229 }
1230 
1231 static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
1232 {
1233  int offset = tag == TIFF_YRES ? 2 : 0;
1234  s->res[offset++] = num;
1235  s->res[offset] = den;
1236  if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
1237  uint64_t num = s->res[2] * (uint64_t)s->res[1];
1238  uint64_t den = s->res[0] * (uint64_t)s->res[3];
1239  if (num > INT64_MAX || den > INT64_MAX) {
1240  num = num >> 1;
1241  den = den >> 1;
1242  }
1243  av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
1244  num, den, INT32_MAX);
1245  if (!s->avctx->sample_aspect_ratio.den)
1246  s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
1247  }
1248 }
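/* Example for set_sar() above (illustrative): XRes = 300/1 and YRes = 150/1
 * reduce to a sample aspect ratio of 150/300 = 1:2, i.e. each pixel is half as
 * wide as it is tall. */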
1249 
1250 static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
1251 {
1252  AVFrameSideData *sd;
1253  GetByteContext gb_temp;
1254  unsigned tag, type, count, off, value = 0, value2 = 1; // value2 is a denominator so init. to 1
1255  int i, start;
1256  int pos;
1257  int ret;
1258  double *dp;
1259 
1260  ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
1261  if (ret < 0) {
1262  goto end;
1263  }
1264  if (tag <= s->last_tag)
1265  return AVERROR_INVALIDDATA;
1266 
1267  // Ignore TIFF_STRIP_SIZE for the tag-ordering check, as it sometimes appears out of order relative to TIFF_STRIP_OFFS
1268  if (tag != TIFF_STRIP_SIZE)
1269  s->last_tag = tag;
1270 
1271  off = bytestream2_tell(&s->gb);
1272  if (count == 1) {
1273  switch (type) {
1274  case TIFF_BYTE:
1275  case TIFF_SHORT:
1276  case TIFF_LONG:
1277  value = ff_tget(&s->gb, type, s->le);
1278  break;
1279  case TIFF_RATIONAL:
1280  value = ff_tget_long(&s->gb, s->le);
1281  value2 = ff_tget_long(&s->gb, s->le);
1282  if (!value2) {
1283  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator in rational\n");
1284  value2 = 1;
1285  }
1286 
1287  break;
1288  case TIFF_STRING:
1289  if (count <= 4) {
1290  break;
1291  }
1292  default:
1293  value = UINT_MAX;
1294  }
1295  }
1296 
1297  switch (tag) {
1298  case TIFF_SUBFILE:
1299  s->is_thumbnail = (value != 0);
1300  break;
1301  case TIFF_WIDTH:
1302  if (value > INT_MAX)
1303  return AVERROR_INVALIDDATA;
1304  s->width = value;
1305  break;
1306  case TIFF_HEIGHT:
1307  if (value > INT_MAX)
1308  return AVERROR_INVALIDDATA;
1309  s->height = value;
1310  break;
1311  case TIFF_BPP:
1312  if (count > 5 || count <= 0) {
1313  av_log(s->avctx, AV_LOG_ERROR,
1314  "This format is not supported (bpp=%d, %d components)\n",
1315  value, count);
1316  return AVERROR_INVALIDDATA;
1317  }
1318  s->bppcount = count;
1319  if (count == 1)
1320  s->bpp = value;
1321  else {
1322  switch (type) {
1323  case TIFF_BYTE:
1324  case TIFF_SHORT:
1325  case TIFF_LONG:
1326  s->bpp = 0;
1327  if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
1328  return AVERROR_INVALIDDATA;
1329  for (i = 0; i < count; i++)
1330  s->bpp += ff_tget(&s->gb, type, s->le);
1331  break;
1332  default:
1333  s->bpp = -1;
1334  }
1335  }
1336  break;
1337  case TIFF_SAMPLES_PER_PIXEL:
1338  if (count != 1) {
1339  av_log(s->avctx, AV_LOG_ERROR,
1340  "Samples per pixel requires a single value, many provided\n");
1341  return AVERROR_INVALIDDATA;
1342  }
1343  if (value > 5 || value <= 0) {
1344  av_log(s->avctx, AV_LOG_ERROR,
1345  "Invalid samples per pixel %d\n", value);
1346  return AVERROR_INVALIDDATA;
1347  }
1348  if (s->bppcount == 1)
1349  s->bpp *= value;
1350  s->bppcount = value;
1351  break;
1352  case TIFF_COMPR:
1353  s->compr = value;
1354  av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
1355  s->predictor = 0;
1356  switch (s->compr) {
1357  case TIFF_RAW:
1358  case TIFF_PACKBITS:
1359  case TIFF_LZW:
1360  case TIFF_CCITT_RLE:
1361  break;
1362  case TIFF_G3:
1363  case TIFF_G4:
1364  s->fax_opts = 0;
1365  break;
1366  case TIFF_DEFLATE:
1367  case TIFF_ADOBE_DEFLATE:
1368 #if CONFIG_ZLIB
1369  break;
1370 #else
1371  av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
1372  return AVERROR(ENOSYS);
1373 #endif
1374  case TIFF_JPEG:
1375  case TIFF_NEWJPEG:
1376  s->is_jpeg = 1;
1377  break;
1378  case TIFF_LZMA:
1379 #if CONFIG_LZMA
1380  break;
1381 #else
1382  av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
1383  return AVERROR(ENOSYS);
1384 #endif
1385  default:
1386  av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
1387  s->compr);
1388  return AVERROR_INVALIDDATA;
1389  }
1390  break;
1391  case TIFF_ROWSPERSTRIP:
1392  if (!value || (type == TIFF_LONG && value == UINT_MAX))
1393  value = s->height;
1394  s->rps = FFMIN(value, s->height);
1395  break;
1396  case TIFF_STRIP_OFFS:
1397  if (count == 1) {
1398  if (value > INT_MAX) {
1399  av_log(s->avctx, AV_LOG_ERROR,
1400  "strippos %u too large\n", value);
1401  return AVERROR_INVALIDDATA;
1402  }
1403  s->strippos = 0;
1404  s->stripoff = value;
1405  } else
1406  s->strippos = off;
1407  s->strips = count;
1408  if (s->strips == s->bppcount)
1409  s->rps = s->height;
1410  s->sot = type;
1411  break;
1412  case TIFF_STRIP_SIZE:
1413  if (count == 1) {
1414  if (value > INT_MAX) {
1415  av_log(s->avctx, AV_LOG_ERROR,
1416  "stripsize %u too large\n", value);
1417  return AVERROR_INVALIDDATA;
1418  }
1419  s->stripsizesoff = 0;
1420  s->stripsize = value;
1421  s->strips = 1;
1422  } else {
1423  s->stripsizesoff = off;
1424  }
1425  s->strips = count;
1426  s->sstype = type;
1427  break;
1428  case TIFF_XRES:
1429  case TIFF_YRES:
1430  set_sar(s, tag, value, value2);
1431  break;
1432  case TIFF_TILE_OFFSETS:
1433  s->tile_offsets_offset = off;
1434  s->is_tiled = 1;
1435  break;
1436  case TIFF_TILE_BYTE_COUNTS:
1437  s->tile_byte_counts_offset = off;
1438  break;
1439  case TIFF_TILE_LENGTH:
1440  if (value > INT_MAX)
1441  return AVERROR_INVALIDDATA;
1442  s->tile_length = value;
1443  break;
1444  case TIFF_TILE_WIDTH:
1445  if (value > INT_MAX)
1446  return AVERROR_INVALIDDATA;
1447  s->tile_width = value;
1448  break;
1449  case TIFF_PREDICTOR:
1450  if (value > INT_MAX)
1451  return AVERROR_INVALIDDATA;
1452  s->predictor = value;
1453  break;
1454  case TIFF_SUB_IFDS:
1455  if (count == 1)
1456  s->sub_ifd = value;
1457  else if (count > 1)
1458  s->sub_ifd = ff_tget_long(&s->gb, s->le); /** Only get the first SubIFD */
1459  break;
1461  case DNG_LINEARIZATION_TABLE:
1462  if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut))
1463  return AVERROR_INVALIDDATA;
1464  for (int i = 0; i < count; i++)
1465  s->dng_lut[i] = ff_tget(&s->gb, type, s->le);
1466  s->white_level = s->dng_lut[count-1];
1467  break;
1468  case DNG_BLACK_LEVEL:
1469  if (count > FF_ARRAY_ELEMS(s->black_level))
1470  return AVERROR_INVALIDDATA;
1471  s->black_level[0] = value / (float)value2;
1472  for (int i = 0; i < count && count > 1; i++) {
1473  if (type == TIFF_RATIONAL) {
1474  value = ff_tget_long(&s->gb, s->le);
1475  value2 = ff_tget_long(&s->gb, s->le);
1476  if (!value2) {
1477  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1478  value2 = 1;
1479  }
1480 
1481  s->black_level[i] = value / (float)value2;
1482  } else if (type == TIFF_SRATIONAL) {
1483  int value = ff_tget_long(&s->gb, s->le);
1484  int value2 = ff_tget_long(&s->gb, s->le);
1485  if (!value2) {
1486  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1487  value2 = 1;
1488  }
1489 
1490  s->black_level[i] = value / (float)value2;
1491  } else {
1492  s->black_level[i] = ff_tget(&s->gb, type, s->le);
1493  }
1494  }
1495  for (int i = count; i < 4 && count > 0; i++)
1496  s->black_level[i] = s->black_level[count - 1];
1497  break;
1498  case DNG_WHITE_LEVEL:
1499  s->white_level = value;
1500  break;
1501  case TIFF_CFA_PATTERN_DIM:
1502  if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
1503  ff_tget(&s->gb, type, s->le) != 2)) {
1504  av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
1505  return AVERROR_INVALIDDATA;
1506  }
1507  break;
1508  case TIFF_CFA_PATTERN:
1509  s->is_bayer = 1;
1510  s->pattern[0] = ff_tget(&s->gb, type, s->le);
1511  s->pattern[1] = ff_tget(&s->gb, type, s->le);
1512  s->pattern[2] = ff_tget(&s->gb, type, s->le);
1513  s->pattern[3] = ff_tget(&s->gb, type, s->le);
1514  break;
1515  case TIFF_PHOTOMETRIC:
1516  switch (value) {
1517  case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
1518  case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
1519  case TIFF_PHOTOMETRIC_RGB:
1520  case TIFF_PHOTOMETRIC_PALETTE:
1521  case TIFF_PHOTOMETRIC_SEPARATED:
1522  case TIFF_PHOTOMETRIC_YCBCR:
1523  case TIFF_PHOTOMETRIC_CFA:
1524  case TIFF_PHOTOMETRIC_LINEAR_RAW: // Used by DNG images
1525  s->photometric = value;
1526  break;
1527  case TIFF_PHOTOMETRIC_ALPHA_MASK:
1528  case TIFF_PHOTOMETRIC_CIE_LAB:
1529  case TIFF_PHOTOMETRIC_ICC_LAB:
1530  case TIFF_PHOTOMETRIC_ITU_LAB:
1531  case TIFF_PHOTOMETRIC_LOG_L:
1532  case TIFF_PHOTOMETRIC_LOG_LUV:
1533  avpriv_report_missing_feature(s->avctx,
1534  "PhotometricInterpretation 0x%04X",
1535  value);
1536  return AVERROR_PATCHWELCOME;
1537  default:
1538  av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
1539  "unknown\n", value);
1540  return AVERROR_INVALIDDATA;
1541  }
1542  break;
1543  case TIFF_FILL_ORDER:
1544  if (value < 1 || value > 2) {
1545  av_log(s->avctx, AV_LOG_ERROR,
1546  "Unknown FillOrder value %d, trying default one\n", value);
1547  value = 1;
1548  }
1549  s->fill_order = value - 1;
1550  break;
1551  case TIFF_PAL: {
1552  GetByteContext pal_gb[3];
1553  off = type_sizes[type];
1554  if (count / 3 > 256 ||
1555  bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
1556  return AVERROR_INVALIDDATA;
1557 
1558  pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
1559  bytestream2_skip(&pal_gb[1], count / 3 * off);
1560  bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
1561 
1562  off = (type_sizes[type] - 1) << 3;
1563  if (off > 31U) {
1564  av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
1565  return AVERROR_INVALIDDATA;
1566  }
1567 
1568  for (i = 0; i < count / 3; i++) {
1569  uint32_t p = 0xFF000000;
1570  p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
1571  p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
1572  p |= ff_tget(&pal_gb[2], type, s->le) >> off;
1573  s->palette[i] = p;
1574  }
1575  s->palette_is_set = 1;
1576  break;
1577  }
1578  case TIFF_PLANAR:
1579  s->planar = value == 2;
1580  break;
1581  case TIFF_YCBCR_SUBSAMPLING:
1582  if (count != 2) {
1583  av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
1584  return AVERROR_INVALIDDATA;
1585  }
1586  for (i = 0; i < count; i++) {
1587  s->subsampling[i] = ff_tget(&s->gb, type, s->le);
1588  if (s->subsampling[i] <= 0) {
1589  av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
1590  s->subsampling[i] = 1;
1591  return AVERROR_INVALIDDATA;
1592  }
1593  }
1594  break;
1595  case TIFF_T4OPTIONS:
1596  if (s->compr == TIFF_G3) {
1597  if (value > INT_MAX)
1598  return AVERROR_INVALIDDATA;
1599  s->fax_opts = value;
1600  }
1601  break;
1602  case TIFF_T6OPTIONS:
1603  if (s->compr == TIFF_G4) {
1604  if (value > INT_MAX)
1605  return AVERROR_INVALIDDATA;
1606  s->fax_opts = value;
1607  }
1608  break;
1609 #define ADD_METADATA(count, name, sep)\
1610  if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
1611  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
1612  goto end;\
1613  }
1615  ADD_METADATA(count, "ModelPixelScaleTag", NULL);
1616  break;
1618  ADD_METADATA(count, "ModelTransformationTag", NULL);
1619  break;
1620  case TIFF_MODEL_TIEPOINT:
1621  ADD_METADATA(count, "ModelTiepointTag", NULL);
1622  break;
1623  case TIFF_GEO_KEY_DIRECTORY:
1624  if (s->geotag_count) {
1625  avpriv_request_sample(s->avctx, "Multiple geo key directories");
1626  return AVERROR_INVALIDDATA;
1627  }
1628  ADD_METADATA(1, "GeoTIFF_Version", NULL);
1629  ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
1630  s->geotag_count = ff_tget_short(&s->gb, s->le);
1631  if (s->geotag_count > count / 4 - 1) {
1632  s->geotag_count = count / 4 - 1;
1633  av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
1634  }
1635  if ( bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4
1636  || s->geotag_count == 0) {
1637  s->geotag_count = 0;
1638  return -1;
1639  }
1640  s->geotags = av_calloc(s->geotag_count, sizeof(*s->geotags));
1641  if (!s->geotags) {
1642  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1643  s->geotag_count = 0;
1644  goto end;
1645  }
1646  for (i = 0; i < s->geotag_count; i++) {
1647  unsigned val;
1648  s->geotags[i].key = ff_tget_short(&s->gb, s->le);
1649  s->geotags[i].type = ff_tget_short(&s->gb, s->le);
1650  s->geotags[i].count = ff_tget_short(&s->gb, s->le);
1651  val = ff_tget_short(&s->gb, s->le);
1652 
1653  if (!s->geotags[i].type) {
1654  const char *str = get_geokey_val(s->geotags[i].key, val);
1655 
1656  s->geotags[i].val = str ? av_strdup(str) : av_asprintf("Unknown-%u", val);
1657  if (!s->geotags[i].val)
1658  return AVERROR(ENOMEM);
1659  } else
1660  s->geotags[i].offset = val;
1661  }
1662  break;
1663  case TIFF_GEO_DOUBLE_PARAMS:
1664  if (count >= INT_MAX / sizeof(int64_t))
1665  return AVERROR_INVALIDDATA;
1666  if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
1667  return AVERROR_INVALIDDATA;
1668  dp = av_malloc_array(count, sizeof(double));
1669  if (!dp) {
1670  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1671  goto end;
1672  }
1673  for (i = 0; i < count; i++)
1674  dp[i] = ff_tget_double(&s->gb, s->le);
1675  for (i = 0; i < s->geotag_count; i++) {
1676  if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
1677  if (s->geotags[i].count == 0
1678  || s->geotags[i].offset + s->geotags[i].count > count) {
1679  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1680  } else if (s->geotags[i].val) {
1681  av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
1682  } else {
1683  char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
1684  if (!ap) {
1685  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1686  av_freep(&dp);
1687  return AVERROR(ENOMEM);
1688  }
1689  s->geotags[i].val = ap;
1690  }
1691  }
1692  }
1693  av_freep(&dp);
1694  break;
1695  case TIFF_GEO_ASCII_PARAMS:
1696  pos = bytestream2_tell(&s->gb);
1697  for (i = 0; i < s->geotag_count; i++) {
1698  if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
1699  if (s->geotags[i].count == 0
1700  || s->geotags[i].offset + s->geotags[i].count > count) {
1701  av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
1702  } else {
1703  char *ap;
1704 
1705  bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
1706  if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
1707  return AVERROR_INVALIDDATA;
1708  if (s->geotags[i].val)
1709  return AVERROR_INVALIDDATA;
1710  ap = av_malloc(s->geotags[i].count);
1711  if (!ap) {
1712  av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
1713  return AVERROR(ENOMEM);
1714  }
1715  bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
1716  ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
1717  s->geotags[i].val = ap;
1718  }
1719  }
1720  }
1721  break;
1722  case TIFF_ICC_PROFILE:
1723  gb_temp = s->gb;
1724  bytestream2_seek(&gb_temp, off, SEEK_SET);
1725 
1726  if (bytestream2_get_bytes_left(&gb_temp) < count)
1727  return AVERROR_INVALIDDATA;
1728 
1729  ret = ff_frame_new_side_data(s->avctx, frame, AV_FRAME_DATA_ICC_PROFILE, count, &sd);
1730  if (ret < 0)
1731  return ret;
1732  if (sd)
1733  bytestream2_get_bufferu(&gb_temp, sd->data, count);
1734  break;
1735  case TIFF_ARTIST:
1736  ADD_METADATA(count, "artist", NULL);
1737  break;
1738  case TIFF_COPYRIGHT:
1739  ADD_METADATA(count, "copyright", NULL);
1740  break;
1741  case TIFF_DATE:
1742  ADD_METADATA(count, "date", NULL);
1743  break;
1744  case TIFF_DOCUMENT_NAME:
1745  ADD_METADATA(count, "document_name", NULL);
1746  break;
1747  case TIFF_HOST_COMPUTER:
1748  ADD_METADATA(count, "computer", NULL);
1749  break;
1751  ADD_METADATA(count, "description", NULL);
1752  break;
1753  case TIFF_MAKE:
1754  ADD_METADATA(count, "make", NULL);
1755  break;
1756  case TIFF_MODEL:
1757  ADD_METADATA(count, "model", NULL);
1758  break;
1759  case TIFF_PAGE_NAME:
1760  ADD_METADATA(count, "page_name", NULL);
1761  break;
1762  case TIFF_PAGE_NUMBER:
1763  ADD_METADATA(count, "page_number", " / ");
1764  // need to seek back to re-read the page number
1765  bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
1766  // read the page number
1767  s->cur_page = ff_tget_short(&s->gb, s->le);
1768  // get back to where we were before the previous seek
1769  bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
1770  break;
1771  case TIFF_SOFTWARE_NAME:
1772  ADD_METADATA(count, "software", NULL);
1773  break;
1774  case DNG_VERSION:
1775  if (count == 4) {
1776  unsigned int ver[4];
1777  ver[0] = ff_tget(&s->gb, type, s->le);
1778  ver[1] = ff_tget(&s->gb, type, s->le);
1779  ver[2] = ff_tget(&s->gb, type, s->le);
1780  ver[3] = ff_tget(&s->gb, type, s->le);
1781 
1782  av_log(s->avctx, AV_LOG_DEBUG, "DNG file, version %u.%u.%u.%u\n",
1783  ver[0], ver[1], ver[2], ver[3]);
1784 
1786  }
1787  break;
1788  case DNG_ANALOG_BALANCE:
1789  if (type != TIFF_RATIONAL)
1790  break;
1791 
1792  for (int i = 0; i < 3; i++) {
1793  value = ff_tget_long(&s->gb, s->le);
1794  value2 = ff_tget_long(&s->gb, s->le);
1795  if (!value2) {
1796  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1797  value2 = 1;
1798  }
1799 
1800  s->analog_balance[i] = value / (float)value2;
1801  }
1802  break;
1803  case DNG_AS_SHOT_NEUTRAL:
1804  if (type != TIFF_RATIONAL)
1805  break;
1806 
1807  for (int i = 0; i < 3; i++) {
1808  value = ff_tget_long(&s->gb, s->le);
1809  value2 = ff_tget_long(&s->gb, s->le);
1810  if (!value2) {
1811  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1812  value2 = 1;
1813  }
1814 
1815  s->as_shot_neutral[i] = value / (float)value2;
1816  }
1817  break;
1818  case DNG_AS_SHOT_WHITE_XY:
1819  if (type != TIFF_RATIONAL)
1820  break;
1821 
1822  for (int i = 0; i < 2; i++) {
1823  value = ff_tget_long(&s->gb, s->le);
1824  value2 = ff_tget_long(&s->gb, s->le);
1825  if (!value2) {
1826  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1827  value2 = 1;
1828  }
1829 
1830  s->as_shot_white[i] = value / (float)value2;
1831  }
1832  s->as_shot_white[2] = 1.f - s->as_shot_white[0] - s->as_shot_white[1];
1833  for (int i = 0; i < 3; i++) {
1834  s->as_shot_white[i] /= d65_white[i];
1835  }
1836  break;
1837  case DNG_COLOR_MATRIX1:
1838  case DNG_COLOR_MATRIX2:
1839  for (int i = 0; i < 3; i++) {
1840  for (int j = 0; j < 3; j++) {
1841  int value = ff_tget_long(&s->gb, s->le);
1842  int value2 = ff_tget_long(&s->gb, s->le);
1843  if (!value2) {
1844  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1845  value2 = 1;
1846  }
1847  s->color_matrix[i][j] = value / (float)value2;
1848  }
1849  s->use_color_matrix = 1;
1850  }
1851  break;
1852  case DNG_CAMERA_CALIBRATION1:
1853  case DNG_CAMERA_CALIBRATION2:
1854  for (int i = 0; i < 3; i++) {
1855  for (int j = 0; j < 3; j++) {
1856  int value = ff_tget_long(&s->gb, s->le);
1857  int value2 = ff_tget_long(&s->gb, s->le);
1858  if (!value2) {
1859  av_log(s->avctx, AV_LOG_WARNING, "Invalid denominator\n");
1860  value2 = 1;
1861  }
1862  s->camera_calibration[i][j] = value / (float)value2;
1863  }
1864  }
1865  break;
1866  case CINEMADNG_TIME_CODES:
1867  case CINEMADNG_FRAME_RATE:
1868  case CINEMADNG_T_STOP:
1869  case CINEMADNG_REEL_NAME:
1872  break;
1873  default:
1874  if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1875  av_log(s->avctx, AV_LOG_ERROR,
1876  "Unknown or unsupported tag %d/0x%0X\n",
1877  tag, tag);
1878  return AVERROR_INVALIDDATA;
1879  }
1880  }
1881 end:
1882  if (s->bpp > 128U) {
1883  av_log(s->avctx, AV_LOG_ERROR,
1884  "This format is not supported (bpp=%d, %d components)\n",
1885  s->bpp, count);
1886  s->bpp = 0;
1887  return AVERROR_INVALIDDATA;
1888  }
1889  bytestream2_seek(&s->gb, start, SEEK_SET);
1890  return 0;
1891 }
1892 
1893 static const float xyz2rgb[3][3] = {
1894  { 0.412453f, 0.357580f, 0.180423f },
1895  { 0.212671f, 0.715160f, 0.072169f },
1896  { 0.019334f, 0.119193f, 0.950227f },
1897 };
1898 
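/* Convert the DNG camera-to-XYZ matrix into a row-normalized camera-to-RGB
 * matrix: each row of cam2xyz * xyz2rgb is scaled so it sums to 1, and the
 * reciprocal of the original row sum is stored in s->premultiply[] for use by
 * dng_blit()'s per-channel scale factors. */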
1899 static void camera_xyz_coeff(TiffContext *s,
1900  float rgb2cam[3][4],
1901  double cam2xyz[4][3])
1902 {
1903  double cam2rgb[4][3], num;
1904  int i, j, k;
1905 
1906  for (i = 0; i < 3; i++) {
1907  for (j = 0; j < 3; j++) {
1908  cam2rgb[i][j] = 0.;
1909  for (k = 0; k < 3; k++)
1910  cam2rgb[i][j] += cam2xyz[i][k] * xyz2rgb[k][j];
1911  }
1912  }
1913 
1914  for (i = 0; i < 3; i++) {
1915  for (num = j = 0; j < 3; j++)
1916  num += cam2rgb[i][j];
1917  if (!num)
1918  num = 1;
1919  for (j = 0; j < 3; j++)
1920  cam2rgb[i][j] /= num;
1921  s->premultiply[i] = 1.f / num;
1922  }
1923 }
1924 
1925 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
1926  int *got_frame, AVPacket *avpkt)
1927 {
1928  TiffContext *const s = avctx->priv_data;
1929  unsigned off, last_off = 0;
1930  int le, ret, plane, planes;
1931  int i, j, entries, stride;
1932  unsigned soff, ssize;
1933  uint8_t *dst;
1934  GetByteContext stripsizes;
1935  GetByteContext stripdata;
1936  int retry_for_subifd, retry_for_page;
1937  int is_dng;
1938  int has_tile_bits, has_strip_bits;
1939 
1940  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1941 
1942  // parse image header
1943  if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
1944  av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
1945  return ret;
1946  } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
1947  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
1948  return AVERROR_INVALIDDATA;
1949  }
1950  s->le = le;
1951  // TIFF_BPP is not a required tag and defaults to 1
1952 
1953  s->tiff_type = TIFF_TYPE_TIFF;
1954  s->use_color_matrix = 0;
1955 again:
1956  s->is_thumbnail = 0;
1957  s->bppcount = s->bpp = 1;
1958  s->photometric = TIFF_PHOTOMETRIC_NONE;
1959  s->compr = TIFF_RAW;
1960  s->fill_order = 0;
1961  s->white_level = 0;
1962  s->is_bayer = 0;
1963  s->is_tiled = 0;
1964  s->is_jpeg = 0;
1965  s->cur_page = 0;
1966  s->last_tag = 0;
1967 
1968  for (i = 0; i < 65536; i++)
1969  s->dng_lut[i] = i;
1970 
1971  for (i = 0; i < FF_ARRAY_ELEMS(s->black_level); i++)
1972  s->black_level[i] = 0.f;
1973 
1974  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_neutral); i++)
1975  s->as_shot_neutral[i] = 0.f;
1976 
1977  for (i = 0; i < FF_ARRAY_ELEMS(s->as_shot_white); i++)
1978  s->as_shot_white[i] = 1.f;
1979 
1980  for (i = 0; i < FF_ARRAY_ELEMS(s->analog_balance); i++)
1981  s->analog_balance[i] = 1.f;
1982 
1983  for (i = 0; i < FF_ARRAY_ELEMS(s->premultiply); i++)
1984  s->premultiply[i] = 1.f;
1985 
1986  for (i = 0; i < 4; i++)
1987  for (j = 0; j < 4; j++)
1988  s->camera_calibration[i][j] = i == j;
1989 
1990  free_geotags(s);
1991 
1992  // Reset these offsets so we can tell if they were set this frame
1993  s->stripsizesoff = s->strippos = 0;
1994  /* parse image file directory */
1995  bytestream2_seek(&s->gb, off, SEEK_SET);
1996  entries = ff_tget_short(&s->gb, le);
1997  if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
1998  return AVERROR_INVALIDDATA;
1999  for (i = 0; i < entries; i++) {
2000  if ((ret = tiff_decode_tag(s, p)) < 0)
2001  return ret;
2002  }
2003 
2004  if (s->get_thumbnail && !s->is_thumbnail) {
2005  av_log(avctx, AV_LOG_INFO, "No embedded thumbnail present\n");
2006  return AVERROR_EOF;
2007  }
2008 
2009  /** whether we should process this IFD's SubIFD */
2010  retry_for_subifd = s->sub_ifd && (s->get_subimage || (!s->get_thumbnail && s->is_thumbnail));
2011  /** whether we should process this multi-page IFD's next page */
2012  retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
2013 
2014  if (retry_for_page) {
2015  // set offset to the next IFD
2016  off = ff_tget_long(&s->gb, le);
2017  } else if (retry_for_subifd) {
2018  // set offset to the SubIFD
2019  off = s->sub_ifd;
2020  }
2021 
2022  if (retry_for_subifd || retry_for_page) {
2023  if (!off) {
2024  av_log(avctx, AV_LOG_ERROR, "Requested entry not found\n");
2025  return AVERROR_INVALIDDATA;
2026  }
2027  if (off <= last_off) {
2028  avpriv_request_sample(s->avctx, "non increasing IFD offset");
2029  return AVERROR_INVALIDDATA;
2030  }
2031  last_off = off;
2032  if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
2033  av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
2034  return AVERROR_INVALIDDATA;
2035  }
2036  s->sub_ifd = 0;
2037  goto again;
2038  }
2039 
2040  /* At this point we've decided on which (Sub)IFD to process */
2041 
2042  is_dng = (s->tiff_type == TIFF_TYPE_DNG || s->tiff_type == TIFF_TYPE_CINEMADNG);
2043 
2044  for (i = 0; i<s->geotag_count; i++) {
2045  const char *keyname = get_geokey_name(s->geotags[i].key);
2046  if (!keyname) {
2047  av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
2048  continue;
2049  }
2050  if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
2051  av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
2052  continue;
2053  }
2054  ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, AV_DICT_DONT_STRDUP_VAL);
2055  s->geotags[i].val = NULL;
2056  if (ret<0) {
2057  av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
2058  return ret;
2059  }
2060  }
2061 
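 /* For DNG input the colour math is prepared before any pixels are decoded:
  * AnalogBalance is folded into the CameraCalibration matrix, per-channel
  * premultiply gains come either from the calibration diagonal or from
  * ColorMatrix and AsShotWhite via camera_xyz_coeff(), the gains are
  * normalized so the smallest one equals 1, and the bit depth, BlackLevel
  * and WhiteLevel are validated. */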
2062  if (is_dng) {
2063  double cam2xyz[4][3];
2064  float cmatrix[3][4];
2065  float pmin = FLT_MAX;
2066  int bps;
2067 
2068  for (i = 0; i < 3; i++) {
2069  for (j = 0; j < 3; j++)
2070  s->camera_calibration[i][j] *= s->analog_balance[i];
2071  }
2072 
2073  if (!s->use_color_matrix) {
2074  for (i = 0; i < 3; i++) {
2075  if (s->camera_calibration[i][i])
2076  s->premultiply[i] /= s->camera_calibration[i][i];
2077  }
2078  } else {
2079  for (int c = 0; c < 3; c++) {
2080  for (i = 0; i < 3; i++) {
2081  cam2xyz[c][i] = 0.;
2082  for (j = 0; j < 3; j++)
2083  cam2xyz[c][i] += s->camera_calibration[c][j] * s->color_matrix[j][i] * s->as_shot_white[i];
2084  }
2085  }
2086 
2087  camera_xyz_coeff(s, cmatrix, cam2xyz);
2088  }
2089 
2090  for (int c = 0; c < 3; c++)
2091  pmin = fminf(pmin, s->premultiply[c]);
2092 
2093  for (int c = 0; c < 3; c++)
2094  s->premultiply[c] /= pmin;
2095 
2096  if (s->bpp % s->bppcount)
2097  return AVERROR_INVALIDDATA;
2098  bps = s->bpp / s->bppcount;
2099  if (bps < 8 || bps > 32)
2100  return AVERROR_INVALIDDATA;
2101 
2102  if (s->white_level == 0)
2103  s->white_level = (1LL << bps) - 1; /* Default value as per the spec */
2104 
2105  if (s->white_level <= s->black_level[0]) {
2106  av_log(avctx, AV_LOG_ERROR, "BlackLevel (%g) must be less than WhiteLevel (%"PRId32")\n",
2107  s->black_level[0], s->white_level);
2108  return AVERROR_INVALIDDATA;
2109  }
2110 
2111  if (s->planar)
2112  return AVERROR_PATCHWELCOME;
2113  }
2114 
2115  if (!s->is_tiled && !s->strippos && !s->stripoff) {
2116  av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
2117  return AVERROR_INVALIDDATA;
2118  }
2119 
2120  has_tile_bits = s->is_tiled || s->tile_byte_counts_offset || s->tile_offsets_offset || s->tile_width || s->tile_length;
2121  has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff;
2122 
2123  if (has_tile_bits && has_strip_bits) {
2124  int tiled_dng = s->is_tiled && is_dng;
2125  av_log(avctx, tiled_dng ? AV_LOG_WARNING : AV_LOG_ERROR, "Tiled TIFF is not allowed to strip\n");
2126  if (!tiled_dng)
2127  return AVERROR_INVALIDDATA;
2128  }
2129 
2130  /* now we have the data and may start decoding */
2131  if ((ret = init_image(s, p)) <= 0)
2132  return ret;
2133 
2134  if (!s->is_tiled || has_strip_bits) {
2135  if (s->strips == 1 && !s->stripsize) {
2136  av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
2137  s->stripsize = avpkt->size - s->stripoff;
2138  }
2139 
2140  if (s->stripsizesoff) {
2141  if (s->stripsizesoff >= (unsigned)avpkt->size)
2142  return AVERROR_INVALIDDATA;
2143  bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
2144  avpkt->size - s->stripsizesoff);
2145  }
2146  if (s->strippos) {
2147  if (s->strippos >= (unsigned)avpkt->size)
2148  return AVERROR_INVALIDDATA;
2149  bytestream2_init(&stripdata, avpkt->data + s->strippos,
2150  avpkt->size - s->strippos);
2151  }
2152 
2153  if (s->rps <= 0 || s->rps % s->subsampling[1]) {
2154  av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
2155  return AVERROR_INVALIDDATA;
2156  }
2157  }
2158 
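 /* Raw (LinearRaw/CFA) data is tagged as linear light, while BlackIsZero
  * grayscale is tagged with an approximate gamma 2.2 transfer. */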
2159  if (s->photometric == TIFF_PHOTOMETRIC_LINEAR_RAW ||
2160  s->photometric == TIFF_PHOTOMETRIC_CFA) {
2161  p->color_trc = AVCOL_TRC_LINEAR;
2162  } else if (s->photometric == TIFF_PHOTOMETRIC_BLACK_IS_ZERO) {
2163  p->color_trc = AVCOL_TRC_GAMMA22;
2164  }
2165 
2166  /* Handle DNG images with JPEG-compressed tiles */
2167 
2168  if (is_dng && s->is_tiled) {
2169  if (!s->is_jpeg) {
2170  avpriv_report_missing_feature(avctx, "DNG uncompressed tiled images");
2171  return AVERROR_PATCHWELCOME;
2172  } else if (!s->is_bayer) {
2173  avpriv_report_missing_feature(avctx, "DNG JPG-compressed tiled non-bayer-encoded images");
2174  return AVERROR_PATCHWELCOME;
2175  } else {
2176  if ((ret = dng_decode_tiles(avctx, p, avpkt)) > 0)
2177  *got_frame = 1;
2178  return ret;
2179  }
2180  }
2181 
2182  /* Handle TIFF images and DNG images with uncompressed strips (non-tiled) */
2183 
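 /* Strips are decoded rps (RowsPerStrip) rows at a time; each strip's offset
  * and byte count come from StripOffsets/StripByteCounts when present,
  * otherwise the single stripoff/stripsize pair is used. For planar data the
  * outer loop below runs once per plane. */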
2184  planes = s->planar ? s->bppcount : 1;
2185  for (plane = 0; plane < planes; plane++) {
2186  uint8_t *five_planes = NULL;
2187  int remaining = avpkt->size;
2188  int decoded_height;
2189  stride = p->linesize[plane];
2190  dst = p->data[plane];
2191  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2192  s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
2193  stride = stride * 5 / 4;
2194  five_planes =
2195  dst = av_malloc(stride * s->height);
2196  if (!dst)
2197  return AVERROR(ENOMEM);
2198  }
2199  for (i = 0; i < s->height; i += s->rps) {
2200  if (i)
2201  dst += s->rps * stride;
2202  if (s->stripsizesoff)
2203  ssize = ff_tget(&stripsizes, s->sstype, le);
2204  else
2205  ssize = s->stripsize;
2206 
2207  if (s->strippos)
2208  soff = ff_tget(&stripdata, s->sot, le);
2209  else
2210  soff = s->stripoff;
2211 
2212  if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
2213  av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
2214  av_freep(&five_planes);
2215  return AVERROR_INVALIDDATA;
2216  }
2217  remaining -= ssize;
2218  if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
2219  FFMIN(s->rps, s->height - i))) < 0) {
2220  if (avctx->err_recognition & AV_EF_EXPLODE) {
2221  av_freep(&five_planes);
2222  return ret;
2223  }
2224  break;
2225  }
2226  }
2227  decoded_height = FFMIN(i, s->height);
2228 
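 /* Predictor 2 (horizontal differencing): each sample was stored as the
  * difference from the sample one pixel to its left, so decoding is a
  * running sum along the row, done per byte or per 16-bit word depending
  * on the pixel format. */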
2229  if (s->predictor == 2) {
2230  if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
2231  av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
2232  return AVERROR_PATCHWELCOME;
2233  }
2234  dst = five_planes ? five_planes : p->data[plane];
2235  soff = s->bpp >> 3;
2236  if (s->planar)
2237  soff = FFMAX(soff / s->bppcount, 1);
2238  ssize = s->width * soff;
2239  if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
2240  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
2241  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
2242  s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
2243  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
2244  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
2245  for (i = 0; i < decoded_height; i++) {
2246  for (j = soff; j < ssize; j += 2)
2247  AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
2248  dst += stride;
2249  }
2250  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
2251  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
2252  s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
2253  s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
2254  s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
2255  s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
2256  for (i = 0; i < decoded_height; i++) {
2257  for (j = soff; j < ssize; j += 2)
2258  AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
2259  dst += stride;
2260  }
2261  } else {
2262  for (i = 0; i < decoded_height; i++) {
2263  for (j = soff; j < ssize; j++)
2264  dst[j] += dst[j - soff];
2265  dst += stride;
2266  }
2267  }
2268  }
2269 
2270  /* Floating point predictor
2271  TIFF Technical Note 3 http://chriscox.org/TIFFTN3d1.pdf */
2272  if (s->predictor == 3) {
2273  int channels = s->bppcount;
2274  int group_size;
2275  uint8_t *tmpbuf;
2276  int bpc;
2277 
2278  dst = five_planes ? five_planes : p->data[plane];
2279  soff = s->bpp >> 3;
2280  if (s->planar) {
2281  soff = FFMAX(soff / s->bppcount, 1);
2282  channels = 1;
2283  }
2284  ssize = s->width * soff;
2285  bpc = FFMAX(soff / s->bppcount, 1); /* Bytes per component */
2286  group_size = s->width * channels;
2287 
2288  tmpbuf = av_malloc(ssize);
2289  if (!tmpbuf) {
2290  av_free(five_planes);
2291  return AVERROR(ENOMEM);
2292  }
2293 
2294  if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32LE ||
2295  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32LE) {
2296  for (i = 0; i < decoded_height; i++) {
2297  /* Copy first sample byte for each channel */
2298  for (j = 0; j < channels; j++)
2299  tmpbuf[j] = dst[j];
2300 
2301  /* Decode horizontal differences */
2302  for (j = channels; j < ssize; j++)
2303  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2304 
2305  /* Combine shuffled bytes from their separate groups. Each
2306  byte of every floating point value in a row of pixels is
2307  split and combined into separate groups. A group of all
2308  the sign/exponents bytes in the row and groups for each
2309  of the upper, mid, and lower mantissa bytes in the row. */
2310  for (j = 0; j < group_size; j++) {
2311  for (int k = 0; k < bpc; k++) {
2312  dst[bpc * j + k] = tmpbuf[(bpc - k - 1) * group_size + j];
2313  }
2314  }
2315  dst += stride;
2316  }
2317  } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGBF32BE ||
2318  s->avctx->pix_fmt == AV_PIX_FMT_RGBAF32BE) {
2319  /* Same as LE only the shuffle at the end is reversed */
2320  for (i = 0; i < decoded_height; i++) {
2321  for (j = 0; j < channels; j++)
2322  tmpbuf[j] = dst[j];
2323 
2324  for (j = channels; j < ssize; j++)
2325  tmpbuf[j] = dst[j] + tmpbuf[j-channels];
2326 
2327  for (j = 0; j < group_size; j++) {
2328  for (int k = 0; k < bpc; k++) {
2329  dst[bpc * j + k] = tmpbuf[k * group_size + j];
2330  }
2331  }
2332  dst += stride;
2333  }
2334  } else {
2335  av_log(s->avctx, AV_LOG_ERROR, "unsupported floating point pixel format\n");
2336  }
2337  av_free(tmpbuf);
2338  }
2339 
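 /* WhiteIsZero stores inverted intensities, so every sample is flipped
  * against the maximum code value. */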
2340  if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
2341  int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
2342  dst = p->data[plane];
2343  for (i = 0; i < s->height; i++) {
2344  for (j = 0; j < stride; j++)
2345  dst[j] = c - dst[j];
2346  dst += stride;
2347  }
2348  }
2349 
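 /* Separated (CMYK) data is converted in place to RGB(A):
  * R = (255 - C) * (255 - K) / 255 and likewise for G and B, where the
  * "* 257 >> 16" is an integer approximation of the division by 255; the
  * RGBA64 branch does the same arithmetic at 16-bit precision. */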
2350  if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2351  (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
2352  int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
2353  uint8_t *src = five_planes ? five_planes : p->data[plane];
2354  dst = p->data[plane];
2355  for (i = 0; i < s->height; i++) {
2356  for (j = 0; j < s->width; j++) {
2357  int k = 255 - src[x * j + 3];
2358  int r = (255 - src[x * j ]) * k;
2359  int g = (255 - src[x * j + 1]) * k;
2360  int b = (255 - src[x * j + 2]) * k;
2361  dst[4 * j ] = r * 257 >> 16;
2362  dst[4 * j + 1] = g * 257 >> 16;
2363  dst[4 * j + 2] = b * 257 >> 16;
2364  dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
2365  }
2366  src += stride;
2367  dst += p->linesize[plane];
2368  }
2369  av_freep(&five_planes);
2370  } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
2371  s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
2372  dst = p->data[plane];
2373  for (i = 0; i < s->height; i++) {
2374  for (j = 0; j < s->width; j++) {
2375  uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
2376  uint64_t r = (65535 - AV_RB16(dst + 8 * j )) * k;
2377  uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
2378  uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
2379  AV_WB16(dst + 8 * j , r * 65537 >> 32);
2380  AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
2381  AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
2382  AV_WB16(dst + 8 * j + 6, 65535);
2383  }
2384  dst += p->linesize[plane];
2385  }
2386  }
2387  }
2388 
2389  if (s->planar && s->bppcount > 2) {
2390  FFSWAP(uint8_t*, p->data[0], p->data[2]);
2391  FFSWAP(int, p->linesize[0], p->linesize[2]);
2392  FFSWAP(uint8_t*, p->data[0], p->data[1]);
2393  FFSWAP(int, p->linesize[0], p->linesize[1]);
2394  }
2395 
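 /* Non-DNG Bayer data with a WhiteLevel tag is rescaled from
  * [0, white_level] to the full 16-bit range. */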
2396  if (s->is_bayer && s->white_level && s->bpp == 16 && !is_dng) {
2397  uint16_t *dst = (uint16_t *)p->data[0];
2398  for (i = 0; i < s->height; i++) {
2399  for (j = 0; j < s->width; j++)
2400  dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
2401  dst += stride / 2;
2402  }
2403  }
2404 
2405  *got_frame = 1;
2406 
2407  return avpkt->size;
2408 }
2409 
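 /* tiff_init() sets the decoding defaults and allocates the helper MJPEG
  * decoder context that dng_decode_jpeg() uses for JPEG-compressed DNG
  * tiles; tiff_end() releases all of it again. */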
2410 static av_cold int tiff_init(AVCodecContext *avctx)
2411 {
2412  TiffContext *s = avctx->priv_data;
2413  int ret;
2414 
2415  s->width = 0;
2416  s->height = 0;
2417  s->subsampling[0] =
2418  s->subsampling[1] = 1;
2419  s->avctx = avctx;
2420  ff_lzw_decode_open(&s->lzw);
2421  if (!s->lzw)
2422  return AVERROR(ENOMEM);
2423  ff_ccitt_unpack_init();
2424 
2425  /* Allocate JPEG frame */
2426  s->jpgframe = av_frame_alloc();
2427  s->jpkt = av_packet_alloc();
2428  if (!s->jpgframe || !s->jpkt)
2429  return AVERROR(ENOMEM);
2430 
2431  /* Prepare everything needed for JPEG decoding */
2432  EXTERN const FFCodec ff_mjpeg_decoder;
2433  s->avctx_mjpeg = avcodec_alloc_context3(&ff_mjpeg_decoder.p);
2434  if (!s->avctx_mjpeg)
2435  return AVERROR(ENOMEM);
2436  s->avctx_mjpeg->flags = avctx->flags;
2437  s->avctx_mjpeg->flags2 = avctx->flags2;
2438  s->avctx_mjpeg->idct_algo = avctx->idct_algo;
2439  s->avctx_mjpeg->max_pixels = avctx->max_pixels;
2440  ret = avcodec_open2(s->avctx_mjpeg, NULL, NULL);
2441  if (ret < 0) {
2442  return ret;
2443  }
2444 
2445  return 0;
2446 }
2447 
2448 static av_cold int tiff_end(AVCodecContext *avctx)
2449 {
2450  TiffContext *const s = avctx->priv_data;
2451 
2452  free_geotags(s);
2453 
2454  ff_lzw_decode_close(&s->lzw);
2455  av_freep(&s->deinvert_buf);
2456  s->deinvert_buf_size = 0;
2457  av_freep(&s->yuv_line);
2458  s->yuv_line_size = 0;
2459  av_frame_free(&s->jpgframe);
2460  av_packet_free(&s->jpkt);
2461  avcodec_free_context(&s->avctx_mjpeg);
2462  return 0;
2463 }
2464 
2465 #define OFFSET(x) offsetof(TiffContext, x)
2466 static const AVOption tiff_options[] = {
2467  { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2468  { "thumbnail", "decode embedded thumbnail subimage instead if available", OFFSET(get_thumbnail), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2469  { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
2470  { NULL },
2471 };
2472 
2473 static const AVClass tiff_decoder_class = {
2474  .class_name = "TIFF decoder",
2475  .item_name = av_default_item_name,
2476  .option = tiff_options,
2477  .version = LIBAVUTIL_VERSION_INT,
2478 };
2479 
2480 const FFCodec ff_tiff_decoder = {
2481  .p.name = "tiff",
2482  CODEC_LONG_NAME("TIFF image"),
2483  .p.type = AVMEDIA_TYPE_VIDEO,
2484  .p.id = AV_CODEC_ID_TIFF,
2485  .priv_data_size = sizeof(TiffContext),
2486  .init = tiff_init,
2487  .close = tiff_end,
2488  FF_CODEC_DECODE_CB(decode_frame),
2489  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
2490  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_ICC_PROFILES |
2491  FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2492  .p.priv_class = &tiff_decoder_class,
2493 };
TiffContext::tiff_type
enum TiffType tiff_type
Definition: tiff.c:73
AVFrame::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:665
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:430
ff_tadd_string_metadata
int ff_tadd_string_metadata(int count, const char *name, GetByteContext *gb, int le, AVDictionary **metadata)
Adds a string of count characters into the metadata dictionary.
Definition: tiff_common.c:209
TiffContext::gb
GetByteContext gb
Definition: tiff.c:62
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
TIFF_GEOG_LINEAR_UNITS_GEOKEY
@ TIFF_GEOG_LINEAR_UNITS_GEOKEY
Definition: tiff.h:147
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
ff_tiff_decoder
const FFCodec ff_tiff_decoder
Definition: tiff.c:2480
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
bytestream2_get_eof
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p)
Definition: bytestream.h:332
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
DNG_AS_SHOT_WHITE_XY
@ DNG_AS_SHOT_WHITE_XY
Definition: tiff.h:112
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_PIX_FMT_YA8
@ AV_PIX_FMT_YA8
8 bits gray, 8 bits alpha
Definition: pixfmt.h:140
get_geokey_type
static int get_geokey_type(int key)
Definition: tiff.c:159
tiff_decode_tag
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1250
DNG_COLOR_MATRIX2
@ DNG_COLOR_MATRIX2
Definition: tiff.h:107
elements
static const ElemCat * elements[ELEMENT_COUNT]
Definition: signature.h:565
TIFF_PHOTOMETRIC_ICC_LAB
@ TIFF_PHOTOMETRIC_ICC_LAB
Definition: tiff.h:198
TIFF_JPEG
@ TIFF_JPEG
Definition: tiff.h:131
GetByteContext
Definition: bytestream.h:33
AV_PIX_FMT_GBRP16BE
@ AV_PIX_FMT_GBRP16BE
planar GBR 4:4:4 48bpp, big-endian
Definition: pixfmt.h:171
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3341
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
TiffContext::dng_lut
uint16_t dng_lut[65536]
Definition: tiff.c:103
camera_xyz_coeff
static void camera_xyz_coeff(TiffContext *s, float rgb2cam[3][4], double cam2xyz[4][3])
Definition: tiff.c:1899
AVCOL_TRC_LINEAR
@ AVCOL_TRC_LINEAR
"Linear transfer characteristics"
Definition: pixfmt.h:653
TiffContext::strippos
int strippos
Definition: tiff.c:110
TIFF_CFA_PATTERN_DIM
@ TIFF_CFA_PATTERN_DIM
Definition: tiff.h:87
TIFF_PROJ_COORD_TRANS_GEOKEY
@ TIFF_PROJ_COORD_TRANS_GEOKEY
Definition: tiff.h:160
OFFSET
#define OFFSET(x)
Definition: tiff.c:2465
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1398
TiffContext::sot
int sot
Definition: tiff.c:109
int64_t
long long int64_t
Definition: coverity.c:34
doubles2str
static char * doubles2str(double *dp, int count, const char *sep)
Definition: tiff.c:246
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
tiff_projection_codes
static const TiffGeoTagKeyName tiff_projection_codes[]
Definition: tiff_data.h:1536
TIFF_CCITT_RLE
@ TIFF_CCITT_RLE
Definition: tiff.h:127
TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
@ TIFF_GEOG_AZIMUTH_UNITS_GEOKEY
Definition: tiff.h:155
av_unused
#define av_unused
Definition: attributes.h:131
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:63
mjpegdec.h
bytestream2_seek
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:212
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
AV_PIX_FMT_RGBA64BE
@ AV_PIX_FMT_RGBA64BE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:202
tiff_end
static av_cold int tiff_end(AVCodecContext *avctx)
Definition: tiff.c:2448
AV_PIX_FMT_GBRAPF32LE
@ AV_PIX_FMT_GBRAPF32LE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian.
Definition: pixfmt.h:344
w
uint8_t w
Definition: llviddspenc.c:38
TiffContext::tile_offsets_offset
int tile_offsets_offset
Definition: tiff.c:115
ff_mjpeg_decoder
const FFCodec ff_mjpeg_decoder
TIFF_ADOBE_DEFLATE
@ TIFF_ADOBE_DEFLATE
Definition: tiff.h:133
AV_PIX_FMT_GBRPF32BE
@ AV_PIX_FMT_GBRPF32BE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian.
Definition: pixfmt.h:341
TIFF_COPYRIGHT
@ TIFF_COPYRIGHT
Definition: tiff.h:89
AVPacket::data
uint8_t * data
Definition: packet.h:535
TIFF_PHOTOMETRIC_ITU_LAB
@ TIFF_PHOTOMETRIC_ITU_LAB
Definition: tiff.h:199
AVOption
AVOption.
Definition: opt.h:429
TIFF_LONG
@ TIFF_LONG
Definition: tiff_common.h:40
b
#define b
Definition: input.c:42
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
RET_GEOKEY_VAL
#define RET_GEOKEY_VAL(TYPE, array)
TIFF_NEWJPEG
@ TIFF_NEWJPEG
Definition: tiff.h:132
FFCodec
Definition: codec_internal.h:127
float.h
deinvert_buffer
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
Definition: tiff.c:442
reverse.h
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:106
ff_lzw_decode
int ff_lzw_decode(LZWState *p, uint8_t *buf, int len)
Decode given number of bytes NOTE: the algorithm here is inspired from the LZW GIF decoder written by...
Definition: lzw.c:169
TIFF_ROWSPERSTRIP
@ TIFF_ROWSPERSTRIP
Definition: tiff.h:58
TiffContext::pattern
uint8_t pattern[4]
Definition: tiff.c:92
TIFF_GEOG_ELLIPSOID_GEOKEY
@ TIFF_GEOG_ELLIPSOID_GEOKEY
Definition: tiff.h:151
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
TIFF_GEO_KEY_USER_DEFINED
#define TIFF_GEO_KEY_USER_DEFINED
Definition: tiff_data.h:120
TIFF_PROJECTION_GEOKEY
@ TIFF_PROJECTION_GEOKEY
Definition: tiff.h:159
TIFF_PROJ_LINEAR_UNITS_GEOKEY
@ TIFF_PROJ_LINEAR_UNITS_GEOKEY
Definition: tiff.h:161
TIFF_RAW
@ TIFF_RAW
Definition: tiff.h:126
ff_lzw_decode_close
av_cold void ff_lzw_decode_close(LZWState **p)
Definition: lzw.c:118
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
TIFF_GEO_DOUBLE_PARAMS
@ TIFF_GEO_DOUBLE_PARAMS
Definition: tiff.h:95
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
AV_PIX_FMT_BAYER_GRBG16
#define AV_PIX_FMT_BAYER_GRBG16
Definition: pixfmt.h:557
TiffGeoTagKeyName
Definition: tiff.h:220
TIFF_PHOTOMETRIC_WHITE_IS_ZERO
@ TIFF_PHOTOMETRIC_WHITE_IS_ZERO
Definition: tiff.h:190
thread.h
TIFF_PACKBITS
@ TIFF_PACKBITS
Definition: tiff.h:134
TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
@ TIFF_GEOG_PRIME_MERIDIAN_GEOKEY
Definition: tiff.h:146
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
TiffContext::is_jpeg
int is_jpeg
Definition: tiff.c:118
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
dng_process_color16
static uint16_t av_always_inline dng_process_color16(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Map stored raw sensor values into linear reference values (see: DNG Specification - Chapter 5)
Definition: tiff.c:286
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
TIFF_GEO_KEY_UNDEFINED
#define TIFF_GEO_KEY_UNDEFINED
Definition: tiff_data.h:119
tiff_options
static const AVOption tiff_options[]
Definition: tiff.c:2466
TiffContext::get_thumbnail
int get_thumbnail
Definition: tiff.c:71
TIFF_PHOTOMETRIC_LINEAR_RAW
@ TIFF_PHOTOMETRIC_LINEAR_RAW
Definition: tiff.h:203
TIFF_FILL_ORDER
@ TIFF_FILL_ORDER
Definition: tiff.h:51
TIFF_PHOTOMETRIC_ALPHA_MASK
@ TIFF_PHOTOMETRIC_ALPHA_MASK
Definition: tiff.h:194
TiffContext::deinvert_buf_size
int deinvert_buf_size
Definition: tiff.c:121
AV_PIX_FMT_GRAY16BE
@ AV_PIX_FMT_GRAY16BE
Y , 16bpp, big-endian.
Definition: pixfmt.h:104
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
TIFF_DATE
@ TIFF_DATE
Definition: tiff.h:72
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:318
TIFF_TILE_BYTE_COUNTS
@ TIFF_TILE_BYTE_COUNTS
Definition: tiff.h:80
ff_ccitt_unpack
int ff_ccitt_unpack(AVCodecContext *avctx, const uint8_t *src, int srcsize, uint8_t *dst, int height, int stride, enum TiffCompr compr, int opts)
unpack data compressed with CCITT Group 3 1/2-D or Group 4 method
Definition: faxcompr.c:393
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
unpack_yuv
static void unpack_yuv(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum)
Definition: tiff.c:469
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
tiff_set_type
static void tiff_set_type(TiffContext *s, enum TiffType tiff_type)
Definition: tiff.c:131
dng_decode_tiles
static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, const AVPacket *avpkt)
Definition: tiff.c:969
inflate
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:194
TIFF_YCBCR_SUBSAMPLING
@ TIFF_YCBCR_SUBSAMPLING
Definition: tiff.h:84
TIFF_MAKE
@ TIFF_MAKE
Definition: tiff.h:54
GetBitContext
Definition: get_bits.h:108
TIFF_GEOG_GEODETIC_DATUM_GEOKEY
@ TIFF_GEOG_GEODETIC_DATUM_GEOKEY
Definition: tiff.h:145
TiffContext::deinvert_buf
uint8_t * deinvert_buf
Definition: tiff.c:120
TiffContext::tile_length
int tile_length
Definition: tiff.c:116
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
TIFF_T6OPTIONS
@ TIFF_T6OPTIONS
Definition: tiff.h:68
val
static double val(void *priv, double ch)
Definition: aeval.c:77
horizontal_fill
static void av_always_inline horizontal_fill(TiffContext *s, unsigned int bpp, uint8_t *dst, int usePtr, const uint8_t *src, uint8_t c, int width, int offset)
Definition: tiff.c:386
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
TiffContext::color_matrix
float color_matrix[3][4]
Definition: tiff.c:97
TIFF_VERTICAL_CS_TYPE_GEOKEY
@ TIFF_VERTICAL_CS_TYPE_GEOKEY
Definition: tiff.h:181
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:511
TIFF_SOFTWARE_NAME
@ TIFF_SOFTWARE_NAME
Definition: tiff.h:71
FF_LZW_TIFF
@ FF_LZW_TIFF
Definition: lzw.h:39
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
TiffContext::as_shot_neutral
float as_shot_neutral[4]
Definition: tiff.c:95
AVCOL_TRC_GAMMA22
@ AVCOL_TRC_GAMMA22
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:649
TiffContext::geotags
TiffGeoTag * geotags
Definition: tiff.c:126
DNG_LINEARIZATION_TABLE
@ DNG_LINEARIZATION_TABLE
Definition: tiff.h:103
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:79
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:51
TIFF_SHORT
@ TIFF_SHORT
Definition: tiff_common.h:39
get_geokey_val
static const char * get_geokey_val(int key, uint16_t val)
Definition: tiff.c:187
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
TiffGeoTag
Definition: tiff.h:212
TIFF_GRAY_RESPONSE_CURVE
@ TIFF_GRAY_RESPONSE_CURVE
Definition: tiff.h:66
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TiffContext::rps
int rps
Definition: tiff.c:108
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:528
TIFF_SUBFILE
@ TIFF_SUBFILE
Definition: tiff.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
TiffContext::premultiply
float premultiply[4]
Definition: tiff.c:99
TiffContext::camera_calibration
float camera_calibration[4][4]
Definition: tiff.c:98
CINEMADNG_T_STOP
@ CINEMADNG_T_STOP
Definition: tiff.h:119
bytestream2_init_writer
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:147
float
float
Definition: af_crystalizer.c:122
AV_PIX_FMT_GBRAP16BE
@ AV_PIX_FMT_GBRAP16BE
planar GBRA 4:4:4:4 64bpp, big-endian
Definition: pixfmt.h:213
TiffContext::stripsize
int stripsize
Definition: tiff.c:110
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
attributes_internal.h
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:341
tiff_proj_cs_type_codes
static const TiffGeoTagKeyName tiff_proj_cs_type_codes[]
Definition: tiff_data.h:559
intreadwrite.h
TIFF_G4
@ TIFF_G4
Definition: tiff.h:129
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_PIX_FMT_GBRP16LE
@ AV_PIX_FMT_GBRP16LE
planar GBR 4:4:4 48bpp, little-endian
Definition: pixfmt.h:172
TiffContext::width
int width
Definition: tiff.c:74
AV_PIX_FMT_BAYER_BGGR8
@ AV_PIX_FMT_BAYER_BGGR8
bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
Definition: pixfmt.h:285
g
const char * g
Definition: vf_curves.c:128
TiffType
TiffType
TIFF types in ascenting priority (last in the list is highest)
Definition: tiff.h:34
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1048
ff_lzw_decode_open
av_cold void ff_lzw_decode_open(LZWState **p)
Definition: lzw.c:113
TIFF_STRIP_SIZE
@ TIFF_STRIP_SIZE
Definition: tiff.h:59
fminf
float fminf(float, float)
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:702
TiffContext::yuv_line
uint8_t * yuv_line
Definition: tiff.c:122
TIFF_GEOGRAPHIC_TYPE_GEOKEY
@ TIFF_GEOGRAPHIC_TYPE_GEOKEY
Definition: tiff.h:143
dng_decode_jpeg
static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, int tile_byte_count, int dst_x, int dst_y, int w, int h)
Definition: tiff.c:650
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
TIFF_STRING
@ TIFF_STRING
Definition: tiff_common.h:38
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
TIFF_PHOTOMETRIC_LOG_L
@ TIFF_PHOTOMETRIC_LOG_L
Definition: tiff.h:201
TiffContext::use_color_matrix
int use_color_matrix
Definition: tiff.c:91
ff_tadd_shorts_metadata
int ff_tadd_shorts_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, int is_signed, AVDictionary **metadata)
Adds count shorts converted to a string into the metadata dictionary.
Definition: tiff_common.c:166
channels
channels
Definition: aptx.h:31
decode.h
get_bits.h
AV_RL16
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:94
TiffContext::get_page
uint16_t get_page
Definition: tiff.c:70
LZWState
Definition: lzw.c:46
TIFF_IMAGE_DESCRIPTION
@ TIFF_IMAGE_DESCRIPTION
Definition: tiff.h:53
AVCodecContext::max_pixels
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:1782
TiffContext::is_bayer
int is_bayer
Definition: tiff.c:90
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
key
const char * key
Definition: hwcontext_opencl.c:189
TiffContext::jpgframe
AVFrame * jpgframe
Definition: tiff.c:67
TiffContext::compr
enum TiffCompr compr
Definition: tiff.c:79
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:326
TiffContext::photometric
enum TiffPhotometric photometric
Definition: tiff.c:80
AV_PIX_FMT_RGBA
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:100
EXTERN
#define EXTERN
Definition: attributes_internal.h:34
search_keyval
static const char * search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
Definition: tiff.c:178
AV_PIX_FMT_BAYER_RGGB8
@ AV_PIX_FMT_BAYER_RGGB8
bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
Definition: pixfmt.h:286
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
AV_PIX_FMT_BAYER_BGGR16
#define AV_PIX_FMT_BAYER_BGGR16
Definition: pixfmt.h:554
if
if(ret)
Definition: filter_design.txt:179
dng_process_color8
static uint16_t av_always_inline dng_process_color8(uint16_t value, const uint16_t *lut, float black_level, float scale_factor)
Definition: tiff.c:305
ff_ccitt_unpack_init
av_cold void ff_ccitt_unpack_init(void)
initialize unpacker code
Definition: faxcompr.c:119
TiffContext::geotag_count
int geotag_count
Definition: tiff.c:125
TiffContext::height
int height
Definition: tiff.c:74
TIFF_PAGE_NAME
@ TIFF_PAGE_NAME
Definition: tiff.h:63
TIFF_VERTICAL_UNITS_GEOKEY
@ TIFF_VERTICAL_UNITS_GEOKEY
Definition: tiff.h:184
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:221
TIFF_LZW
@ TIFF_LZW
Definition: tiff.h:130
tiff_init
static av_cold int tiff_init(AVCodecContext *avctx)
Definition: tiff.c:2410
TiffContext::as_shot_white
float as_shot_white[4]
Definition: tiff.c:96
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_tget_short
unsigned ff_tget_short(GetByteContext *gb, int le)
Reads a short from the bytestream using given endianness.
Definition: tiff_common.c:45
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
AV_PIX_FMT_GBRAPF32BE
@ AV_PIX_FMT_GBRAPF32BE
IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian.
Definition: pixfmt.h:343
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
TIFF_PHOTOMETRIC_YCBCR
@ TIFF_PHOTOMETRIC_YCBCR
Definition: tiff.h:196
TiffContext
Definition: tiff.c:59
AV_WB16
#define AV_WB16(p, v)
Definition: intreadwrite.h:401
TiffContext::is_thumbnail
int is_thumbnail
Definition: tiff.c:87
tiff_data.h
TiffContext::avctx
AVCodecContext * avctx
Definition: tiff.c:61
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AV_PIX_FMT_RGB48LE
@ AV_PIX_FMT_RGB48LE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as lit...
Definition: pixfmt.h:110
AV_PIX_FMT_YA16LE
@ AV_PIX_FMT_YA16LE
16 bits gray, 16 bits alpha (little-endian)
Definition: pixfmt.h:210
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:83
tiff.h
TIFF_PHOTOMETRIC_PALETTE
@ TIFF_PHOTOMETRIC_PALETTE
Definition: tiff.h:193
tiff_common.h
TiffContext::get_subimage
int get_subimage
Definition: tiff.c:69
DNG_AS_SHOT_NEUTRAL
@ DNG_AS_SHOT_NEUTRAL
Definition: tiff.h:111
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:240
AV_PIX_FMT_RGBA64LE
@ AV_PIX_FMT_RGBA64LE
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:203
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
TIFF_MODEL_TIEPOINT
@ TIFF_MODEL_TIEPOINT
Definition: tiff.h:90
TIFF_PHOTOMETRIC_CIE_LAB
@ TIFF_PHOTOMETRIC_CIE_LAB
Definition: tiff.h:197
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
TiffContext::black_level
float black_level[4]
Definition: tiff.c:100
AV_PIX_FMT_BAYER_GBRG16
#define AV_PIX_FMT_BAYER_GBRG16
Definition: pixfmt.h:556
MJpegDecodeContext
Definition: mjpegdec.h:54
TIFF_PAL
@ TIFF_PAL
Definition: tiff.h:76
RET_GEOKEY_TYPE
#define RET_GEOKEY_TYPE(TYPE, array)
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
TIFF_BYTE
@ TIFF_BYTE
Definition: tiff_common.h:37
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:81
TIFF_ARTIST
@ TIFF_ARTIST
Definition: tiff.h:73
CINEMADNG_TIME_CODES
@ CINEMADNG_TIME_CODES
Definition: tiff.h:117
TIFF_SAMPLES_PER_PIXEL
@ TIFF_SAMPLES_PER_PIXEL
Definition: tiff.h:57
TIFF_SRATIONAL
@ TIFF_SRATIONAL
Definition: tiff_common.h:46
close
av_cold void CBS_FUNC() close(CodedBitstreamContext **ctx_ptr)
Close a context and free all internal state.
Definition: cbs.c:140
TIFF_G3
@ TIFF_G3
Definition: tiff.h:128
TIFF_WIDTH
@ TIFF_WIDTH
Definition: tiff.h:46
TIFF_TILE_OFFSETS
@ TIFF_TILE_OFFSETS
Definition: tiff.h:79
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
error.h
TiffContext::palette
uint32_t palette[256]
Definition: tiff.c:76
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
PutByteContext
Definition: bytestream.h:37
ff_tread_tag
int ff_tread_tag(GetByteContext *gb, int le, unsigned *tag, unsigned *type, unsigned *count, int *next)
Reads the first 3 fields of a TIFF tag, which are the tag id, the tag type and the count of values fo...
Definition: tiff_common.c:254
AV_PIX_FMT_RGBF32BE
@ AV_PIX_FMT_RGBF32BE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., big-endian.
Definition: pixfmt.h:420
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:495
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:536
TIFF_TYPE_CINEMADNG
@ TIFF_TYPE_CINEMADNG
Digital Negative (DNG) image part of an CinemaDNG image sequence.
Definition: tiff.h:40
height
#define height
Definition: dsp.h:85
codec_internal.h
AV_PIX_FMT_FLAG_RGB
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
Definition: pixdesc.h:136
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
TiffContext::analog_balance
float analog_balance[4]
Definition: tiff.c:94
lzw.h
LZW decoding routines.
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
DNG_CAMERA_CALIBRATION1
@ DNG_CAMERA_CALIBRATION1
Definition: tiff.h:108
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
TIFF_DOUBLE
@ TIFF_DOUBLE
Definition: tiff_common.h:48
bps
unsigned bps
Definition: movenc.c:1912
AV_PIX_FMT_YA16BE
@ AV_PIX_FMT_YA16BE
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:209
TIFF_GEO_ASCII_PARAMS
@ TIFF_GEO_ASCII_PARAMS
Definition: tiff.h:96
size
int size
Definition: twinvq_data.h:10344
xyz2rgb
static const float xyz2rgb[3][3]
Definition: tiff.c:1893
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:2003
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: codec_internal.h:54
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
TiffContext::bpp
unsigned int bpp
Definition: tiff.c:75
AVFrameSideData::data
uint8_t * data
Definition: frame.h:267
TIFF_GT_MODEL_TYPE_GEOKEY
@ TIFF_GT_MODEL_TYPE_GEOKEY
Definition: tiff.h:140
TiffContext::jpkt
AVPacket * jpkt
Definition: tiff.c:66
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:497
TIFF_DOCUMENT_NAME
@ TIFF_DOCUMENT_NAME
Definition: tiff.h:52
TiffContext::fill_order
int fill_order
Definition: tiff.c:85
TIFF_MODEL_TRANSFORMATION
@ TIFF_MODEL_TRANSFORMATION
Definition: tiff.h:92
TIFF_TILE_LENGTH
@ TIFF_TILE_LENGTH
Definition: tiff.h:78
TIFF_MODEL
@ TIFF_MODEL
Definition: tiff.h:55
AV_WL16
#define AV_WL16(p, v)
Definition: intreadwrite.h:408
TiffContext::white_level
unsigned white_level
Definition: tiff.c:102
TiffContext::stripsizesoff
int stripsizesoff
Definition: tiff.c:110
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
line
Definition: graph2dot.c:48
attributes.h
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
TiffContext::planar
int planar
Definition: tiff.c:81
TIFF_COMPR
@ TIFF_COMPR
Definition: tiff.h:49
TIFF_HEIGHT
@ TIFF_HEIGHT
Definition: tiff.h:47
cmp_id_key
static int cmp_id_key(const void *id, const void *k)
Definition: tiff.c:173
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
tiff_decoder_class
static const AVClass tiff_decoder_class
Definition: tiff.c:2473
DNG_BLACK_LEVEL
@ DNG_BLACK_LEVEL
Definition: tiff.h:104
TIFF_T4OPTIONS
@ TIFF_T4OPTIONS
Definition: tiff.h:67
TIFF_PHOTOMETRIC_LOG_LUV
@ TIFF_PHOTOMETRIC_LOG_LUV
Definition: tiff.h:202
TiffContext::le
int le
Definition: tiff.c:78
CINEMADNG_REEL_NAME
@ CINEMADNG_REEL_NAME
Definition: tiff.h:120
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:704
TiffContext::subsampling
int subsampling[2]
Definition: tiff.c:82
TIFF_PAGE_NUMBER
@ TIFF_PAGE_NUMBER
Definition: tiff.h:70
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: tiff.c:1925
AV_PIX_FMT_RGB48BE
@ AV_PIX_FMT_RGB48BE
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:109
TIFF_PHOTOMETRIC_CFA
@ TIFF_PHOTOMETRIC_CFA
Definition: tiff.h:200
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
ff_tget_long
unsigned ff_tget_long(GetByteContext *gb, int le)
Reads a long from the bytestream using given endianness.
Definition: tiff_common.c:51
TIFF_PHOTOMETRIC_BLACK_IS_ZERO
@ TIFF_PHOTOMETRIC_BLACK_IS_ZERO
Definition: tiff.h:191
TiffContext::tile_width
int tile_width
Definition: tiff.c:116
TiffContext::fax_opts
int fax_opts
Definition: tiff.c:83
ff_lzw_decode_init
int ff_lzw_decode_init(LZWState *p, int csize, const uint8_t *buf, int buf_size, int mode)
Initialize LZW decoder.
Definition: lzw.c:131
TiffContext::bppcount
unsigned int bppcount
Definition: tiff.c:75
unpack_gray
static void unpack_gray(TiffContext *s, AVFrame *p, const uint8_t *src, int lnum, int width, int bpp)
Definition: tiff.c:455
TiffContext::res
uint32_t res[4]
Definition: tiff.c:86
TIFF_MODEL_PIXEL_SCALE
@ TIFF_MODEL_PIXEL_SCALE
Definition: tiff.h:91
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
TIFF_PLANAR
@ TIFF_PLANAR
Definition: tiff.h:62
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AV_PIX_FMT_BAYER_GBRG8
@ AV_PIX_FMT_BAYER_GBRG8
bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
Definition: pixfmt.h:287
TIFF_TYPE_TIFF
@ TIFF_TYPE_TIFF
TIFF image based on the TIFF 6.0 or TIFF/EP (ISO 12234-2) specifications.
Definition: tiff.h:36
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MJpegDecodeContext::bayer
int bayer
Definition: mjpegdec.h:75
AV_OPT_FLAG_VIDEO_PARAM
#define AV_OPT_FLAG_VIDEO_PARAM
Definition: opt.h:358
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
planes
static const struct @493 planes[]
AVCodecContext::idct_algo
int idct_algo
IDCT algorithm, see FF_IDCT_* below.
Definition: avcodec.h:1526
TIFF_TYPE_DNG
@ TIFF_TYPE_DNG
Digital Negative (DNG) image.
Definition: tiff.h:38
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
DNG_VERSION
@ DNG_VERSION
Definition: tiff.h:101
TiffContext::stripoff
int stripoff
Definition: tiff.c:110
len
int len
Definition: vorbis_enc_data.h:426
AV_PIX_FMT_GBRPF32LE
@ AV_PIX_FMT_GBRPF32LE
IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian.
Definition: pixfmt.h:342
TIFF_PHOTOMETRIC_NONE
@ TIFF_PHOTOMETRIC_NONE
Definition: tiff.h:189
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
TIFF_CFA_PATTERN
@ TIFF_CFA_PATTERN
Definition: tiff.h:88
TIFF_STRIP_OFFS
@ TIFF_STRIP_OFFS
Definition: tiff.h:56
FF_CODEC_CAP_ICC_PROFILES
#define FF_CODEC_CAP_ICC_PROFILES
Codec supports embedded ICC profiles (AV_FRAME_DATA_ICC_PROFILE).
Definition: codec_internal.h:81
TIFF_TILE_WIDTH
@ TIFF_TILE_WIDTH
Definition: tiff.h:77
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
AV_PIX_FMT_GBRAP16LE
@ AV_PIX_FMT_GBRAP16LE
planar GBRA 4:4:4:4 64bpp, little-endian
Definition: pixfmt.h:214
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
tag
uint32_t tag
Definition: movenc.c:1911
ret
ret
Definition: filter_design.txt:187
TIFF_HOST_COMPUTER
@ TIFF_HOST_COMPUTER
Definition: tiff.h:74
DNG_WHITE_LEVEL
@ DNG_WHITE_LEVEL
Definition: tiff.h:105
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
TiffContext::palette_is_set
int palette_is_set
Definition: tiff.c:77
TIFF_BPP
@ TIFF_BPP
Definition: tiff.h:48
d65_white
static const float d65_white[3]
Definition: tiff.c:129
pos
unsigned int pos
Definition: spdifenc.c:414
get_geokey_name
static const char * get_geokey_name(int key)
Definition: tiff.c:144
TIFF_PHOTOMETRIC
@ TIFF_PHOTOMETRIC
Definition: tiff.h:50
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
ff_tget_double
double ff_tget_double(GetByteContext *gb, int le)
Reads a double from the bytestream using given endianness.
Definition: tiff_common.c:57
TiffPhotometric
TiffPhotometric
list of TIFF, TIFF/AP and DNG PhotometricInterpretation (TIFF_PHOTOMETRIC) values
Definition: tiff.h:188
TiffContext::last_tag
unsigned last_tag
Definition: tiff.c:88
AVCodecContext
main external API structure.
Definition: avcodec.h:431
ADD_METADATA
#define ADD_METADATA(count, name, sep)
AV_PIX_FMT_RGBAF32BE
@ AV_PIX_FMT_RGBAF32BE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., big-endian.
Definition: pixfmt.h:423
TiffContext::sstype
int sstype
Definition: tiff.c:108
again
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining again
Definition: filter_design.txt:25
TIFF_PREDICTOR
@ TIFF_PREDICTOR
Definition: tiff.h:75
TIFF_RATIONAL
@ TIFF_RATIONAL
Definition: tiff_common.h:41
bytestream2_seek_p
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:236
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:688
TiffContext::lzw
LZWState * lzw
Definition: tiff.c:111
set_sar
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
Definition: tiff.c:1231
TIFF_LZMA
@ TIFF_LZMA
Definition: tiff.h:136
tiff_unpack_fax
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride, const uint8_t *src, int size, int width, int lines)
Definition: tiff.c:629
TIFF_GEO_KEY_DIRECTORY
@ TIFF_GEO_KEY_DIRECTORY
Definition: tiff.h:94
CINEMADNG_CAMERA_LABEL
@ CINEMADNG_CAMERA_LABEL
Definition: tiff.h:121
TiffContext::is_tiled
int is_tiled
Definition: tiff.c:114
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
ff_tdecode_header
int ff_tdecode_header(GetByteContext *gb, int *le, int *ifd_offset)
Decodes a TIFF header from the input bytestream and sets the endianness in *le and the offset to the first IFD in *ifd_offset accordingly.
Definition: tiff_common.c:229
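A minimal sketch of calling this helper on a raw buffer (internal libavcodec API; error handling reduced to passing the return value through, and the wrapper name is illustrative):

#include <stdint.h>
#include "bytestream.h"
#include "tiff_common.h"

/* On success, 'le' holds the endianness flag and 'ifd_offset' the byte
 * offset of the first IFD within 'buf'; a negative AVERROR code otherwise. */
static int probe_tiff_header(const uint8_t *buf, int size, int *le, int *ifd_offset)
{
    GetByteContext gb;
    bytestream2_init(&gb, buf, size);
    return ff_tdecode_header(&gb, le, ifd_offset);
}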
AV_PIX_FMT_RGBF32LE
@ AV_PIX_FMT_RGBF32LE
IEEE-754 single precision packed RGB 32:32:32, 96bpp, RGBRGB..., little-endian.
Definition: pixfmt.h:421
RET_GEOKEY_STR
#define RET_GEOKEY_STR(TYPE, array)
TIFF_YRES
@ TIFF_YRES
Definition: tiff.h:61
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
av_clip_uint16
#define av_clip_uint16
Definition: common.h:112
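av_clip_uint16 clamps an int to the 0..65535 range; a small sketch of the kind of use it gets when widening samples (the function name is illustrative):

#include <stdint.h>
#include "libavutil/common.h"

/* Expand a 12-bit sample to the full 16-bit range, clamping out-of-range input. */
static uint16_t scale12_to_16(int v)
{
    return av_clip_uint16((v << 4) | (v >> 8));
}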
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
TIFF_ICC_PROFILE
@ TIFF_ICC_PROFILE
Definition: tiff.h:93
faxcompr.h
DNG_CAMERA_CALIBRATION2
@ DNG_CAMERA_CALIBRATION2
Definition: tiff.h:109
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
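av_strdup allocates the copy with the av_malloc family, so it must be released with av_free/av_freep; a minimal sketch (the helper name is illustrative):

#include <errno.h>
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int store_name(char **dst, const char *name)
{
    char *copy = av_strdup(name);   /* returns NULL on allocation failure */
    if (!copy)
        return AVERROR(ENOMEM);
    av_freep(dst);                  /* drop any previously stored value */
    *dst = copy;
    return 0;
}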
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:356
desc
const char * desc
Definition: libsvtav1.c:79
AV_PIX_FMT_RGBAF32LE
@ AV_PIX_FMT_RGBAF32LE
IEEE-754 single precision packed RGBA 32:32:32:32, 128bpp, RGBARGBA..., little-endian.
Definition: pixfmt.h:424
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_GRAY16LE
@ AV_PIX_FMT_GRAY16LE
Y , 16bpp, little-endian.
Definition: pixfmt.h:105
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
bytestream2_get_bufferu
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:277
init_image
static int init_image(TiffContext *s, AVFrame *frame)
Definition: tiff.c:1042
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:265
free_geotags
static void free_geotags(TiffContext *const s)
Definition: tiff.c:136
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
TIFF_DEFLATE
@ TIFF_DEFLATE
Definition: tiff.h:135
TIFF_PHOTOMETRIC_RGB
@ TIFF_PHOTOMETRIC_RGB
Definition: tiff.h:192
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:512
TIFF_SUB_IFDS
@ TIFF_SUB_IFDS
Definition: tiff.h:81
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
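A minimal sketch of adding an entry to an AVDictionary the way metadata is attached to a frame (key and helper name are illustrative; both strings are copied unless an AV_DICT_DONT_STRDUP_* flag is passed):

#include "libavutil/dict.h"

static int tag_software(AVDictionary **meta, const char *value)
{
    /* Returns 0 or a negative AVERROR code; the dictionary is created on demand
     * and should eventually be released with av_dict_free(). */
    return av_dict_set(meta, "Software", value, 0);
}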
dng_blit
static void av_always_inline dng_blit(TiffContext *s, uint8_t *dst, int dst_stride, const uint8_t *src, int src_stride, int width, int height, int is_single_comp, int is_u16, int odd_line)
Definition: tiff.c:313
tiff_unpack_strip
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, const uint8_t *src, int size, int strip_start, int lines)
Definition: tiff.c:740
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:80
DNG_COLOR_MATRIX1
@ DNG_COLOR_MATRIX1
Definition: tiff.h:106
TiffContext::tile_byte_counts_offset
int tile_byte_counts_offset
Definition: tiff.c:115
ff_tadd_doubles_metadata
int ff_tadd_doubles_metadata(int count, const char *name, const char *sep, GetByteContext *gb, int le, AVDictionary **metadata)
Adds count doubles converted to a string into the metadata dictionary.
Definition: tiff_common.c:145
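A hedged sketch of driving this helper (internal libavcodec API; the GetByteContext is assumed to be positioned at 'count' consecutive doubles, and the key name and separator are illustrative):

#include "libavutil/dict.h"
#include "bytestream.h"
#include "tiff_common.h"

static int add_double_params(GetByteContext *gb, int le, int count,
                             AVDictionary **metadata)
{
    /* Converts the doubles to one string value stored under the given key,
     * joined with the given separator. */
    return ff_tadd_doubles_metadata(count, "GeoDoubleParams", " ", gb, le, metadata);
}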
TiffContext::avctx_mjpeg
AVCodecContext * avctx_mjpeg
Definition: tiff.c:65
TIFF_XRES
@ TIFF_XRES
Definition: tiff.h:60
add_metadata
static int add_metadata(int count, int type, const char *name, const char *sep, TiffContext *s, AVFrame *frame)
Definition: tiff.c:272
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
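bytestream2_init wraps a plain buffer in a bounds-checked reader; a minimal sketch combining it with the checked getters (the unchecked *_bufferu variants, such as bytestream2_get_bufferu above, are only safe after an explicit size check):

#include <stdint.h>
#include "bytestream.h"

/* Count the bytes before the first zero, never reading past 'size'. */
static int c_string_length(const uint8_t *buf, int size)
{
    GetByteContext gb;
    int len = 0;
    bytestream2_init(&gb, buf, size);
    while (bytestream2_get_bytes_left(&gb) > 0 && bytestream2_get_byte(&gb))
        len++;
    return len;
}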
TiffCompr
TiffCompr
list of TIFF, TIFF/EP and DNG compression types
Definition: tiff.h:125
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:455
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:79
TIFF_GEOG_ANGULAR_UNITS_GEOKEY
@ TIFF_GEOG_ANGULAR_UNITS_GEOKEY
Definition: tiff.h:149
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
TiffContext::cur_page
uint16_t cur_page
Definition: tiff.c:106
h
h
Definition: vp9dsp_template.c:2070
AV_CODEC_ID_TIFF
@ AV_CODEC_ID_TIFF
Definition: codec_id.h:148
avstring.h
type_sizes
static const uint8_t type_sizes[14]
sizes of various TIFF field types (string size = 100)
Definition: tiff_common.h:53
width
#define width
Definition: dsp.h:85
AV_PIX_FMT_GRAY12
#define AV_PIX_FMT_GRAY12
Definition: pixfmt.h:509
TiffContext::predictor
int predictor
Definition: tiff.c:84
AV_PIX_FMT_BAYER_RGGB16
#define AV_PIX_FMT_BAYER_RGGB16
Definition: pixfmt.h:555
snprintf
#define snprintf
Definition: snprintf.h:34
ff_tget
unsigned ff_tget(GetByteContext *gb, int type, int le)
Reads a byte, short or long from the bytestream, depending on the given TIFF type, using the given endianness.
Definition: tiff_common.c:64
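A hedged sketch of fetching one integer tag value with ff_tget, assuming the TIFF_BYTE/TIFF_SHORT/TIFF_LONG field-type constants from tiff_common.h (the wrapper name is illustrative):

#include <limits.h>
#include "bytestream.h"
#include "tiff_common.h"

/* Return the value for supported integer field types, UINT_MAX otherwise. */
static unsigned read_tag_value(GetByteContext *gb, int type, int le)
{
    if (type != TIFF_BYTE && type != TIFF_SHORT && type != TIFF_LONG)
        return UINT_MAX;
    return ff_tget(gb, type, le);
}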
TIFF_PHOTOMETRIC_SEPARATED
@ TIFF_PHOTOMETRIC_SEPARATED
Definition: tiff.h:195
TiffContext::strips
int strips
Definition: tiff.c:108
TIFF_PROJECTED_CS_TYPE_GEOKEY
@ TIFF_PROJECTED_CS_TYPE_GEOKEY
Definition: tiff.h:157
CINEMADNG_FRAME_RATE
@ CINEMADNG_FRAME_RATE
Definition: tiff.h:118
TiffContext::sub_ifd
uint32_t sub_ifd
Definition: tiff.c:105
AV_PIX_FMT_BAYER_GRBG8
@ AV_PIX_FMT_BAYER_GRBG8
bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
Definition: pixfmt.h:288
src
#define src
Definition: vp8dsp.c:248
line
Definition: swscale.txt:40
TiffContext::yuv_line_size
unsigned int yuv_line_size
Definition: tiff.c:123
AV_RB16
#define AV_RB16(p)
Read an unsigned 16-bit big-endian value from memory.
Definition: bytestream.h:98
DNG_ANALOG_BALANCE
@ DNG_ANALOG_BALANCE
Definition: tiff.h:110
TIFF_GT_RASTER_TYPE_GEOKEY
@ TIFF_GT_RASTER_TYPE_GEOKEY
Definition: tiff.h:141