Go to the documentation of this file.
28 #define MAXELEN 0x7fff
37 c = bytestream2_get_byte(gb);
50 const float v =
val / 256.f;
63 scanline[0] = bytestream2_get_byte(gb);
64 scanline[1] = bytestream2_get_byte(gb);
65 scanline[2] = bytestream2_get_byte(gb);
66 scanline[3] = bytestream2_get_byte(gb);
68 if (scanline[0] == 1 &&
71 int run = scanline[3];
72 for (
int i =
run << rshift;
i > 0 &&
w > 0 && scanline >= start + 4;
i--) {
73 memcpy(scanline, scanline - 4, 4);
101 if (memcmp(
"#?RADIANCE\n",
line, 11))
106 if (sscanf(
line,
"PIXASPECT=%f\n", &sar) == 1)
108 }
while (
line[0] !=
'\n' &&
line[0]);
140 for (
int y = 0; y <
height; y++) {
141 float *dst_r = (
float *)(p->
data[2] + y * p->
linesize[2]);
142 float *dst_g = (
float *)(p->
data[0] + y * p->
linesize[0]);
143 float *dst_b = (
float *)(p->
data[1] + y * p->
linesize[1]);
147 if (width < MINELEN || width >
MAXELEN) {
154 i = bytestream2_peek_byte(&gb);
163 scanline[1] = bytestream2_get_byte(&gb);
164 scanline[2] = bytestream2_get_byte(&gb);
165 i = bytestream2_get_byte(&gb);
167 if (scanline[1] != 2 || scanline[2] & 128) {
176 for (
int i = 0;
i < 4;
i++) {
180 int run = bytestream2_get_byte(&gb);
182 uint8_t
val = bytestream2_get_byte(&gb);
190 }
else if (
run > 0) {
194 scanline[j] = bytestream2_get_byte(&gb);
202 for (
int x = 0; x <
width; x++) {
206 memcpy(rgbe, p->
data[0] + y * p->
linesize[0] + x * 4, 4);
207 expo = rgbe[3] - 128;
209 dst_r[x] =
convert(expo, rgbe[0]);
210 dst_b[x] =
convert(expo, rgbe[2]);
211 dst_g[x] =
convert(expo, rgbe[1]);
This structure describes decoded (raw) audio or video data.
static int decompress(uint8_t *scanline, int w, GetByteContext *gb, const uint8_t *start)
static float convert(int expo, int val)
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
const FFCodec ff_hdr_decoder
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions: codecs whose streams don't reset across frames will not work because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before the decode process starts; the same applies to code calling up to the point where the decode process starts. Set FF_CODEC_CAP_ALLOCATE_PROGRESS in FFCodec.caps_internal and use ff_thread_get_buffer() to allocate frames; otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has been decoded. A good place to put this is where draw_horiz_band() is called — add it there if draw_horiz_band() isn't called anywhere.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
AVCodec p
The public AVCodec.
enum AVDiscard skip_frame
Skip decoding for selected frames.
static double val(void *priv, double ch)
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
#define FF_CODEC_DECODE_CB(func)
static int hdr_get_line(GetByteContext *gb, uint8_t *buffer, int size)
#define CODEC_LONG_NAME(str)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
@ AV_PICTURE_TYPE_I
Intra.
Undefined Behavior: in the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, and accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can, and in some cases has, led to effects beyond the output of the computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the final output of the computation does not change even when intermediate values overflow.
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
enum AVPictureType pict_type
Picture type of the frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define AV_PIX_FMT_GBRPF32
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
#define i(width, name, range_min, range_max)
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
const char * name
Name of the codec implementation.
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
main external API structure.
static int hdr_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
The frame and frame-reference mechanism is intended to avoid, as much as possible, expensive copies of that data while still allowing the filters to produce correct results. The data is stored in buffers represented by AVFrame structures. Several references can point to the same frame buffer.
This structure stores compressed data.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
@ AV_CODEC_ID_RADIANCE_HDR
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.