#define JP2_SIG_TYPE    0x6A502020
#define JP2_SIG_VALUE   0x0D0A870A
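These two constants are the box type and fixed payload of a JP2 signature box; a raw J2K codestream has no such box. A minimal sketch of a signature probe that could drive the OPJ_CODEC_JP2 / OPJ_CODEC_J2K choice made later in the decoder (the helper name is illustrative, not taken from this file):

#include <stdint.h>
#include "libavutil/intreadwrite.h"

/* Illustrative helper (an assumption, not this file's code): report whether a
 * packet starts with a JP2 signature box. Offsets follow the ISO box layout:
 * 4-byte box size, 4-byte box type, then the fixed 4-byte payload. */
static int packet_looks_like_jp2(const uint8_t *buf, int buf_size)
{
    return buf_size >= 12 &&
           AV_RB32(buf + 4) == JP2_SIG_TYPE &&
           AV_RB32(buf + 8) == JP2_SIG_VALUE;
}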
#define RGB_PIXEL_FORMATS   AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, \
                            AV_PIX_FMT_RGB48, AV_PIX_FMT_RGBA64

#define GRAY_PIXEL_FORMATS  AV_PIX_FMT_GRAY8, AV_PIX_FMT_YA8, \
                            AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, \
                            AV_PIX_FMT_GRAY16, AV_PIX_FMT_YA16

#define YUV_PIXEL_FORMATS   AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUVA420P, \
                            AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA422P, \
                            AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA444P, \
                            AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9, \
                            AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9, \
                            AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, \
                            AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10, \
                            AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, \
                            AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14, \
                            AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, \
                            AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16

#define XYZ_PIXEL_FORMATS   AV_PIX_FMT_XYZ12
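The candidate tables used below (libopenjpeg_rgb_pix_fmts, libopenjpeg_gray_pix_fmts, libopenjpeg_yuv_pix_fmts and libopenjpeg_all_pix_fmts) are presumably just these macro lists expanded into static arrays; a sketch of that expansion, assuming nothing beyond the macros above:

static enum AVPixelFormat libopenjpeg_rgb_pix_fmts[]  = { RGB_PIXEL_FORMATS };
static enum AVPixelFormat libopenjpeg_gray_pix_fmts[] = { GRAY_PIXEL_FORMATS };
static enum AVPixelFormat libopenjpeg_yuv_pix_fmts[]  = { YUV_PIXEL_FORMATS };
static enum AVPixelFormat libopenjpeg_all_pix_fmts[]  = { RGB_PIXEL_FORMATS,
                                                          GRAY_PIXEL_FORMATS,
                                                          YUV_PIXEL_FORMATS,
                                                          XYZ_PIXEL_FORMATS };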
/* stream_read() (excerpt): copy up to nb_bytes from the packet buffer into
 * out_buffer; (OPJ_SIZE_T)-1 signals end of data to libopenjpeg */
    if (reader->pos == reader->size) {
        return (OPJ_SIZE_T)-1;
    }
    remaining = reader->size - reader->pos;
    if (nb_bytes > remaining) {
        nb_bytes = remaining;
    }
    memcpy(out_buffer, reader->buffer + reader->pos, nb_bytes);
    reader->pos += (int)nb_bytes;
/* stream_skip() (excerpt): clamp a relative skip so it stays inside the
 * packet buffer; backward skips are handled first, then forward skips */
    if (reader->pos == 0) {
        return (OPJ_SIZE_T)-1;
    }
    if (nb_bytes + reader->pos < 0) {
        nb_bytes = -reader->pos;
    }
    ...
    if (reader->pos == reader->size) {
        return (OPJ_SIZE_T)-1;
    }
    remaining = reader->size - reader->pos;
    if (nb_bytes > remaining) {
        nb_bytes = remaining;
    }
    ...
    reader->pos += (int)nb_bytes;
/* stream_seek() (excerpt): absolute reposition; out-of-range offsets are rejected */
    if (nb_bytes < 0 || nb_bytes > reader->size) {
        return OPJ_FALSE;
    }
    reader->pos = (int)nb_bytes;
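stream_read(), stream_skip() and stream_seek() above operate on a small reader state wrapped around the packet buffer. A sketch of that state and of how the three callbacks would be attached to an OpenJPEG input stream; the struct layout is inferred from the fields the callbacks use, and the helper name is illustrative:

typedef struct BufferReader {
    int pos;                /* current read offset into the packet */
    int size;               /* total packet size in bytes          */
    const uint8_t *buffer;  /* packet payload                      */
} BufferReader;

/* Illustrative helper: wire the callbacks into an OpenJPEG read stream using
 * the public OpenJPEG 2.x stream setters. */
static opj_stream_t *attach_packet_stream(BufferReader *reader)
{
    opj_stream_t *stream = opj_stream_default_create(OPJ_STREAM_READ);
    if (!stream)
        return NULL;
    opj_stream_set_read_function(stream, stream_read);
    opj_stream_set_skip_function(stream, stream_skip);
    opj_stream_set_seek_function(stream, stream_seek);
    opj_stream_set_user_data(stream, reader, NULL);
    opj_stream_set_user_data_length(stream, reader->size);
    return stream;
}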
/* libopenjpeg_matches_pix_fmt() (excerpt): a candidate format matches when it
 * has the same number of components as the JPEG 2000 image and every component
 * offers at least the required precision and the same subsampling */
    if (desc->nb_components != image->numcomps) {
        return 0;
    }

    switch (desc->nb_components) {
    case 4:
        match = match &&
                desc->comp[3].depth >= image->comps[3].prec &&
                1 == image->comps[3].dx &&
                1 == image->comps[3].dy;
        /* fall through */
    case 3:
        match = match &&
                desc->comp[2].depth >= image->comps[2].prec &&
                1 << desc->log2_chroma_w == image->comps[2].dx &&
                1 << desc->log2_chroma_h == image->comps[2].dy;
        /* fall through */
    case 2:
        match = match &&
                desc->comp[1].depth >= image->comps[1].prec &&
                1 << desc->log2_chroma_w == image->comps[1].dx &&
                1 << desc->log2_chroma_h == image->comps[1].dy;
        /* fall through */
    case 1:
        match = match &&
                desc->comp[0].depth >= image->comps[0].prec &&
                1 == image->comps[0].dx &&
                1 == image->comps[0].dy;
        break;
    }
/* libopenjpeg_guess_pix_fmt() (excerpt): pick a candidate list by colour
 * space, then return the first entry accepted by libopenjpeg_matches_pix_fmt() */
    int possible_fmts_nb = 0;

    switch (image->color_space) {
    case OPJ_CLRSPC_SRGB:
        ...
    case OPJ_CLRSPC_GRAY:
        ...
    case OPJ_CLRSPC_SYCC:
        ...
    }
    ...
        return possible_fmts[index];
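A minimal sketch of the selection loop the elided lines implement, assuming possible_fmts points at one of the candidate arrays sketched above and possible_fmts_nb holds its length:

    for (index = 0; index < possible_fmts_nb; ++index) {
        if (libopenjpeg_matches_pix_fmt(image, possible_fmts[index])) {
            return possible_fmts[index];
        }
    }
    return AV_PIX_FMT_NONE;   /* no candidate can represent this image */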
/* libopenjpeg_ispacked() (excerpt): a format counts as packed when all of its
 * components live on the same data plane */
    int i, component_plane;
    ...
    component_plane = desc->comp[0].plane;
    for (i = 1; i < desc->nb_components; i++)
        if (component_plane != desc->comp[i].plane)
            return 0;
    return 1;
/* libopenjpeg_copy_to_packed8() (excerpt): interleave the 8-bit components
 * into picture->data[0]; signed samples are re-centred by adding 0x80 */
    for (y = 0; y < picture->height; y++) {
        ...
        for (c = 0; c < image->numcomps; c++)
            *img_ptr++ = 0x80 * image->comps[c].sgnd + image->comps[c].data[index];
    }
/* libopenjpeg_copy_to_packed16() (excerpt): same interleaving for components
 * wider than 8 bits; signed samples are offset by half the component range */
    for (x = 0; x < image->numcomps; x++)
        ...                               /* per-component bit-depth adjustment */
    for (y = 0; y < picture->height; y++) {
        ...
        img_ptr = (uint16_t *) (picture->data[0] + y * picture->linesize[0]);
        for (c = 0; c < image->numcomps; c++)
            *img_ptr++ = (1 << (image->comps[c].prec - 1)) * image->comps[c].sgnd +
                         ...;
    }
/* libopenjpeg_copyto8() (excerpt): planar copy, one JPEG 2000 component per
 * picture plane */
    comp_data = image->comps[index].data;
    for (y = 0; y < image->comps[index].h; y++) {
        ...
        for (x = 0; x < image->comps[index].w; x++) {
            *img_ptr = 0x80 * image->comps[index].sgnd + *comp_data;
            ...
        }
    }
/* libopenjpeg_copyto16() (excerpt): planar copy for components wider than 8 bits */
    for (x = 0; x < image->numcomps; x++)
        ...                               /* per-component bit-depth adjustment */
    comp_data = image->comps[index].data;
    for (y = 0; y < image->comps[index].h; y++) {
        ...
        for (x = 0; x < image->comps[index].w; x++) {
            *img_ptr = (1 << (image->comps[index].prec - 1)) * image->comps[index].sgnd +
                       ...;
        }
    }
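All four copy routines share one sample conversion: signed JPEG 2000 components are shifted up by half their range so they fit the unsigned representation of the AVFrame planes, while unsigned components are copied as-is. As a self-contained illustration (the helper is ours, not the file's):

#include <stdint.h>

/* Illustrative helper: convert one OpenJPEG sample to unsigned. For a signed
 * component (sgnd == 1) add 1 << (prec - 1), which is the 0x80 seen in the
 * 8-bit paths above; unsigned components pass through unchanged. */
static inline uint16_t sample_to_unsigned(int32_t sample, unsigned prec, unsigned sgnd)
{
    return (uint16_t)(sample + (sgnd ? (1 << (prec - 1)) : 0));
}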
/* libopenjpeg_decode_init() (excerpt): start from libopenjpeg's default
 * decoder parameters */
    opj_set_default_decoder_parameters(&ctx->dec_params);
/* libopenjpeg_decode_frame() (excerpt) */
static int libopenjpeg_decode_frame(AVCodecContext *avctx, void *data,
                                    int *got_frame, AVPacket *avpkt)
{
    ...
    int buf_size = avpkt->size;
    ...
    opj_image_t  *image  = NULL;
    opj_codec_t  *dec    = NULL;
    opj_stream_t *stream = NULL;
    /* a JP2 signature selects the JP2 wrapper parser, otherwise assume a raw
     * J2K codestream */
        dec = opj_create_decompress(OPJ_CODEC_JP2);
    ...
        dec = opj_create_decompress(OPJ_CODEC_J2K);
    ctx->dec_params.cp_layer  = ctx->lowqual;   /* limit the quality layers decoded      */
    ctx->dec_params.cp_reduce = avctx->lowres;  /* discard resolution levels for lowres  */
    ...
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_stream_default_create(OPJ_STREAM_READ);
    if (!stream) {
        av_log(avctx, AV_LOG_ERROR,
               "Codestream could not be opened for reading.\n");
        ...
    }
    ...
    opj_stream_set_user_data(stream, &reader, NULL);
    opj_stream_set_user_data_length(stream, avpkt->size);
    ret = !opj_read_header(stream, dec, &image);
    ...
    width  = image->x1 - image->x0;   /* extent of the image on the reference grid */
    height = image->y1 - image->y0;
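Presumably the grid size computed above is then validated and applied with ff_set_dimensions() and the output format is chosen with libopenjpeg_guess_pix_fmt(); a sketch of that step (the cleanup label is an assumption):

    if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
        goto done;                    /* "done" is a hypothetical cleanup label */
    avctx->pix_fmt = libopenjpeg_guess_pix_fmt(image);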
    for (i = 0; i < image->numcomps; i++)
        ...                           /* per-component checks elided */
    ...
    ret = !opj_decode(dec, stream, image);
    for (i = 0; i < image->numcomps; i++) {
        if (!image->comps[i].data) {
            av_log(avctx, AV_LOG_ERROR,
                   "Image component %d contains no data.\n", i);
            ...
        }
    }
    pixel_size = desc->comp[0].step;
    ...
    /* dispatch on the component step in bytes to one of the copy routines above */
    switch (pixel_size) {
    ...
    }
    opj_image_destroy(image);
    opj_stream_destroy(stream);
    opj_destroy_codec(dec);
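Putting the libopenjpeg calls of this function together, here is a standalone sketch of the same decode flow against the plain OpenJPEG 2.x API, without the FFmpeg plumbing; the function name and the BufferReader/attach_packet_stream() helpers come from the sketches above, not from this file:

#include <openjpeg.h>   /* header name/path depends on the OpenJPEG install */

static opj_image_t *decode_packet(BufferReader *reader, int is_jp2,
                                  int reduce, int layer)
{
    opj_dparameters_t params;
    opj_codec_t  *dec    = NULL;
    opj_stream_t *stream = NULL;
    opj_image_t  *image  = NULL;

    opj_set_default_decoder_parameters(&params);
    params.cp_reduce = reduce;   /* resolution levels to discard (lowres)  */
    params.cp_layer  = layer;    /* max quality layers to decode, 0 = all  */

    dec = opj_create_decompress(is_jp2 ? OPJ_CODEC_JP2 : OPJ_CODEC_J2K);
    if (!dec)
        return NULL;
    if (!opj_setup_decoder(dec, &params))
        goto fail;

    stream = attach_packet_stream(reader);   /* sketch helper from above */
    if (!stream)
        goto fail;

    if (!opj_read_header(stream, dec, &image) ||
        !opj_decode(dec, stream, image) ||
        !opj_end_decompress(dec, stream)) {
        opj_image_destroy(image);
        image = NULL;
    }

fail:
    if (stream)
        opj_stream_destroy(stream);
    opj_destroy_codec(dec);
    return image;   /* NULL on failure; the caller owns the image */
}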
#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "lowqual", "Limit the number of layers used for decoding", OFFSET(lowqual), ... },
    ...
};
AVCodec ff_libopenjpeg_decoder = {
    .name         = "libopenjpeg",
    ...
    .wrapper_name = "libopenjpeg",
};
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
static av_cold int init(AVCodecContext *avctx)
static void libopenjpeg_copy_to_packed8(AVFrame *picture, opj_image_t *image)
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
AVFrame
This structure describes decoded (raw) audio or video data.
static enum AVPixelFormat libopenjpeg_yuv_pix_fmts[]
static enum AVPixelFormat libopenjpeg_gray_pix_fmts[]
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define XYZ_PIXEL_FORMATS
static av_cold int libopenjpeg_decode_init(AVCodecContext *avctx)
static const AVOption options[]
int key_frame
1 -> keyframe, 0 -> not
static void warning_callback(const char *msg, void *data)
#define AV_PIX_FMT_GRAY16
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static OPJ_OFF_T stream_skip(OPJ_OFF_T nb_bytes, void *user_data)
static enum AVPixelFormat libopenjpeg_rgb_pix_fmts[]
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int libopenjpeg_ispacked(enum AVPixelFormat pix_fmt)
static enum AVPixelFormat pix_fmt
#define GRAY_PIXEL_FORMATS
static void info_callback(const char *msg, void *data)
#define RGB_PIXEL_FORMATS
static enum AVPixelFormat libopenjpeg_all_pix_fmts[]
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
int lowres
low resolution decoding, 1 -> 1/2 size, 2 -> 1/4 size
enum AVPictureType pict_type
Picture type of the frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static const AVClass openjpeg_class
static OPJ_SIZE_T stream_read(void *out_buffer, OPJ_SIZE_T nb_bytes, void *user_data)
#define AV_RB32
Read a 32-bit big-endian value (byte-access macros from libavutil/intreadwrite.h).
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
static void libopenjpeg_copy_to_packed16(AVFrame *picture, opj_image_t *image)
static int libopenjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
#define AVERROR_EXTERNAL
Generic error in an external library.
static void libopenjpeg_copyto8(AVFrame *picture, opj_image_t *image)
opj_dparameters_t dec_params
static enum AVPixelFormat libopenjpeg_guess_pix_fmt(const opj_image_t *image)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define FF_ARRAY_ELEMS(a)
AVCodecContext
main external API structure.
static int libopenjpeg_matches_pix_fmt(const opj_image_t *image, enum AVPixelFormat pix_fmt)
static void error_callback(const char *msg, void *data)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
AVPacket
This structure stores compressed data.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static OPJ_BOOL stream_seek(OPJ_OFF_T nb_bytes, void *user_data)
#define YUV_PIXEL_FORMATS
AVCodec ff_libopenjpeg_decoder
static void libopenjpeg_copyto16(AVFrame *picture, opj_image_t *image)