#if HAVE_OPENJPEG_2_1_OPENJPEG_H
#  include <openjpeg-2.1/openjpeg.h>
#elif HAVE_OPENJPEG_2_0_OPENJPEG_H
#  include <openjpeg-2.0/openjpeg.h>
#elif HAVE_OPENJPEG_1_5_OPENJPEG_H
#  include <openjpeg-1.5/openjpeg.h>
#else
#  include <openjpeg.h>
#endif

#if HAVE_OPENJPEG_2_1_OPENJPEG_H || HAVE_OPENJPEG_2_0_OPENJPEG_H
#  define OPENJPEG_MAJOR_VERSION 2
#  define OPJ(x) OPJ_##x
#else
#  define OPENJPEG_MAJOR_VERSION 1
#  define OPJ(x) x
#endif
/* members of the LibOpenJPEGContext private context */
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_t *image;
#endif // OPENJPEG_MAJOR_VERSION == 1
    opj_cparameters_t enc_params;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_event_mgr_t event_mgr;
#endif // OPENJPEG_MAJOR_VERSION == 1
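/*
 * With OpenJPEG 2.x the encoder does not write into a memory CIO as 1.x did;
 * instead the opj_stream_t is wired to the callbacks below, which append
 * directly into the output AVPacket and grow it on demand with av_grow_packet().
 */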
#if OPENJPEG_MAJOR_VERSION == 2
typedef struct PacketWriter {
    int pos;
    AVPacket *packet;
} PacketWriter;

static OPJ_SIZE_T stream_write(void *out_buffer, OPJ_SIZE_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    int remaining = packet->size - writer->pos;
    if (nb_bytes > remaining) {
        OPJ_SIZE_T needed = nb_bytes - remaining;
        int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
        if (needed > max_growth) {
            return (OPJ_SIZE_T)-1;
        }
        if (av_grow_packet(packet, (int)needed)) {
            return (OPJ_SIZE_T)-1;
        }
    }
    memcpy(packet->data + writer->pos, out_buffer, nb_bytes);
    writer->pos += (int)nb_bytes;
    return nb_bytes;
}

static OPJ_OFF_T stream_skip(OPJ_OFF_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    if (nb_bytes < 0) {
        if (writer->pos == 0) {
            return (OPJ_SIZE_T)-1;
        }
        if (nb_bytes + writer->pos < 0) {
            nb_bytes = -writer->pos;
        }
    } else {
        int remaining = packet->size - writer->pos;
        if (nb_bytes > remaining) {
            OPJ_SIZE_T needed = nb_bytes - remaining;
            int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
            if (needed > max_growth) {
                return (OPJ_SIZE_T)-1;
            }
            if (av_grow_packet(packet, (int)needed)) {
                return (OPJ_SIZE_T)-1;
            }
        }
    }
    writer->pos += (int)nb_bytes;
    return nb_bytes;
}

static OPJ_BOOL stream_seek(OPJ_OFF_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    if (nb_bytes < 0) {
        return OPJ_FALSE;
    }
    if (nb_bytes > packet->size) {
        return OPJ_FALSE;
    }
    writer->pos = (int)nb_bytes;
    return OPJ_TRUE;
}
#endif // OPENJPEG_MAJOR_VERSION == 2
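/*
 * cinema_parameters() pins the DCI-oriented encoder settings shown below:
 * image origin at (0, 0), 32x32 code blocks, CPRL progression order and no
 * additional subsampling at the codestream level.
 */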
    p->image_offset_x0 = 0;
    p->image_offset_y0 = 0;

    /* Codeblock size = 32 * 32 */
    p->cblockw_init = 32;
    p->cblockh_init = 32;

    /* The progression order shall be CPRL */
    p->prog_order = OPJ(CPRL);

    /* No subsampling */
    p->subsampling_dx = 1;
    p->subsampling_dy = 1;
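/*
 * mj2_create_image() translates the AVCodecContext pixel format into an
 * opj_image_t: one opj_image_cmptparm_t per component, with the chroma
 * subsampling factors and rounded-up component dimensions filled in, and the
 * OpenJPEG color space chosen from the pixel format family.
 */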
static opj_image_t *mj2_create_image(AVCodecContext *avctx, opj_cparameters_t *parameters)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    opj_image_cmptparm_t cmptparm[4] = {{0}};
    opj_image_t *img;
    int i;
    int sub_dx[4];
    int sub_dy[4];
    int numcomps;
    OPJ_COLOR_SPACE color_space = OPJ(CLRSPC_UNKNOWN);

    sub_dx[0] = sub_dx[3] = 1;
    sub_dy[0] = sub_dy[3] = 1;
    sub_dx[1] = sub_dx[2] = 1 << desc->log2_chroma_w;
    sub_dy[1] = sub_dy[2] = 1 << desc->log2_chroma_h;
    numcomps = desc->nb_components;

    switch (avctx->pix_fmt) {
    /* ... grayscale pixel formats ... */
        color_space = OPJ(CLRSPC_GRAY);
        break;
    /* ... RGB(A) and planar GBR pixel formats ... */
        color_space = OPJ(CLRSPC_SRGB);
        break;
    /* ... planar YUV(A) pixel formats ... */
        color_space = OPJ(CLRSPC_SYCC);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The requested pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return NULL;
    }

    for (i = 0; i < numcomps; i++) {
        cmptparm[i].prec = desc->comp[i].depth;
        cmptparm[i].sgnd = 0;
        cmptparm[i].dx = sub_dx[i];
        cmptparm[i].dy = sub_dy[i];
        cmptparm[i].w = (avctx->width  + sub_dx[i] - 1) / sub_dx[i];
        cmptparm[i].h = (avctx->height + sub_dy[i] - 1) / sub_dy[i];
    }

    img = opj_image_create(numcomps, cmptparm, color_space);
    if (!img)
        return NULL;
    img->x1 = (avctx->width  - 1) * parameters->subsampling_dx + 1;
    img->y1 = (avctx->height - 1) * parameters->subsampling_dy + 1;
    return img;
}
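/*
 * libopenjpeg_encode_init(): start from opj_set_default_encoder_parameters()
 * and then tighten the parameters when a cinema_mode or a cinema profile was
 * requested, rejecting contradictory option combinations.
 */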
static av_cold int libopenjpeg_encode_init(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    opj_set_default_encoder_parameters(&ctx->enc_params);

#if HAVE_OPENJPEG_2_1_OPENJPEG_H
    switch (ctx->cinema_mode) {
    case OPJ_CINEMA2K_24:
        ctx->enc_params.rsiz          = OPJ_PROFILE_CINEMA_2K;
        ctx->enc_params.max_cs_size   = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;
        break;
    case OPJ_CINEMA2K_48:
        ctx->enc_params.rsiz          = OPJ_PROFILE_CINEMA_2K;
        ctx->enc_params.max_cs_size   = OPJ_CINEMA_48_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_48_COMP;
        break;
    case OPJ_CINEMA4K_24:
        ctx->enc_params.rsiz          = OPJ_PROFILE_CINEMA_4K;
        ctx->enc_params.max_cs_size   = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;
        break;
    }

    if (ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_4K) {
        /* ... */
    }
    /* ... */
    if (ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_2K) {
        /* ... */
    }
    /* ... conflicting cinema_mode/profile settings are rejected: */
        av_log(avctx, AV_LOG_ERROR,
               "Invalid parameter pairing: cinema_mode and profile conflict.\n");
    /* ... */
#endif
#if OPENJPEG_MAJOR_VERSION == 1
    ctx->image = mj2_create_image(avctx, &ctx->enc_params);
    /* ... */
#endif // OPENJPEG_MAJOR_VERSION == 1

    /* ... on failure the partially initialized state is released: */
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_destroy(ctx->image);
    ctx->image = NULL;
#endif // OPENJPEG_MAJOR_VERSION == 1
}
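/*
 * The libopenjpeg_copy_* helpers below fill image->comps[] from an AVFrame.
 * The "packed" variants read interleaved samples from frame->data[0], the
 * "unpacked" variants read one plane per component; all of them pad the
 * right and bottom edges by repeating the last written sample or row, since
 * the OpenJPEG component can be larger than the visible picture.
 */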
static int libopenjpeg_copy_packed8(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[0] + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame->data[0][frame_index];
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
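/* Same as libopenjpeg_copy_packed8(), but for 12-bit samples stored in
 * 16-bit words: each value is shifted right by 4 before being stored. */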
static int libopenjpeg_copy_packed12(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t *)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame_ptr[frame_index] >> 4;
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
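/* Packed 16-bit variant: samples are copied verbatim from the interleaved
 * uint16_t buffer. */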
static int libopenjpeg_copy_packed16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t *)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame_ptr[frame_index];
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
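/* Planar 8-bit variant: each component comes from its own plane, so the
 * per-plane linesize and the component's dx/dy subsampling are used. */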
static int libopenjpeg_copy_unpacked8(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int width, height;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        width  = (avctx->width  + image->comps[compno].dx - 1) / image->comps[compno].dx;
        height = (avctx->height + image->comps[compno].dy - 1) / image->comps[compno].dy;
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[compno];
            for (x = 0; x < width; ++x)
                image_line[x] = frame->data[compno][frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
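/* Planar 9- to 16-bit variant: same walk as the 8-bit version, reading
 * uint16_t samples and halving the byte linesize to get a sample stride. */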
static int libopenjpeg_copy_unpacked16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int width, height;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        width     = (avctx->width  + image->comps[compno].dx - 1) / image->comps[compno].dx;
        height    = (avctx->height + image->comps[compno].dy - 1) / image->comps[compno].dy;
        frame_ptr = (uint16_t *)frame->data[compno];
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[compno] / 2);
            for (x = 0; x < width; ++x)
                image_line[x] = frame_ptr[frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - (int)image->comps[compno].w];
            }
        }
    }

    return 1;
}
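/*
 * libopenjpeg_encode_frame(): build (or, on 1.x, reuse) the opj_image_t, copy
 * the frame into it with one of the helpers above, then run the OpenJPEG
 * compressor. On 2.x the output lands in the AVPacket through the PacketWriter
 * callbacks; on 1.x it is copied out of the memory CIO afterwards.
 */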
static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                    const AVFrame *frame, int *got_packet)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    int ret, len;
    int cpyresult = 0;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_t *image    = ctx->image;
    opj_cinfo_t *compress = NULL;
    opj_cio_t *stream     = NULL;
#else // OPENJPEG_MAJOR_VERSION == 2
    PacketWriter writer   = { 0 };
    opj_codec_t *compress = NULL;
    opj_stream_t *stream  = NULL;
    opj_image_t *image    = mj2_create_image(avctx, &ctx->enc_params);
    /* ... */
#endif // OPENJPEG_MAJOR_VERSION == 1

    /* ... the copy helper is selected from avctx->pix_fmt; an unhandled
     * layout is reported with: */
        av_log(avctx, AV_LOG_ERROR,
               "The frame's pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
    /* ... and a failed copy with: */
        av_log(avctx, AV_LOG_ERROR,
               "Could not copy the frame data to the internal image buffer\n");
    /* ... */

#if OPENJPEG_MAJOR_VERSION == 2
    if ((ret = ff_alloc_packet2(avctx, pkt, 1024, 0)) < 0) {
        return ret;
    }
#endif // OPENJPEG_MAJOR_VERSION == 2

    compress = opj_create_compress(ctx->format);
    /* ... */

#if OPENJPEG_MAJOR_VERSION == 1
    opj_setup_encoder(compress, &ctx->enc_params, image);
    stream = opj_cio_open((opj_common_ptr) compress, NULL, 0);
#else // OPENJPEG_MAJOR_VERSION == 2
    /* ... install error/warning/info callbacks ... */
    if (!opj_setup_encoder(compress, &ctx->enc_params, image)) {
        av_log(avctx, AV_LOG_ERROR, "Error setting up the compressor\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }
    stream = opj_stream_default_create(OPJ_STREAM_WRITE);
#endif // OPENJPEG_MAJOR_VERSION == 1
    /* ... */
#if OPENJPEG_MAJOR_VERSION == 1
    ctx->event_mgr.info_handler    = info_callback;
    ctx->event_mgr.error_handler   = error_callback;
    ctx->event_mgr.warning_handler = warning_callback;
    opj_set_event_mgr((opj_common_ptr) compress, &ctx->event_mgr, avctx);
    if (!opj_encode(compress, stream, image, NULL)) {
        av_log(avctx, AV_LOG_ERROR, "Error during the opj encode\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }

    len = cio_tell(stream);
    if ((ret = ff_alloc_packet2(avctx, pkt, len, 0)) < 0) {
        goto done;
    }
    memcpy(pkt->data, stream->buffer, len);
#else // OPENJPEG_MAJOR_VERSION == 2
    writer.packet = pkt;
    opj_stream_set_write_function(stream, stream_write);
    opj_stream_set_skip_function(stream, stream_skip);
    opj_stream_set_seek_function(stream, stream_seek);
#if HAVE_OPENJPEG_2_1_OPENJPEG_H
    opj_stream_set_user_data(stream, &writer, NULL);
#elif HAVE_OPENJPEG_2_0_OPENJPEG_H
    opj_stream_set_user_data(stream, &writer);
#else
#error Missing call to opj_stream_set_user_data
#endif

    if (!opj_start_compress(compress, image, stream) ||
        !opj_encode(compress, stream) ||
        !opj_end_compress(compress, stream)) {
        av_log(avctx, AV_LOG_ERROR, "Error during the opj encode\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }
    av_shrink_packet(pkt, writer.pos);
#endif // OPENJPEG_MAJOR_VERSION == 1
    /* ... */
done:
#if OPENJPEG_MAJOR_VERSION == 2
    opj_stream_destroy(stream);
    opj_destroy_codec(compress);
    opj_image_destroy(image);
#else
    opj_cio_close(stream);
    opj_destroy_compress(compress);
#endif
    return ret;
}

static av_cold int libopenjpeg_encode_close(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_image_destroy(ctx->image);
#endif // OPENJPEG_MAJOR_VERSION == 1
    return 0;
}
#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* ... the AVOption table and the AVClass for the private options ... */

AVCodec ff_libopenjpeg_encoder = {
    .name = "libopenjpeg",
    /* ... */
};