libavcodec/sunrastenc.c: Sun Rasterfile (SUNRAST) image encoder, annotated source excerpts and referenced symbols.
    /* sunrast_image_write_header(): the seven remaining 32-bit big-endian
     * header fields, written right after the leading RAS_MAGIC word: */
    bytestream2_put_be32u(&s->p, avctx->width);
    bytestream2_put_be32u(&s->p, avctx->height);
    bytestream2_put_be32u(&s->p, s->depth);
    bytestream2_put_be32u(&s->p, s->length);
    bytestream2_put_be32u(&s->p, s->type);
    bytestream2_put_be32u(&s->p, s->maptype);
    bytestream2_put_be32u(&s->p, s->maplength);
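/* Sketch (not the encoder's code): the same fixed 32-byte Sun Raster header
 * written without FFmpeg's PutByteContext, to make the layout explicit.
 * The helper names and the plain uint8_t buffer are illustrative; the
 * 0x59a66a95 magic is the documented Sun Raster magic word. */
#include <stdint.h>

static void put_be32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 24);
    p[1] = (uint8_t)(v >> 16);
    p[2] = (uint8_t)(v >>  8);
    p[3] = (uint8_t) v;
}

static void write_sunrast_header(uint8_t buf[32],
                                 uint32_t width, uint32_t height,
                                 uint32_t depth, uint32_t length,
                                 uint32_t type, uint32_t maptype,
                                 uint32_t maplength)
{
    put_be32(buf +  0, 0x59a66a95u); /* RAS_MAGIC                         */
    put_be32(buf +  4, width);
    put_be32(buf +  8, height);
    put_be32(buf + 12, depth);       /* bits per pixel: 1, 8 or 24        */
    put_be32(buf + 16, length);      /* image data length in bytes        */
    put_be32(buf + 20, type);        /* RT_STANDARD or RT_BYTE_ENCODED    */
    put_be32(buf + 24, maptype);     /* RMT_NONE or RMT_EQUAL_RGB         */
    put_be32(buf + 28, maplength);   /* colormap length in bytes          */
}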
static void sunrast_image_write_image(AVCodecContext *avctx,
                                      const uint8_t *pixels,
                                      const uint32_t *palette_data,
                                      int linesize)
{
    /* ... */
    /* Write the RMT_EQUAL_RGB colormap as three planes of s->maplength / 3
     * bytes each: all red values, then all green, then all blue. pb_r and
     * pb_g are PutByteContexts positioned at the red and green planes,
     * while s->p writes the blue plane directly. */
    int len = s->maplength / 3;

    for (x = 0; x < len; x++) {
        uint32_t pixel = palette_data[x];

        bytestream2_put_byteu(&pb_r, (pixel >> 16) & 0xFF);
        bytestream2_put_byteu(&pb_g, (pixel >>  8) & 0xFF);
        bytestream2_put_byteu(&s->p,  pixel        & 0xFF);
    }
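/* Sketch (not the encoder's code): the same planar split done over a plain
 * buffer, showing the RMT_EQUAL_RGB layout. Function, buffer and count are
 * illustrative; palette entries are assumed packed as 0xXXRRGGBB. */
#include <stdint.h>

static void palette_to_equal_rgb(uint8_t *map,            /* 3 * count bytes */
                                 const uint32_t *palette,
                                 int count)
{
    for (int i = 0; i < count; i++) {
        map[i]             = (palette[i] >> 16) & 0xFF; /* red plane   */
        map[count + i]     = (palette[i] >>  8) & 0xFF; /* green plane */
        map[2 * count + i] =  palette[i]        & 0xFF; /* blue plane  */
    }
}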
    /* RT_BYTE_ENCODED path (excerpted): walk the padded rows byte by byte,
     * collecting runs of identical values; the run-gathering code between
     * the elisions is omitted here. */
    uint8_t value, value2;

    /* GET_VALUE repeats the last byte of a row for the alignment padding
     * column and yields 0 once y has run past the last row: */
#define GET_VALUE y >= avctx->height ? 0 : x >= len ? ptr[len-1] : ptr[x]

    while (y < avctx->height) {
        /* ... stepping past the end of a padded row advances to the next: */
        ptr += linesize, y++;
        /* ... the inner run-collecting loop advances rows the same way: */
        ptr += linesize, y++;

        /* ... runs of three or more, and any literal occurrence of the
         * trigger byte, are escaped as <trigger> <run - 1> <value>: */
            bytestream2_put_byteu(&s->p, run - 1);
            bytestream2_put_byteu(&s->p, value);
        } else if (run == 1) {
            /* a single byte is written literally */
            bytestream2_put_byteu(&s->p, value);
        } else {
            /* a run of exactly two: the value written twice */
            bytestream2_put_be16u(&s->p, (value << 8) | value);
        }
    }
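/* Sketch of the Sun Raster RT_BYTE_ENCODED scheme over a flat buffer; the
 * function and buffer names are illustrative, not the encoder's. Runs of up
 * to 256 equal bytes become <0x80><run - 1><value>, short runs are written
 * literally, and a literal 0x80 is escaped as <0x80><0x00>. The destination
 * must allow for a 2x worst case, which is why the encoder sizes the packet
 * with s->length * s->type. */
#include <stddef.h>
#include <stdint.h>

static size_t rle_pack(uint8_t *dst, const uint8_t *src, size_t len)
{
    size_t in = 0, out = 0;

    while (in < len) {
        uint8_t value = src[in];
        size_t  run   = 1;

        /* gather a run of identical bytes, capped at 256 */
        while (in + run < len && src[in + run] == value && run < 256)
            run++;

        if (run > 2 || value == 0x80) {      /* escaped run */
            dst[out++] = 0x80;
            dst[out++] = (uint8_t)(run - 1);
            if (run > 1)
                dst[out++] = value;          /* <0x80><0x00> alone means a literal 0x80 */
        } else if (run == 1) {               /* single literal byte */
            dst[out++] = value;
        } else {                             /* run of two literals */
            dst[out++] = value;
            dst[out++] = value;
        }
        in += run;
    }
    return out;                              /* bytes written */
}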
    /* RT_STANDARD path: copy each row verbatim and append a zero byte when
     * the row length is odd, keeping rows 16-bit aligned: */
    for (y = 0; y < avctx->height; y++) {
        /* ... */
        bytestream2_put_byteu(&s->p, 0);
        /* ... */
    }
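/* Sketch of the row-size arithmetic behind that padding (illustrative, not
 * reproduced verbatim from the encoder): rows are packed at the configured
 * bit depth and then rounded up to an even number of bytes. */
static int sunrast_padded_row_size(int width, int depth_bits)
{
    int row = (width * depth_bits + 7) >> 3;  /* packed row size in bytes   */
    return row + (row & 1);                   /* round up to 16-bit boundary */
}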
    /* sunrast_encode_init(): a PAL8 input gets a full 256-entry RGB colormap: */
    s->maplength = 3 * 256;
    /* ... worst-case packet size: 32-byte header, colormap, then the image
     * data, doubled when s->type is RT_BYTE_ENCODED (2) to cover RLE
     * expansion: */
    s->size = 32 + s->maplength + s->length * s->type;
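/* Worked example (illustrative numbers, not encoder code): the worst-case
 * packet size for a 640x480 AV_PIX_FMT_PAL8 frame with RLE enabled. */
#include <stdio.h>

int main(void)
{
    int maplength = 3 * 256;                   /* 768-byte RMT_EQUAL_RGB colormap        */
    int length    = 640 * 480;                 /* padded image bytes (rows already even) */
    int type      = 2;                         /* RT_BYTE_ENCODED                        */
    int size      = 32 + maplength + length * type;

    printf("%d\n", size);                      /* prints 615200 */
    return 0;
}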
    /* sunrast_encode_frame(): frame->data[1] carries the AV_PIX_FMT_PAL8
     * palette that becomes the colormap: */
    sunrast_image_write_image(avctx, frame->data[0],
                              (const uint32_t *)frame->data[1],
                              frame->linesize[0]);
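/* Sketch: encoding one GRAY8 frame to a Sun Raster packet through the
 * public libavcodec API. Error handling is abbreviated and the helper name
 * is illustrative; a draining pass (avcodec_send_frame(ctx, NULL)) is
 * omitted for brevity. */
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>

static int encode_one_sunrast(const uint8_t *gray, int w, int h, AVPacket *out)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_SUNRAST);
    AVCodecContext *ctx;
    AVFrame *frame;
    int ret;

    if (!codec)
        return AVERROR_ENCODER_NOT_FOUND;

    ctx   = avcodec_alloc_context3(codec);
    frame = av_frame_alloc();
    if (!ctx || !frame) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ctx->width     = w;
    ctx->height    = h;
    ctx->pix_fmt   = AV_PIX_FMT_GRAY8;
    ctx->time_base = (AVRational){ 1, 25 };

    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        goto end;

    frame->format = ctx->pix_fmt;
    frame->width  = w;
    frame->height = h;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        goto end;
    av_image_copy_plane(frame->data[0], frame->linesize[0], gray, w, w, h);

    if ((ret = avcodec_send_frame(ctx, frame)) < 0)
        goto end;
    ret = avcodec_receive_packet(ctx, out);

end:
    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    return ret;
}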
#define OFFSET(x) offsetof(SUNRASTContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
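/* OFFSET() and VE are the usual helpers for an encoder's AVOption table.
 * The entry below is an illustrative sketch of how such a table is built on
 * them (it is not necessarily the exact options[] of this file, and it only
 * compiles in the context of this file, where SUNRASTContext is defined). */
static const AVOption example_options[] = {
    { "rle", "Use run-length compression", OFFSET(type), AV_OPT_TYPE_INT,
      { .i64 = 2 /* RT_BYTE_ENCODED */ }, 1 /* RT_STANDARD */, 2, VE },
    { NULL },
};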
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing the init function to be called without locking any global mutexes.
AVPixelFormat
Pixel format.
AVFrame
This structure describes decoded (raw) audio or video data.
int length
length (bytes) of image
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
@ AV_PIX_FMT_MONOWHITE
Y, 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
static void sunrast_image_write_header(AVCodecContext *avctx)
static const AVOption options[]
static void sunrast_image_write_image(AVCodecContext *avctx, const uint8_t *pixels, const uint32_t *palette_data, int linesize)
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
static enum AVPixelFormat pix_fmts[]
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
const char * av_default_item_name(void *ptr)
Return the context name.
static av_cold int sunrast_encode_init(AVCodecContext *avctx)
@ AV_PIX_FMT_GRAY8
Y, 8bpp.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static int sunrast_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
const AVCodec ff_sunrast_encoder
static av_always_inline void bytestream2_skip_p(PutByteContext *p, unsigned int size)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
static const AVClass sunrast_class
int maptype
type of colormap
AVCodecContext
main external API structure.
AVPacket
This structure stores compressed data.
int width
picture width / height.
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
int maplength
length (bytes) of colormap
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
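The bytestream2_* writer helpers listed above (bytestream2_init_writer, bytestream2_put_byteu, bytestream2_put_be32u, bytestream2_skip_p, bytestream2_tell_p) are how the encoder fills the allocated packet. A minimal usage sketch follows; the function name, buffer size and values are illustrative, and bytestream.h is an internal libavcodec header, so this only builds inside the FFmpeg source tree.

/* Fill a buffer with the PutByteContext writer API; the *_u variants assume
 * the caller has already ensured enough space is available. */
#include <libavcodec/bytestream.h>

static int fill_buffer_example(uint8_t *buf, int buf_size)
{
    PutByteContext p;

    bytestream2_init_writer(&p, buf, buf_size);
    bytestream2_put_be32u(&p, 0x59a66a95u);   /* e.g. the RAS_MAGIC word */
    bytestream2_skip_p(&p, 4);                /* leave room for a field  */
    bytestream2_put_byteu(&p, 0x80);
    return bytestream2_tell_p(&p);            /* bytes written so far: 9 */
}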