Source listing (excerpts): Pictor/PC Paint image decoder (ff_pictor_decoder), libavcodec.
    /* In picmemset_8bpp(): a run that reaches the right edge first fills the
     * rest of the current row. */
    if (*x + run >= s->width) {
        int n = s->width - *x;
        memset(d + *x, value, n);
        ...
static void picmemset(PicContext *s, AVFrame *frame, unsigned value, int run,
                      int *x, int *y, int *plane, int bits_per_plane)
{
    int shift = *plane * bits_per_plane;
    unsigned mask = ((1U << bits_per_plane) - 1) << shift;
    int xl = *x;
    int yl = *y;
    int planel = *plane;
    int pixels_per_value = 8 / bits_per_plane;
    ...
        /* write the run value into each bits_per_plane-wide group of the byte */
        for (j = 8 - bits_per_plane; j >= 0; j -= bits_per_plane) {
            ...
            while (xl == s->width) {
                ...
                /* all planes written? otherwise step the masks to the next plane */
                if (planel >= s->nb_planes)
                    ...
                value <<= bits_per_plane;
                mask  <<= bits_per_plane;
                ...
                /* fast path: a single plane and a run spanning whole rows */
                if (s->nb_planes == 1 &&
                    run * pixels_per_value >= s->width &&
                    pixels_per_value < s->width &&
                    s->width % pixels_per_value == 0) {
                    for (; xl < pixels_per_value; xl++) {
                        j = (j < bits_per_plane ? 8 : j) - bits_per_plane;
                        ...
                    }
                    ...
                    run -= s->width / pixels_per_value;
                    ...
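As a concrete illustration of the plane addressing above, a minimal standalone sketch (values are made up, not taken from the decoder) of the shift/mask arithmetic for a hypothetical 2-bit-per-plane image on plane 1:

#include <stdio.h>

int main(void)
{
    int bits_per_plane   = 2;                   /* hypothetical: 2 bits per plane */
    int plane            = 1;                   /* second plane                   */
    int shift            = plane * bits_per_plane;                 /* 2           */
    unsigned mask        = ((1U << bits_per_plane) - 1) << shift;  /* 0xC         */
    int pixels_per_value = 8 / bits_per_plane;  /* 4 pixels packed per byte       */

    printf("shift=%d mask=0x%X pixels_per_value=%d\n",
           shift, mask, pixels_per_value);
    return 0;
}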
/* CGA mode 4/5 palette index sets; each entry indexes ff_cga_palette. */
static const uint8_t cga_mode45_index[6][4] = {
    [0] = { 0, 3,  5,  7 },   /* black, cyan,  magenta, gray  */
    [1] = { 0, 2,  4,  6 },   /* black, green, red,     brown */
    [2] = { 0, 3,  4,  7 },   /* black, cyan,  red,     gray  */
    [3] = { 0, 11, 13, 15 },  /* high-intensity variants of the above */
    [4] = { 0, 10, 12, 14 },
    [5] = { 0, 11, 12, 15 },
};
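The etype 1 branch of decode_frame() below builds a 4-colour frame palette from one row of this table. A minimal sketch of that lookup, assuming libavcodec's 16-entry ff_cga_palette table of ARGB CGA colours (the helper name here is illustrative only):

#include <stdint.h>

extern const uint32_t ff_cga_palette[16];   /* 16 ARGB CGA colours (libavcodec) */

/* Sketch: copy the four colours named by one cga_mode45_index row. */
void set_cga_mode45_palette(uint32_t *palette, const uint8_t index_row[4])
{
    for (int i = 0; i < 4; i++)
        palette[i] = ff_cga_palette[index_row[i]];
}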
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ...
    int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
    ...
    /* the file must start with the little-endian magic 0x1234 */
    if (bytestream2_get_le16u(&s->g) != 0x1234)
        return AVERROR_INVALIDDATA;

    s->width       = bytestream2_get_le16u(&s->g);
    s->height      = bytestream2_get_le16u(&s->g);
    ...
    /* one byte describes the plane layout: low nibble = bits per plane,
     * high nibble = number of planes - 1 */
    tmp            = bytestream2_get_byteu(&s->g);
    bits_per_plane = tmp & 0xF;
    s->nb_planes   = (tmp >> 4) + 1;
    bpp            = bits_per_plane * s->nb_planes;
    if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
        ...
    }

    /* optional extension block: etype identifies its format, esize its size */
    if (bytestream2_peek_byte(&s->g) == 0xFF || bpp == 1 || bpp == 4 || bpp == 8) {
        ...
        etype = bytestream2_get_le16(&s->g);
        esize = bytestream2_get_le16(&s->g);
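A self-contained illustration of how the plane-info byte read above splits into per-plane depth and plane count (the example byte 0x31 is hypothetical, not from any real file):

#include <stdio.h>

int main(void)
{
    unsigned tmp       = 0x31;                 /* hypothetical plane-info byte    */
    int bits_per_plane = tmp & 0xF;            /* low nibble  -> 1 bit per plane  */
    int nb_planes      = (tmp >> 4) + 1;       /* high nibble -> 3 + 1 = 4 planes */
    int bpp            = bits_per_plane * nb_planes;   /* 4 bpp, i.e. 16 colours  */

    printf("%d plane(s) x %d bit(s) = %d bpp\n", nb_planes, bits_per_plane, bpp);
    return 0;
}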
    if (s->width  != avctx->width ||
        s->height != avctx->height) {
        ret = ff_set_dimensions(avctx, s->width, s->height);
        ...
    }
    ...
    /* clear the frame before the planar plotters OR pixel bits into it */
    memset(frame->data[0], 0, s->height * frame->linesize[0]);
    frame->pict_type           = AV_PICTURE_TYPE_I;
    frame->palette_has_changed = 1;
    palette = (uint32_t *)frame->data[1];
    if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) {
        /* a single byte selects one of the cga_mode45_index rows */
        int idx = bytestream2_get_byte(&s->g);
        ...
        for (i = 0; i < npal; i++)
            ...
    } else if (etype == 2) {
        /* up to 16 indices into the CGA palette */
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++) {
            int pal_idx = bytestream2_get_byte(&s->g);
            ...
        }
    } else if (etype == 3) {
        /* up to 16 indices into the EGA palette */
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++) {
            int pal_idx = bytestream2_get_byte(&s->g);
            ...
        }
    } else if (etype == 4 || etype == 5) {
        /* up to 256 RGB triplets with 6-bit components */
        npal = FFMIN(esize / 3, 256);
        for (i = 0; i < npal; i++) {
            palette[i]  = bytestream2_get_be24(&s->g) << 2;
            palette[i] |= 0xFFU << 24 | palette[i] >> 6 & 0x30303;
        }
    } else {
        /* no usable palette block: pick defaults for the given bpp */
        if (bpp == 1) {
            ...
            palette[0] = 0xFF000000;   /* opaque black */
            palette[1] = 0xFFFFFFFF;   /* opaque white */
        } else if (bpp == 2) {
            ...
            for (i = 0; i < npal; i++)
                ...
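The etype 4/5 path widens 6-bit palette components to 8 bits in place: shifting the packed 24-bit triplet left by 2 scales each component by 4, and OR-ing back the top two bits of each component (the >> 6 & 0x30303 term) makes the maximum value 0x3F map to 0xFF rather than 0xFC. A standalone sketch with a made-up input value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t rgb6 = 0x3F2A00;                  /* made-up packed 6-bit R, G, B  */
    uint32_t pal  = rgb6 << 2;                 /* scale each component by 4     */
    pal |= 0xFFU << 24 | (pal >> 6 & 0x30303); /* opaque alpha + low-bit refill */

    printf("0x%08X\n", (unsigned)pal);         /* prints 0xFFFFAA00             */
    return 0;
}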
    /* non-zero: the image data is run-length coded */
    if (bytestream2_get_le16(&s->g)) {
        ...
            int stop_size, marker, t1, t2;
            ...
            t2 = bytestream2_get_le16(&s->g);
            ...
            marker = bytestream2_get_byte(&s->g);
            ...
            while (plane < s->nb_planes &&
                   ...
                int run = 1;                   /* a literal byte is one pixel */
                val = bytestream2_get_byte(&s->g);
                /* the marker byte introduces a run: a count byte follows
                 * (0 escapes to a 16-bit count), then the value to repeat */
                if (val == marker) {
                    run = bytestream2_get_byte(&s->g);
                    if (run == 0)
                        run = bytestream2_get_le16(&s->g);
                    val = bytestream2_get_byte(&s->g);
                }
                ...
                if (bits_per_plane == 8) {
                    ...
        if (s->nb_planes - plane > 1)
            ...
        /* pad whatever remains of the image with the last value */
        if (plane < s->nb_planes && x < avctx->width) {
            int run = (y + 1) * avctx->width - x;
            if (bits_per_plane == 8)
                ...
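For reference, a self-contained sketch of the same marker-based RLE scheme outside the decoder (function and buffer names here are illustrative only): a byte equal to the marker introduces a repeated value with an 8-bit count, count 0 escapes to a 16-bit little-endian count, and any other byte stands for a single pixel.

#include <stddef.h>
#include <stdint.h>

/* Expand marker-based RLE data into dst; returns the number of bytes written.
 * Illustrative sketch only -- not the decoder's own helper. */
size_t rle_expand(const uint8_t *src, size_t n, uint8_t marker,
                  uint8_t *dst, size_t dst_size)
{
    size_t i = 0, o = 0;

    while (i < n) {
        unsigned run = 1;                       /* a literal byte is one pixel */
        uint8_t  val = src[i++];

        if (val == marker && i < n) {
            run = src[i++];                     /* 8-bit repeat count          */
            if (run == 0 && i + 1 < n) {
                run = src[i] | src[i + 1] << 8; /* 0 escapes to a 16-bit count */
                i += 2;
            }
            if (i < n)
                val = src[i++];                 /* value to repeat             */
        }
        while (run-- && o < dst_size)
            dst[o++] = val;
    }
    return o;
}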