#define CLIP(v) av_clip(v, 4, 1019)
#define CLIP8(v) av_clip(v, 1, 254)

/* Pack three clipped 10-bit samples into one little-endian 32-bit v210 word. */
#define WRITE_PIXELS(a, b, c)         \
    do {                              \
        val  =  CLIP(*a++);           \
        val |= (CLIP(*b++) << 10) |   \
               (CLIP(*c++) << 20);    \
        AV_WL32(dst, val);            \
        dst += 4;                     \
    } while (0)

/* Same layout for 8-bit input: each sample is shifted up by 2 bits. */
#define WRITE_PIXELS8(a, b, c)        \
    do {                              \
        val  = (CLIP8(*a++) << 2);    \
        val |= (CLIP8(*b++) << 12) |  \
               (CLIP8(*c++) << 22);   \
        AV_WL32(dst, val);            \
        dst += 4;                     \
    } while (0)
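Each WRITE_PIXELS/WRITE_PIXELS8 expansion produces one v210 word: component a lands in bits 0-9, b in bits 10-19, c in bits 20-29, and the top two bits stay zero. A standalone sketch of the same bit layout (illustrative helper, not part of v210enc.c):

#include <stdint.h>

/* Hypothetical helper mirroring WRITE_PIXELS(a, b, c): three 10-bit
 * components packed into bits 0-9, 10-19 and 20-29 of one 32-bit word. */
static uint32_t pack_v210_word(uint16_t a, uint16_t b, uint16_t c)
{
    return  (uint32_t)(a & 0x3ff)        |
           ((uint32_t)(b & 0x3ff) << 10) |
           ((uint32_t)(c & 0x3ff) << 20);
}
/* e.g. pack_v210_word(cb0, y0, cr0) corresponds to WRITE_PIXELS(u, y, v);
 * the resulting word is stored little-endian, as AV_WL32() does. */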
static void v210_planar_pack_8_c(const uint8_t *y, const uint8_t *u,
                                 const uint8_t *v, uint8_t *dst,
                                 ptrdiff_t width)
{
    /* unrolled to match the assembly: 12 pixels (eight words) per iteration */
    for (i = 0; i < width - 11; i += 12) {
        /* ... WRITE_PIXELS8() calls ... */
    }
}

static void v210_planar_pack_10_c(const uint16_t *y, const uint16_t *u,
                                  const uint16_t *v, uint8_t *dst,
                                  ptrdiff_t width)
    /* in ff_v210enc_init(): default sample grouping for the C packers */
    s->sample_factor_8  = 1;
    s->sample_factor_10 = 1;
    /* in encode_init(): v210 requires an even frame width */
    if (avctx->width & 1) {
        /* ... error out ... */
    }

#if FF_API_CODED_FRAME
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pic, int *got_packet)
{
    /* v210 pads every line to a multiple of 48 pixels and stores
     * 6 pixels in 16 bytes, i.e. 8/3 bytes per pixel */
    int aligned_width = ((avctx->width + 47) / 48) * 48;
    int stride        = aligned_width * 8 / 3;
    int line_padding  = stride - ((avctx->width * 8 + 11) / 12) * 4;
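As a worked check of this arithmetic (illustrative numbers, not from the source), a 1280-pixel-wide frame gives:

/*
 *   aligned_width = ((1280 + 47) / 48) * 48    = 1296 pixels
 *   stride        = 1296 * 8 / 3               = 3456 bytes per output line
 *   payload       = ((1280 * 8 + 11) / 12) * 4 = 3416 bytes actually written
 *   line_padding  = 3456 - 3416                = 40 trailing zero bytes
 */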
    if (pic->format == AV_PIX_FMT_YUV422P10) {
        const uint16_t *y = (const uint16_t *)pic->data[0];
        const uint16_t *u = (const uint16_t *)pic->data[1];
        const uint16_t *v = (const uint16_t *)pic->data[2];

        /* number of whole sample groups per line covered by the packer call */
        const int sample_size = 6 * s->sample_factor_10;
        const int sample_w    = avctx->width / sample_size;
        for (h = 0; h < avctx->height; h++) {
            w = sample_w * sample_size;
            s->pack_line_10(y, u, v, dst, w);

            /* the packer emitted 16 bytes for every 6 pixels it consumed */
            dst += sample_w * 16 * s->sample_factor_10;

            /* pack any remaining whole 6-pixel groups with WRITE_PIXELS() */
            for (; w < avctx->width - 5; w += 6) {
            /* leftover pixels on lines whose width is not a multiple of 6 */
            if (w < avctx->width - 1) {
                /* ... */
                if (w == avctx->width - 2) {
                    /* ... */
                }
            }
            if (w < avctx->width - 3) {
                /* ... */
            }

            /* zero the padding up to the 48-pixel-aligned stride */
            memset(dst, 0, line_padding);
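The byte counts used above (and the 32 bytes per 12 pixels in the 8-bit branch below) follow directly from the v210 layout; a quick derivation, for reference:

/*
 * 6 pixels of 4:2:2 video = 6 Y + 3 Cb + 3 Cr = 12 samples
 * 12 samples x 10 bits    = 120 payload bits  = 4 words of 30 used bits
 * 4 x 32-bit words        = 16 bytes per 6 pixels (8/3 bytes per pixel),
 * so 12 pixels occupy 32 bytes, matching the dst advances in both branches.
 */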
    } else if (pic->format == AV_PIX_FMT_YUV422P) {
        /* 8-bit input: the unrolled packer works on 12 pixels at a time */
        const int sample_size = 12 * s->sample_factor_8;
        const int sample_w    = avctx->width / sample_size;

        for (h = 0; h < avctx->height; h++) {
            w = sample_w * sample_size;
            s->pack_line_8(y, u, v, dst, w);

            /* 12 pixels -> 32 output bytes */
            dst += sample_w * 32 * s->sample_factor_8;
            /* remaining whole 6-pixel groups, then leftover pixels,
             * mirroring the 10-bit path but using WRITE_PIXELS8() */
            for (; w < avctx->width - 5; w += 6) {
                /* ... */
            }
            if (w < avctx->width - 1) {
                /* ... */
                if (w == avctx->width - 2) {
                    /* ... */
                }
            }
            if (w < avctx->width - 3) {
                /* ... */
            }

            memset(dst, 0, line_padding);
    /* copy ATSC A53 closed-caption side data from the frame to the packet */
    if (side_data && side_data->size) {
        /* ... */
    }
    /* copy Active Format Description (AFD) side data, if present */
    if (side_data && side_data->size) {
        /* ... */
    }
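The elided bodies above follow the usual libavcodec pattern for forwarding frame side data into packet side data. A minimal sketch of that pattern for the A53 case (not the verbatim source; pic and pkt are the encode_frame() arguments):

    AVFrameSideData *side_data = av_frame_get_side_data(pic, AV_FRAME_DATA_A53_CC);
    if (side_data && side_data->size) {
        uint8_t *buf = av_packet_new_side_data(pkt, AV_PKT_DATA_A53_CC, side_data->size);
        if (!buf)
            return AVERROR(ENOMEM);
        memcpy(buf, side_data->data, side_data->size);
    }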
int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
Get an estimated video bitrate based on frame size, frame rate and coded bits per pixel.
#define FF_ENABLE_DEPRECATION_WARNINGS
AVPixelFormat
Pixel format.
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
AVFrame
This structure describes decoded (raw) audio or video data.
@ AV_PKT_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVActiveFormatDescription enum.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static enum AVPixelFormat pix_fmts[]
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
int64_t bit_rate
the average bitrate
@ AV_PICTURE_TYPE_I
Intra.
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet)
#define AV_PIX_FMT_YUV422P10
av_cold void ff_v210enc_init(V210EncContext *s)
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVActiveFormatDescription enum.
enum AVPictureType pict_type
Picture type of the frame.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static av_cold int encode_init(AVCodecContext *avctx)
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
int flags
A combination of AV_PKT_FLAG values.
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
@ AV_PKT_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
const char * name
Name of the codec implementation.
void ff_v210enc_init_x86(V210EncContext *s)
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
AVCodecContext
main external API structure.
#define WRITE_PIXELS8(a, b, c)
#define FF_DISABLE_DEPRECATION_WARNINGS
static void v210_planar_pack_10_c(const uint16_t *y, const uint16_t *u, const uint16_t *v, uint8_t *dst, ptrdiff_t width)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
AVFrameSideData
Structure to hold side data for an AVFrame.
AVPacket
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
#define WRITE_PIXELS(a, b, c)
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static void v210_planar_pack_8_c(const uint8_t *y, const uint8_t *u, const uint8_t *v, uint8_t *dst, ptrdiff_t width)
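For context, a minimal sketch of driving this encoder through the public libavcodec API (assumes the FFmpeg 4.x send/receive API; the helper name is hypothetical and error paths and frame filling are abbreviated):

#include <libavcodec/avcodec.h>

/* Encode a single 10-bit 4:2:2 frame to v210. 'out' must be an allocated
 * AVPacket (e.g. from av_packet_alloc()). */
static int encode_one_v210_frame(int width, int height, AVPacket *out)
{
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_V210);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    AVFrame *frame       = av_frame_alloc();
    int ret;

    ctx->width     = width;              /* must be even, see encode_init() */
    ctx->height    = height;
    ctx->pix_fmt   = AV_PIX_FMT_YUV422P10;
    ctx->time_base = (AVRational){ 1, 25 };
    if ((ret = avcodec_open2(ctx, codec, NULL)) < 0)
        return ret;

    frame->format = ctx->pix_fmt;
    frame->width  = width;
    frame->height = height;
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* fill frame->data[0..2] with the Y, Cb and Cr planes here */

    if ((ret = avcodec_send_frame(ctx, frame)) < 0)
        return ret;
    ret = avcodec_receive_packet(ctx, out);  /* 0 on success */

    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    return ret;
}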