#include "avcodec.h"
#include "bytestream.h"

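/*
 * v210 stores 10-bit 4:2:2 video as three 10-bit components packed into each
 * 32-bit little-endian word, cycling through the component order Cb, Y, Cr, Y.
 * Six pixels (6 Y, 3 Cb, 3 Cr samples) fill four words (16 bytes), and every
 * row is zero-padded to a multiple of 48 pixels (128 bytes).
 */
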
static av_cold int encode_init(AVCodecContext *avctx)
{
    if (avctx->width & 1) {
        av_log(avctx, AV_LOG_ERROR, "v210 needs even width\n");
        return -1;
    }

    if (avctx->pix_fmt != PIX_FMT_YUV422P10) {
        av_log(avctx, AV_LOG_ERROR, "v210 needs YUV422P10\n");
        return -1;
    }

    if (avctx->bits_per_raw_sample != 10)
        av_log(avctx, AV_LOG_WARNING, "bits per raw sample: %d != 10-bit\n",
               avctx->bits_per_raw_sample);

    avctx->coded_frame = avcodec_alloc_frame();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    return 0;
}

static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
                        int buf_size, void *data)
{
    const AVFrame *pic = data;
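    /* Rows are padded to a multiple of 48 pixels; every 3 pixels occupy
     * 8 bytes, so one padded output row takes aligned_width * 8 / 3 bytes. */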
    int aligned_width = ((avctx->width + 47) / 48) * 48;
    int stride = aligned_width * 8 / 3;
    int h, w;
    const uint16_t *y = (const uint16_t*)pic->data[0];
    const uint16_t *u = (const uint16_t*)pic->data[1];
    const uint16_t *v = (const uint16_t*)pic->data[2];
    uint8_t *p = buf;
    uint8_t *pdst = buf;

    if (buf_size < aligned_width * avctx->height * 8 / 3) {
        av_log(avctx, AV_LOG_ERROR, "output buffer too small\n");
        return -1;
    }

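/* Clip samples to the legal 10-bit video range 4..1019; codes 0-3 and
 * 1020-1023 are reserved for timing references and must not appear in
 * the payload. */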
#define CLIP(v) av_clip(v, 4, 1019)

#define WRITE_PIXELS(a, b, c)             \
    do {                                  \
        val  =  CLIP(*a++);               \
        val |= (CLIP(*b++) << 10) |       \
               (CLIP(*c++) << 20);        \
        bytestream_put_le32(&p, val);     \
    } while (0)

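    /* Each inner-loop iteration packs six pixels (component order
     * U Y V Y U Y V Y U Y V Y) into four 32-bit words. */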
    for (h = 0; h < avctx->height; h++) {
        uint32_t val;
        for (w = 0; w < avctx->width - 5; w += 6) {
            WRITE_PIXELS(u, y, v);
            WRITE_PIXELS(y, u, y);
            WRITE_PIXELS(v, y, u);
            WRITE_PIXELS(y, v, y);
        }
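        /* Handle the 2 or 4 pixels left over when the width is not a
         * multiple of 6 (the width is known to be even). */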
        if (w < avctx->width - 1) {
            WRITE_PIXELS(u, y, v);

            val = CLIP(*y++);
            if (w == avctx->width - 2)
                bytestream_put_le32(&p, val);
            if (w < avctx->width - 3) {
                val |= (CLIP(*u++) << 10) | (CLIP(*y++) << 20);
                bytestream_put_le32(&p, val);

                val = CLIP(*v++) | (CLIP(*y++) << 10);
                bytestream_put_le32(&p, val);
            }
        }

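        /* Zero-fill up to the 128-byte-aligned end of the row, then step the
         * plane pointers to the next input line (linesize is in bytes, the
         * pointers are uint16_t). */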
        pdst += stride;
        memset(p, 0, pdst - p);
        p = pdst;
        y += pic->linesize[0] / 2 - avctx->width;
        u += pic->linesize[1] / 2 - avctx->width / 2;
        v += pic->linesize[2] / 2 - avctx->width / 2;
    }

    return p - buf;
}

static av_cold int encode_close(AVCodecContext *avctx)
{
    av_freep(&avctx->coded_frame);

    return 0;
}

AVCodec ff_v210_encoder = {
    .name      = "v210",
    .type      = AVMEDIA_TYPE_VIDEO,
    .id        = CODEC_ID_V210,
    .init      = encode_init,
    .encode    = encode_frame,
    .close     = encode_close,
    .pix_fmts  = (const enum PixelFormat[]){ PIX_FMT_YUV422P10, PIX_FMT_NONE },
    .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
};