#define DNX10BIT_QMAT_SHIFT 18
#define RC_VARIANCE 1 // use variance or ssd for fast rc
#define LAMBDA_FRAC_BITS 10

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "nitris_compat", "encode with Avid Nitris compatibility",
        offsetof(DNXHDEncContext, nitris_compat), AV_OPT_TYPE_BOOL,
        { .i64 = 0 }, 0, 1, VE },
    { "ibias", "intra quant bias",
        offsetof(DNXHDEncContext, intra_quant_bias), AV_OPT_TYPE_INT,
        { .i64 = 0 }, INT_MIN, INT_MAX, VE },
    { "profile", NULL, offsetof(DNXHDEncContext, profile), AV_OPT_TYPE_INT,
        { .i64 = AV_PROFILE_DNXHD },
        AV_PROFILE_DNXHD, AV_PROFILE_DNXHR_444, VE, .unit = "profile" },
    { "dnxhd",     NULL, 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_PROFILE_DNXHD },     0, 0, VE, .unit = "profile" },
    { "dnxhr_444", NULL, 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_PROFILE_DNXHR_444 }, 0, 0, VE, .unit = "profile" },
    { "dnxhr_hqx", NULL, 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_PROFILE_DNXHR_HQX }, 0, 0, VE, .unit = "profile" },
    { "dnxhr_hq",  NULL, 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_PROFILE_DNXHR_HQ },  0, 0, VE, .unit = "profile" },
    { "dnxhr_sq",  NULL, 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_PROFILE_DNXHR_SQ },  0, 0, VE, .unit = "profile" },
    { "dnxhr_lb",  NULL, 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_PROFILE_DNXHR_LB },  0, 0, VE, .unit = "profile" },
    { NULL }
};
                                          const uint8_t *pixels,

    for (i = 0; i < 4; i++) {
                                           const uint8_t *pixels,

    memcpy(block + 0 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 7 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 1 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 6 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 2 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 5 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 3 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
    memcpy(block + 4 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
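The eight memcpy() calls above lay the four source rows out twice, mirrored, so rows 0..3 and rows 7..4 of the 8x8 block are identical and the vertical transform of the 8x4 field data stays symmetric. A minimal sketch of the same layout, assuming the rows are already 16-bit samples (the real functions read 8- or 10-bit pixels through the DSP context):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sketch: duplicate 4 rows into an 8x8 block in mirrored order. */
static void mirror_rows_8x4(int16_t block[64], const int16_t *rows,
                            ptrdiff_t row_stride)
{
    for (int i = 0; i < 4; i++) {
        memcpy(block +      i  * 8, rows + i * row_stride, 8 * sizeof(*block));
        memcpy(block + (7 - i) * 8, rows + i * row_stride, 8 * sizeof(*block));
    }
}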
    int i, j, level, last_non_zero, start_i;
    const uint8_t *scantable = ctx->intra_scantable.scantable;
    unsigned int threshold1, threshold2;

    qmat = n < 4 ? ctx->q_intra_matrix[qscale] : ctx->q_chroma_intra_matrix[qscale];
    bias = ctx->intra_quant_bias * (1 << (16 - 8));
    threshold1 = (1 << 16) - bias - 1;
    threshold2 = (threshold1 << 1);

    for (i = 63; i >= start_i; i--) {
        if (((unsigned)(level + threshold1)) > threshold2) {

    for (i = start_i; i <= last_non_zero; i++) {
        if (((unsigned)(level + threshold1)) > threshold2) {

                     scantable, last_non_zero);

    return last_non_zero;
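The (unsigned)(level + threshold1) > threshold2 test above is the usual dead-zone check: with threshold2 equal to 2 * threshold1, one unsigned comparison is true exactly when the scaled coefficient is greater than threshold1 or less than -threshold1. Restated on its own (names are mine, not the file's):

/* Sketch: nonzero when |level| lies outside the quantizer dead zone,
 * i.e. level > threshold1 or level < -threshold1, using one unsigned compare. */
static int outside_dead_zone(int level, unsigned threshold1)
{
    unsigned threshold2 = threshold1 << 1;
    return (unsigned)(level + threshold1) > threshold2;
}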
    const uint8_t *scantable = ctx->intra_scantable.scantable;
    const int *qmat = n < 4 ? ctx->q_intra_matrix[qscale] : ctx->q_chroma_intra_matrix[qscale];
    int last_non_zero = 0;

    for (i = 1; i < 64; ++i) {
        int j = scantable[i];

                     scantable, last_non_zero);

    return last_non_zero;
    int max_level = 1 << (ctx->bit_depth + 2);

    ctx->vlc_codes = ctx->orig_vlc_codes + max_level * 2;
    ctx->vlc_bits  = ctx->orig_vlc_bits  + max_level * 2;

        offset = (alevel - 1) >> 6;

        for (j = 0; j < 257; j++) {
            if (ctx->cid_table->ac_info[2 * j + 0] >> 1 == alevel &&
                (!run || (ctx->cid_table->ac_info[2 * j + 1] & 2) && run)) {

                    (ctx->cid_table->ac_codes[j] << 1) | (sign & 1);
                ctx->vlc_bits[index] = ctx->cid_table->ac_bits[j] + 1;

                ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];

            ctx->vlc_bits[index] += ctx->cid_table->index_bits;

    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];

        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits[run]  = ctx->cid_table->run_bits[i];
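The two pointer assignments near the top of this function (vlc_codes/vlc_bits pointing max_level * 2 entries into the allocated arrays) exist so the tables can be indexed directly with the signed run/level value. A small self-contained sketch of that biased-pointer idiom, with hypothetical names:

#include <stdint.h>
#include <stdlib.h>

typedef struct {
    uint32_t *orig_codes; /* allocation base, 4 * max_level entries             */
    uint32_t *codes;      /* biased pointer, valid for indices +/- 2 * max_level */
} SignedIndexTable;

static int signed_table_init(SignedIndexTable *t, int max_level)
{
    t->orig_codes = calloc(4 * (size_t)max_level, sizeof(*t->orig_codes));
    if (!t->orig_codes)
        return -1;
    /* codes[-2*max_level .. 2*max_level - 1] is now addressable */
    t->codes = t->orig_codes + max_level * 2;
    return 0;
}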
    uint16_t weight_matrix[64] = { 1, };
    const uint8_t *luma_weight_table   = ctx->cid_table->luma_weight;
    const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;

    if (ctx->bit_depth == 8) {
        for (i = 1; i < 64; i++) {
            weight_matrix[j] = ctx->cid_table->luma_weight[i];

                          weight_matrix, ctx->intra_quant_bias, 1,
                          ctx->m.avctx->qmax, 1);
        for (i = 1; i < 64; i++) {
            weight_matrix[j] = ctx->cid_table->chroma_weight[i];

                          weight_matrix, ctx->intra_quant_bias, 1,
                          ctx->m.avctx->qmax, 1);

        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 0; i < 64; i++) {
                ctx->qmatrix_l[qscale][i]      <<= 2;
                ctx->qmatrix_c[qscale][i]      <<= 2;
                ctx->qmatrix_l16[qscale][0][i] <<= 2;
                ctx->qmatrix_l16[qscale][1][i] <<= 2;
                ctx->qmatrix_c16[qscale][0][i] <<= 2;
                ctx->qmatrix_c16[qscale][1][i] <<= 2;

        for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
            for (i = 1; i < 64; i++) {

                    (qscale * luma_weight_table[i]);

                    (qscale * chroma_weight_table[i]);

    ctx->m.q_chroma_intra_matrix16 = ctx->qmatrix_c16;
    ctx->m.q_chroma_intra_matrix   = ctx->qmatrix_c;
    ctx->m.q_intra_matrix16        = ctx->qmatrix_l16;
    ctx->m.q_intra_matrix          = ctx->qmatrix_l;
    ctx->frame_bits = (ctx->coding_unit_size -
                       ctx->data_offset - 4 - ctx->min_padding) * 8;
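A worked example of that budget computation, using made-up numbers rather than a real CID entry: a 606208-byte coding unit, a 0x280-byte data offset, the 4-byte end-of-frame marker and no Nitris padding leave (606208 - 640 - 4) * 8 = 4844512 bits for the slices.

/* Illustrative only; the constants below are hypothetical, not from the CID tables. */
enum {
    EX_CODING_UNIT_SIZE = 606208,
    EX_DATA_OFFSET      = 0x280,   /* 640 */
    EX_MIN_PADDING      = 0,
    EX_FRAME_BITS = (EX_CODING_UNIT_SIZE - EX_DATA_OFFSET - 4 - EX_MIN_PADDING) * 8
                  /* = 605564 * 8 = 4844512 */
};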
               "pixel format is incompatible with DNxHD profile\n");

               "pixel format is incompatible with DNxHR HQX profile\n");

               "pixel format is incompatible with DNxHR LB/SQ/HQ profile\n");

               "video parameters incompatible with DNxHD. Valid DNxHD profiles:\n");

    if (ctx->cid >= 1270 && ctx->cid <= 1274)

               "Input dimensions too small, input must be at least 256x120\n");
    if (!ctx->m.dct_quantize)

        ctx->block_width_l2 = 4;
    } else if (ctx->bit_depth == 10) {
            ctx->block_width_l2 = 4;
            ctx->block_width_l2 = 3;

        ctx->m.mb_height /= 2;

               "Interlaced encoding is not supported for DNxHR profiles.\n");

    ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width;

        ctx->coding_unit_size = ctx->frame_size;

        ctx->frame_size       = ctx->cid_table->frame_size;
        ctx->coding_unit_size = ctx->cid_table->coding_unit_size;

    if (ctx->m.mb_height > 68)
        ctx->data_offset = 0x170 + (ctx->m.mb_height << 2);
    else
        ctx->data_offset = 0x280;

    if (ctx->nitris_compat)
        ctx->min_padding = 1600;
    memset(buf, 0, ctx->data_offset);

    if (ctx->cid >= 1270 && ctx->cid <= 1274)

    buf[5] = ctx->interlaced ? ctx->cur_field + 2 : 0x01;

    buf[0x21] = ctx->bit_depth == 10 ? 0x58 : 0x38;
    buf[0x22] = 0x88 + (ctx->interlaced << 2);
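Isolating just the header bytes visible above as a sketch (the surrounding writes are elided here); the values mirror the fragments shown, not the full header layout:

#include <stdint.h>

/* Sketch: field/depth/interlace markers as written by dnxhd_write_header(). */
static void write_header_flags(uint8_t *buf, int interlaced, int cur_field,
                               int bit_depth)
{
    buf[5]    = interlaced ? cur_field + 2 : 0x01; /* 2/3 = first/second field, 1 = progressive */
    buf[0x21] = bit_depth == 10 ? 0x58 : 0x38;     /* sample-depth marker */
    buf[0x22] = 0x88 + (interlaced << 2);          /* interlace flag */
}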
             (ctx->cid_table->dc_codes[nbits] << nbits) +
                                              int last_index, int n)

    int last_non_zero = 0;

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];

            int run_level = i - last_non_zero - 1;
            int rlevel = slevel * (1 << 1) | !!run_level;

                     ctx->run_codes[run_level]);
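The rlevel = slevel * (1 << 1) | !!run_level line packs the signed quantized level together with a "a zero-run precedes this coefficient" flag into a single index of the biased VLC tables; the run length itself is coded separately through run_codes/run_bits. Restated on its own:

/* Sketch: e.g. slevel = -3 with a preceding zero run gives -6 | 1 = -5,
 * which indexes vlc_codes/vlc_bits below their biased base pointer. */
static int pack_rlevel(int slevel, int run_length)
{
    return slevel * (1 << 1) | !!run_length;
}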
                                            int qscale, int last_index)

    const uint8_t *weight_matrix;

        weight_matrix = ((n % 6) < 2) ? ctx->cid_table->luma_weight
                                      : ctx->cid_table->chroma_weight;
        weight_matrix = (n & 2) ? ctx->cid_table->chroma_weight
                                : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];

            level = (1 - 2 * level) * qscale * weight_matrix[i];
            if (ctx->bit_depth == 10) {
                if (weight_matrix[i] != 8)
                if (weight_matrix[i] != 32)

            level = (2 * level + 1) * qscale * weight_matrix[i];
            if (ctx->bit_depth == 10) {
                if (weight_matrix[i] != 8)
                if (weight_matrix[i] != 32)
    for (i = 0; i < 64; i++)

    int last_non_zero = 0;

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];

            int run_level = i - last_non_zero - 1;
                                      !!run_level] + ctx->run_bits[run_level];
    const int bs = ctx->block_width_l2;
    const int bw = 1 << bs;
    int dct_y_offset  = ctx->dct_y_offset;
    int dct_uv_offset = ctx->dct_uv_offset;
    int linesize      = ctx->m.linesize;
    int uvlinesize    = ctx->m.uvlinesize;
    const uint8_t *ptr_y = ctx->thread[0]->src[0] +
                           ((mb_y << 4) * ctx->m.linesize) + (mb_x << bs + 1);
    const uint8_t *ptr_u = ctx->thread[0]->src[1] +
                           ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs + ctx->is_444);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] +
                           ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs + ctx->is_444);

                        (mb_y << 4) + 16 > ctx->m.avctx->height)) {
        int y_w  = ctx->m.avctx->width  - (mb_x << 4);
        int y_h  = ctx->m.avctx->height - (mb_y << 4);
        int uv_w = (y_w + 1) / 2;

                                 linesize, ctx->m.linesize,

                                 uvlinesize, ctx->m.uvlinesize,

                                 uvlinesize, ctx->m.uvlinesize,

        dct_y_offset  = bw * linesize;
        dct_uv_offset = bw * uvlinesize;
        ptr_y = &ctx->edge_buf_y[0];
        ptr_u = &ctx->edge_buf_uv[0][0];
        ptr_v = &ctx->edge_buf_uv[1][0];

                               (mb_y << 4) + 16 > ctx->m.avctx->height)) {
        int y_w  = ctx->m.avctx->width  - (mb_x << 4);
        int y_h  = ctx->m.avctx->height - (mb_y << 4);
        int uv_w = ctx->is_444 ? y_w : (y_w + 1) / 2;

        uvlinesize = 16 + 16 * ctx->is_444;

                                 linesize, ctx->m.linesize,

                                 uvlinesize, ctx->m.uvlinesize,

                                 uvlinesize, ctx->m.uvlinesize,

        dct_y_offset  = bw * linesize / 2;
        dct_uv_offset = bw * uvlinesize / 2;
        ptr_y = &ctx->edge_buf_y[0];
        ptr_u = &ctx->edge_buf_uv[0][0];
        ptr_v = &ctx->edge_buf_uv[1][0];
    if (mb_y + 1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
        if (ctx->interlaced) {
            ctx->get_pixels_8x4_sym(ctx->blocks[4],
                                    ptr_y + dct_y_offset,
            ctx->get_pixels_8x4_sym(ctx->blocks[5],
                                    ptr_y + dct_y_offset + bw,
            ctx->get_pixels_8x4_sym(ctx->blocks[6],
                                    ptr_u + dct_uv_offset,
            ctx->get_pixels_8x4_sym(ctx->blocks[7],
                                    ptr_v + dct_uv_offset,

            ctx->bdsp.clear_block(ctx->blocks[4]);
            ctx->bdsp.clear_block(ctx->blocks[5]);
            ctx->bdsp.clear_block(ctx->blocks[6]);
            ctx->bdsp.clear_block(ctx->blocks[7]);

                         ptr_y + dct_y_offset, linesize);
                         ptr_y + dct_y_offset + bw, linesize);
                         ptr_u + dct_uv_offset, uvlinesize);
                         ptr_v + dct_uv_offset, uvlinesize);

        pdsp->get_pixels(ctx->blocks[6], ptr_y + dct_y_offset, linesize);
        pdsp->get_pixels(ctx->blocks[7], ptr_y + dct_y_offset + bw, linesize);

        pdsp->get_pixels(ctx->blocks[8], ptr_u + dct_uv_offset, uvlinesize);
        pdsp->get_pixels(ctx->blocks[9], ptr_u + dct_uv_offset + bw, uvlinesize);

        pdsp->get_pixels(ctx->blocks[10], ptr_v + dct_uv_offset, uvlinesize);
        pdsp->get_pixels(ctx->blocks[11], ptr_v + dct_uv_offset + bw, uvlinesize);
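When a macroblock overhangs the right or bottom picture border, the code above first copies it into the padded edge buffers (via emulated_edge_mc) so the DCT fetch always reads a full block. Only the clipping arithmetic is restated here, as a sketch with hypothetical names:

/* Sketch: width/height of the valid part of a 16x16 macroblock at the frame edge. */
static void clip_mb(int mb_x, int mb_y, int width, int height, int is_444,
                    int *y_w, int *y_h, int *uv_w)
{
    *y_w = width  - (mb_x << 4);
    *y_h = height - (mb_y << 4);
    if (*y_w > 16) *y_w = 16;
    if (*y_h > 16) *y_h = 16;
    *uv_w = is_444 ? *y_w : (*y_w + 1) / 2; /* 4:2:2 halves the chroma width */
}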
    static const uint8_t component[8] = { 0, 0, 1, 2, 0, 0, 1, 2 };
                               int jobnr, int threadnr)

    int mb_y = jobnr, mb_x;
    int qscale = ctx->qscale;

    ctx = ctx->thread[threadnr];

    ctx->m.last_dc[2] = 1 << (ctx->bit_depth + 2);

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {
            int16_t *src_block = ctx->blocks[i];

            memcpy(block, src_block, 64 * sizeof(*block));

                                          ctx->is_444 ? 4 * (n > 0) : 4 & (2 * i),

                dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

        ctx->mb_rc[(qscale * ctx->m.mb_num) + mb].ssd  = ssd;
        ctx->mb_rc[(qscale * ctx->m.mb_num) + mb].bits = ac_bits + dc_bits + 12 +
            (1 + ctx->is_444) * 8 * ctx->vlc_bits[0];
                               int jobnr, int threadnr)

    int mb_y = jobnr, mb_x;

    ctx = ctx->thread[threadnr];
                  ctx->slice_size[jobnr]);

    ctx->m.last_dc[2] = 1 << (ctx->bit_depth + 2);
    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
        unsigned mb = mb_y * ctx->m.mb_width + mb_x;
        int qscale = ctx->mb_qscale[mb];

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {

            int last_index = ctx->m.dct_quantize(&ctx->m, block,
                                 ctx->is_444 ? (((i >> 1) % 3) < 1 ? 0 : 4) : 4 & (2 * i),
    for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {

        ctx->slice_size[mb_y] = 0;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            ctx->slice_size[mb_y] += ctx->mb_bits[mb];

        ctx->slice_size[mb_y]   = (ctx->slice_size[mb_y] + 31U) & ~31U;
        ctx->slice_size[mb_y] >>= 3;
        thread_size = ctx->slice_size[mb_y];
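A worked example of the slice sizing above: macroblocks summing to 23457 bits are rounded up to a 32-bit boundary, (23457 + 31) & ~31 = 23488 bits, then converted to 23488 >> 3 = 2936 bytes, so every slice starts word-aligned inside the coding unit.

/* Sketch of the same rounding, as a standalone helper. */
static unsigned slice_bytes(unsigned slice_bits)
{
    return ((slice_bits + 31U) & ~31U) >> 3;
}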
                                int jobnr, int threadnr)

    int mb_y = jobnr, mb_x, x, y;
    int partial_last_row = (mb_y == ctx->m.mb_height - 1) &&

    ctx = ctx->thread[threadnr];
    if (ctx->bit_depth == 8) {
        const uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize);
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;

                sum  = ctx->m.mpvencdsp.pix_sum(pix, ctx->m.linesize);
                varc = ctx->m.mpvencdsp.pix_norm1(pix, ctx->m.linesize);

                for (y = 0; y < bh; y++) {
                    for (x = 0; x < bw; x++) {
                        uint8_t val = pix[x + y * ctx->m.linesize];

            varc = (varc - (((unsigned) sum * sum) >> 8) + 128) >> 8;

            ctx->mb_cmp[mb].value = varc;

        const int linesize = ctx->m.linesize >> 1;
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) {
            const uint16_t *pix = (const uint16_t *)ctx->thread[0]->src[0] +
                                  ((mb_y << 4) * linesize) + (mb_x << 4);
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;

            for (i = 0; i < bh; ++i) {
                for (j = 0; j < bw; ++j) {

                    const int sample = (unsigned) pix[j] >> 6;
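For 8-bit input the activity measure computed above is the population variance of the 16x16 block, E[x^2] - E[x]^2, with both divisions by 256 folded into shifts and a +128 rounding term; pix_sum()/pix_norm1() supply the sum and the sum of squares. A direct, self-contained restatement:

#include <stdint.h>
#include <stddef.h>

/* Sketch: variance of a full 16x16 block of 8-bit pixels, matching
 * (sum_sq - sum*sum/256 + 128) >> 8 from the code above. */
static unsigned mb_variance_8bit(const uint8_t *pix, ptrdiff_t stride)
{
    unsigned sum = 0, sum_sq = 0;
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++) {
            unsigned v = pix[x + y * stride];
            sum    += v;
            sum_sq += v * v;
        }
    return (sum_sq - ((sum * sum) >> 8) + 128) >> 8;
}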
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;

    lambda = ctx->lambda;

        if (lambda == last_higher) {

        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;

                int mb = y * ctx->m.mb_width + x;

                    int i = (q * ctx->m.mb_num) + mb;
                    unsigned score = ctx->mb_rc[i].bits * lambda +

                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb]   = ctx->mb_rc[rc].bits;

        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda + last_higher) >> 1;
            else
                lambda -= down_step;

            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)

            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda + last_lower) >> 1;
            else if ((int64_t)lambda + up_step > INT_MAX)

    ctx->lambda = lambda;
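The lambda loop above brackets the frame bit budget: lambda is raised while the frame overshoots and lowered while it undershoots, bisecting once both a fitting and a non-fitting value are known. Below is a self-contained sketch of that search, assuming the bit count is monotonically non-increasing in lambda; bits_at() stands in for re-running the per-macroblock qscale selection, and the step growth of the real code (down_step/up_step) is simplified to halving/doubling:

#include <stdint.h>

static int64_t search_lambda(int64_t lambda, int64_t frame_bits,
                             int64_t (*bits_at)(int64_t lambda, void *opaque),
                             void *opaque)
{
    int64_t last_lower  = INT64_MAX; /* smallest lambda known to fit      */
    int64_t last_higher = 0;         /* largest lambda known to overshoot */

    if (lambda < 1)
        lambda = 1;

    for (;;) {
        int64_t bits = bits_at(lambda, opaque);

        if (bits < frame_bits) {                     /* fits: remember, try smaller */
            if (lambda < last_lower)
                last_lower = lambda;
            if (last_higher && lambda - last_higher <= 1)
                return last_lower;                   /* bracket closed */
            lambda = last_higher ? (lambda + last_higher) / 2 : lambda / 2;
            if (lambda < 1)
                return last_lower;
        } else {                                     /* overshoots: raise lambda */
            if (lambda > last_higher)
                last_higher = lambda;
            if (last_lower != INT64_MAX && last_lower - lambda <= 1)
                return last_lower;
            if (last_lower != INT64_MAX)
                lambda = (lambda + last_lower) / 2;
            else if (lambda > INT64_MAX / 2)
                return -1;                           /* budget cannot be met */
            else
                lambda *= 2;
        }
    }
}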
    int last_higher = 0;
    int last_lower = INT_MAX;

        qscale = ctx->qscale;

        ctx->qscale = qscale;

        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)

        if (bits < ctx->frame_bits) {

            if (last_higher == qscale - 1) {
                qscale = last_higher;

            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale + last_higher) >> 1;
            else
                qscale -= down_step++;

            if (last_lower == qscale + 1)

            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale + last_lower) >> 1;
            else
                qscale += up_step++;

        if (qscale >= ctx->m.avctx->qmax)

    ctx->qscale = qscale;
#define BUCKET_BITS 8
#define RADIX_PASSES 4
#define NBUCKETS (1 << BUCKET_BITS)

        int v = data[i].value;

            buckets[j][i] = offset -= buckets[j][i];

        int pos = buckets[v]++;
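radix_count()/radix_sort_pass() above implement an LSD radix sort over the per-macroblock comparison values, RADIX_PASSES passes of BUCKET_BITS bits each; the backwards walk that turns bucket counts into start offsets is the "offset -= buckets[j][i]" line. One pass of the same idea over a plain array of unsigned values, as a sketch (names prefixed to avoid clashing with the macros above):

#include <stdint.h>

#define SK_BUCKET_BITS 8
#define SK_NBUCKETS    (1 << SK_BUCKET_BITS)

/* Sketch: one stable counting pass of an LSD radix sort. */
static void radix_pass_sketch(uint32_t *dst, const uint32_t *src, int size, int pass)
{
    int buckets[SK_NBUCKETS] = { 0 };
    int shift  = pass * SK_BUCKET_BITS;
    int offset = size;

    for (int i = 0; i < size; i++)              /* histogram of this digit */
        buckets[(src[i] >> shift) & (SK_NBUCKETS - 1)]++;

    for (int i = SK_NBUCKETS - 1; i >= 0; i--)  /* counts -> start offsets */
        buckets[i] = offset -= buckets[i];

    for (int i = 0; i < size; i++) {            /* stable scatter */
        int b = (src[i] >> shift) & (SK_NBUCKETS - 1);
        dst[buckets[b]++] = src[i];
    }
}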
    for (y = 0; y < ctx->m.mb_height; y++) {
        for (x = 0; x < ctx->m.mb_width; x++) {
            int mb = y * ctx->m.mb_width + x;
            int rc = (ctx->qscale * ctx->m.mb_num) + mb;

            ctx->mb_bits[mb] = ctx->mb_rc[rc].bits;
            max_bits += ctx->mb_rc[rc].bits;

                delta_bits = ctx->mb_rc[rc].bits -
                             ctx->mb_rc[rc + ctx->m.mb_num].bits;

                ctx->mb_cmp[mb].value =
                    delta_bits ? ((ctx->mb_rc[rc].ssd -
                                   ctx->mb_rc[rc + ctx->m.mb_num].ssd) * 100) /

        for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
            int mb = ctx->mb_cmp[x].mb;
            int rc = (ctx->qscale * ctx->m.mb_num) + mb;
            max_bits -= ctx->mb_rc[rc].bits -
                        ctx->mb_rc[rc + ctx->m.mb_num].bits;
            if (ctx->mb_qscale[mb] < 255)
                ctx->mb_qscale[mb]++;
            ctx->mb_bits[mb] = ctx->mb_rc[rc + ctx->m.mb_num].bits;

    if (max_bits > ctx->frame_bits)
    for (i = 0; i < ctx->m.avctx->thread_count; i++) {

        ctx->thread[i]->dct_y_offset  = ctx->m.linesize   * 8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize * 8;
    for (i = 0; i < 3; i++) {

        if (ctx->interlaced && ctx->cur_field)

               "picture could not fit ratecontrol constraints, increase qmax\n");

    for (i = 0; i < ctx->m.mb_height; i++) {

                      ctx->coding_unit_size - 4 - offset - ctx->data_offset);

        ctx->cur_field ^= 1;
        buf += ctx->coding_unit_size;
        goto encode_coding_unit;

    if (ctx->thread[1]) {
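As a usage illustration (not part of this file): the private options declared in the options[] table near the top of this listing are set through the normal AVOptions API on the encoder's priv_data. The codec name "dnxhd", the option names, and the profile constants come from that table; the surrounding calls are standard libavcodec usage, and the chosen resolution, pixel format and frame rate are only assumptions for the example.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

/* Hypothetical helper: open the DNxHD encoder with a DNxHR profile.
 * Error handling is omitted for brevity. */
static AVCodecContext *open_dnxhd_encoder(int width, int height)
{
    const AVCodec *codec = avcodec_find_encoder_by_name("dnxhd");
    AVCodecContext *enc  = avcodec_alloc_context3(codec);

    enc->width     = width;
    enc->height    = height;
    enc->pix_fmt   = AV_PIX_FMT_YUV422P;            /* 8-bit 4:2:2, assumed */
    enc->time_base = (AVRational){ 1001, 30000 };

    av_opt_set    (enc->priv_data, "profile", "dnxhr_hq", 0); /* from options[] */
    av_opt_set_int(enc->priv_data, "ibias",   0,          0);

    avcodec_open2(enc, codec, NULL);
    return enc;
}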
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
static const AVOption options[]
AVPixelFormat
Pixel format.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
In libavfilter, the word "frame" indicates either a video frame or a group of audio samples as stored in an AVFrame structure. Format negotiation computes the intersection of the formats supported at each end of a link; query_formats() can leave some formats unset and return AVERROR(EAGAIN) so that negotiation is retried later, which lets filters with complex requirements use the format negotiated on one link to set the formats supported on another.
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, int16_t *block, int last_index, int n)
static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, int16_t *block, int last_index)
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block, int n, int qscale, int *overflow)
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
int av_log2_16bit(unsigned v)
static void dnxhd_8bit_get_pixels_8x4_sym(int16_t *restrict block, const uint8_t *pixels, ptrdiff_t line_size)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
#define AV_PROFILE_DNXHR_444
#define DNX10BIT_QMAT_SHIFT
#define MASK_ABS(mask, level)
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
int mb_decision
macroblock decision mode
int qmax
maximum quantizer
static const FFCodecDefault dnxhd_defaults[]
static void bit_depth(AudioStatsContext *s, const uint64_t *const mask, uint8_t *depth)
static av_cold int dnxhd_encode_end(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
AVCodec p
The public AVCodec.
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
static av_always_inline void dnxhd_10bit_get_pixels_8x4_sym(int16_t *restrict block, const uint8_t *pixels, ptrdiff_t line_size)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
#define AV_PIX_FMT_GBRP10
void ff_dnxhd_print_profiles(AVCodecContext *avctx, int loglevel)
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
#define AV_PROFILE_DNXHR_SQ
#define FF_CODEC_ENCODE_CB(func)
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
static int put_bytes_left(const PutBitContext *s, int round_up)
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
#define AV_PIX_FMT_YUV444P10
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_PROFILE_DNXHR_LB
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
#define AV_PROFILE_DNXHR_HQ
av_cold void ff_blockdsp_init(BlockDSPContext *c)
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
int ff_dnxhd_get_hr_frame_size(int cid, int w, int h)
#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
This encoder can reorder user opaque values from input AVFrames and return them with corresponding output packets.
#define LOCAL_ALIGNED_16(t, v,...)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_PROFILE_DNXHR_HQX
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void radix_sort(RCCMPEntry *data, RCCMPEntry *tmp, int size)
void(* get_pixels)(int16_t *restrict block, const uint8_t *pixels, ptrdiff_t stride)
#define CODEC_LONG_NAME(str)
const AVProfile ff_dnxhd_profiles[]
static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static int bias(int x, int c)
av_cold void ff_mpv_idct_init(MpegEncContext *s)
static void radix_sort_pass(RCCMPEntry *dst, const RCCMPEntry *data, int size, int buckets[NBUCKETS], int pass)
#define DNXHD_VARIABLE
Indicate that a CIDEntry value must be read in the bitstream.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
AVIOContext * pb
I/O context.
static void radix_count(const RCCMPEntry *data, int size, int buckets[RADIX_PASSES][NBUCKETS])
static const AVClass dnxhd_class
#define AV_PIX_FMT_YUV422P10
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block, int n, int qscale, int *overflow)
static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx)
static int shift(int a, int b)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
static av_cold int dnxhd_init_vlc(DNXHDEncContext *ctx)
static av_always_inline int dnxhd_ssd_block(int16_t *qblock, int16_t *block)
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
const CIDEntry * ff_dnxhd_get_cid_table(int cid)
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
#define i(width, name, range_min, range_max)
static int put_bits_count(PutBitContext *s)
const FFCodec ff_dnxhd_encoder
void ff_dnxhdenc_init(DNXHDEncContext *ctx)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
const uint8_t ff_zigzag_direct[64]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
static int get_bucket(int value, int shift)
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
int ff_get_encode_buffer(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int flags)
Get a buffer for a packet.
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth)
static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
static float mean(const float *input, int size)
#define FF_MB_DECISION_RD
rate distortion
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Undefined behavior: in the C language, some operations are undefined, like signed integer overflow.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
This structure stores compressed data.
int width
picture width / height.
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
#define MKTAG(a, b, c, d)
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
static int first_field(const struct video_data *s)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, int16_t *block, int n, int qscale, int last_index)