Go to the documentation of this file.
42 #define DNX10BIT_QMAT_SHIFT 18
43 #define RC_VARIANCE 1 // use variance or ssd for fast rc
44 #define LAMBDA_FRAC_BITS 10
46 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
48 {
"nitris_compat",
"encode with Avid Nitris compatibility",
50 {
"ibias",
"intra quant bias",
52 { .i64 = 0 }, INT_MIN, INT_MAX,
VE },
57 0, 0,
VE,
"profile" },
59 0, 0,
VE,
"profile" },
61 0, 0,
VE,
"profile" },
63 0, 0,
VE,
"profile" },
65 0, 0,
VE,
"profile" },
67 0, 0,
VE,
"profile" },
83 for (
i = 0;
i < 4;
i++) {
106 memcpy(
block + 0 * 8, pixels + 0 * line_size, 8 *
sizeof(*
block));
107 memcpy(
block + 7 * 8, pixels + 0 * line_size, 8 *
sizeof(*
block));
108 memcpy(
block + 1 * 8, pixels + 1 * line_size, 8 *
sizeof(*
block));
109 memcpy(
block + 6 * 8, pixels + 1 * line_size, 8 *
sizeof(*
block));
110 memcpy(
block + 2 * 8, pixels + 2 * line_size, 8 *
sizeof(*
block));
111 memcpy(
block + 5 * 8, pixels + 2 * line_size, 8 *
sizeof(*
block));
112 memcpy(
block + 3 * 8, pixels + 3 * line_size, 8 *
sizeof(*
block));
113 memcpy(
block + 4 * 8, pixels + 3 * line_size, 8 *
sizeof(*
block));
119 int i, j,
level, last_non_zero, start_i;
124 unsigned int threshold1, threshold2;
131 qmat = n < 4 ?
ctx->q_intra_matrix[qscale] :
ctx->q_chroma_intra_matrix[qscale];
132 bias=
ctx->intra_quant_bias * (1 << (16 - 8));
133 threshold1 = (1 << 16) - bias - 1;
134 threshold2 = (threshold1 << 1);
136 for (
i = 63;
i >= start_i;
i--) {
140 if (((
unsigned)(
level + threshold1)) > threshold2) {
148 for (
i = start_i;
i <= last_non_zero;
i++) {
152 if (((
unsigned)(
level + threshold1)) > threshold2) {
172 return last_non_zero;
179 const int *qmat = n<4 ?
ctx->q_intra_matrix[qscale] :
ctx->q_chroma_intra_matrix[qscale];
180 int last_non_zero = 0;
188 for (
i = 1;
i < 64; ++
i) {
203 return last_non_zero;
209 int max_level = 1 << (
ctx->bit_depth + 2);
216 ctx->vlc_codes =
ctx->orig_vlc_codes + max_level * 2;
217 ctx->vlc_bits =
ctx->orig_vlc_bits + max_level * 2;
225 offset = (alevel - 1) >> 6;
228 for (j = 0; j < 257; j++) {
229 if (
ctx->cid_table->ac_info[2*j+0] >> 1 == alevel &&
231 (!
run || (
ctx->cid_table->ac_info[2*j+1] & 2) &&
run)) {
235 (
ctx->cid_table->ac_codes[j] << 1) | (sign & 1);
236 ctx->vlc_bits[
index] =
ctx->cid_table->ac_bits[j] + 1;
238 ctx->vlc_codes[
index] =
ctx->cid_table->ac_codes[j];
248 ctx->vlc_bits[
index] +=
ctx->cid_table->index_bits;
252 for (
i = 0;
i < 62;
i++) {
253 int run =
ctx->cid_table->run[
i];
255 ctx->run_codes[
run] =
ctx->cid_table->run_codes[
i];
256 ctx->run_bits[
run] =
ctx->cid_table->run_bits[
i];
264 uint16_t weight_matrix[64] = { 1, };
266 const uint8_t *luma_weight_table =
ctx->cid_table->luma_weight;
267 const uint8_t *chroma_weight_table =
ctx->cid_table->chroma_weight;
275 if (
ctx->bit_depth == 8) {
276 for (
i = 1;
i < 64;
i++) {
278 weight_matrix[j] =
ctx->cid_table->luma_weight[
i];
281 weight_matrix,
ctx->intra_quant_bias, 1,
282 ctx->m.avctx->qmax, 1);
283 for (
i = 1;
i < 64;
i++) {
285 weight_matrix[j] =
ctx->cid_table->chroma_weight[
i];
288 weight_matrix,
ctx->intra_quant_bias, 1,
289 ctx->m.avctx->qmax, 1);
291 for (qscale = 1; qscale <=
ctx->m.avctx->qmax; qscale++) {
292 for (
i = 0;
i < 64;
i++) {
293 ctx->qmatrix_l[qscale][
i] <<= 2;
294 ctx->qmatrix_c[qscale][
i] <<= 2;
295 ctx->qmatrix_l16[qscale][0][
i] <<= 2;
296 ctx->qmatrix_l16[qscale][1][
i] <<= 2;
297 ctx->qmatrix_c16[qscale][0][
i] <<= 2;
298 ctx->qmatrix_c16[qscale][1][
i] <<= 2;
303 for (qscale = 1; qscale <=
ctx->m.avctx->qmax; qscale++) {
304 for (
i = 1;
i < 64;
i++) {
319 (qscale * luma_weight_table[
i]);
321 (qscale * chroma_weight_table[
i]);
326 ctx->m.q_chroma_intra_matrix16 =
ctx->qmatrix_c16;
327 ctx->m.q_chroma_intra_matrix =
ctx->qmatrix_c;
328 ctx->m.q_intra_matrix16 =
ctx->qmatrix_l16;
329 ctx->m.q_intra_matrix =
ctx->qmatrix_l;
344 ctx->frame_bits = (
ctx->coding_unit_size -
345 ctx->data_offset - 4 -
ctx->min_padding) * 8;
367 "pixel format is incompatible with DNxHD\n");
376 "pixel format is incompatible with DNxHD profile\n");
382 "pixel format is incompatible with DNxHR HQX profile\n");
390 "pixel format is incompatible with DNxHR LB/SQ/HQ profile\n");
399 "video parameters incompatible with DNxHD. Valid DNxHD profiles:\n");
405 if (
ctx->cid >= 1270 &&
ctx->cid <= 1274)
410 "Input dimensions too small, input must be at least 256x120\n");
433 if (!
ctx->m.dct_quantize)
439 ctx->block_width_l2 = 4;
440 }
else if (
ctx->bit_depth == 10) {
443 ctx->block_width_l2 = 4;
446 ctx->block_width_l2 = 3;
457 ctx->m.mb_height /= 2;
462 "Interlaced encoding is not supported for DNxHR profiles.\n");
466 ctx->m.mb_num =
ctx->m.mb_height *
ctx->m.mb_width;
472 ctx->coding_unit_size =
ctx->frame_size;
474 ctx->frame_size =
ctx->cid_table->frame_size;
475 ctx->coding_unit_size =
ctx->cid_table->coding_unit_size;
478 if (
ctx->m.mb_height > 68)
479 ctx->data_offset = 0x170 + (
ctx->m.mb_height << 2);
481 ctx->data_offset = 0x280;
489 if (
ctx->nitris_compat)
490 ctx->min_padding = 1600;
502 #if FF_API_CODED_FRAME
538 memset(
buf, 0,
ctx->data_offset);
542 if (
ctx->cid >= 1270 &&
ctx->cid <= 1274)
547 buf[5] =
ctx->interlaced ?
ctx->cur_field + 2 : 0x01;
554 buf[0x21] =
ctx->bit_depth == 10 ? 0x58 : 0x38;
555 buf[0x22] = 0x88 + (
ctx->interlaced << 2);
580 (
ctx->cid_table->dc_codes[nbits] << nbits) +
586 int last_index,
int n)
588 int last_non_zero = 0;
594 for (
i = 1;
i <= last_index;
i++) {
595 j =
ctx->m.intra_scantable.permutated[
i];
598 int run_level =
i - last_non_zero - 1;
599 int rlevel = slevel * (1 << 1) | !!run_level;
603 ctx->run_codes[run_level]);
612 int qscale,
int last_index)
619 weight_matrix = ((n % 6) < 2) ?
ctx->cid_table->luma_weight
620 :
ctx->cid_table->chroma_weight;
622 weight_matrix = (n & 2) ?
ctx->cid_table->chroma_weight
623 :
ctx->cid_table->luma_weight;
626 for (
i = 1;
i <= last_index;
i++) {
627 int j =
ctx->m.intra_scantable.permutated[
i];
631 level = (1 - 2 *
level) * qscale * weight_matrix[
i];
632 if (
ctx->bit_depth == 10) {
633 if (weight_matrix[
i] != 8)
637 if (weight_matrix[
i] != 32)
643 level = (2 *
level + 1) * qscale * weight_matrix[
i];
644 if (
ctx->bit_depth == 10) {
645 if (weight_matrix[
i] != 8)
649 if (weight_matrix[
i] != 32)
663 for (
i = 0;
i < 64;
i++)
671 int last_non_zero = 0;
674 for (
i = 1;
i <= last_index;
i++) {
675 j =
ctx->m.intra_scantable.permutated[
i];
678 int run_level =
i - last_non_zero - 1;
680 !!run_level] +
ctx->run_bits[run_level];
690 const int bs =
ctx->block_width_l2;
691 const int bw = 1 << bs;
692 int dct_y_offset =
ctx->dct_y_offset;
693 int dct_uv_offset =
ctx->dct_uv_offset;
694 int linesize =
ctx->m.linesize;
695 int uvlinesize =
ctx->m.uvlinesize;
696 const uint8_t *ptr_y =
ctx->thread[0]->src[0] +
697 ((mb_y << 4) *
ctx->m.linesize) + (mb_x << bs + 1);
698 const uint8_t *ptr_u =
ctx->thread[0]->src[1] +
699 ((mb_y << 4) *
ctx->m.uvlinesize) + (mb_x << bs +
ctx->is_444);
700 const uint8_t *ptr_v =
ctx->thread[0]->src[2] +
701 ((mb_y << 4) *
ctx->m.uvlinesize) + (mb_x << bs +
ctx->is_444);
706 (mb_y << 4) + 16 >
ctx->m.avctx->height)) {
707 int y_w =
ctx->m.avctx->width - (mb_x << 4);
708 int y_h =
ctx->m.avctx->height - (mb_y << 4);
709 int uv_w = (y_w + 1) / 2;
715 linesize,
ctx->m.linesize,
719 uvlinesize,
ctx->m.uvlinesize,
723 uvlinesize,
ctx->m.uvlinesize,
727 dct_y_offset = bw * linesize;
728 dct_uv_offset = bw * uvlinesize;
729 ptr_y = &
ctx->edge_buf_y[0];
730 ptr_u = &
ctx->edge_buf_uv[0][0];
731 ptr_v = &
ctx->edge_buf_uv[1][0];
733 (mb_y << 4) + 16 >
ctx->m.avctx->height)) {
734 int y_w =
ctx->m.avctx->width - (mb_x << 4);
735 int y_h =
ctx->m.avctx->height - (mb_y << 4);
736 int uv_w =
ctx->is_444 ? y_w : (y_w + 1) / 2;
739 uvlinesize = 16 + 16 *
ctx->is_444;
742 linesize,
ctx->m.linesize,
746 uvlinesize,
ctx->m.uvlinesize,
750 uvlinesize,
ctx->m.uvlinesize,
754 dct_y_offset = bw * linesize / 2;
755 dct_uv_offset = bw * uvlinesize / 2;
756 ptr_y = &
ctx->edge_buf_y[0];
757 ptr_u = &
ctx->edge_buf_uv[0][0];
758 ptr_v = &
ctx->edge_buf_uv[1][0];
767 if (mb_y + 1 ==
ctx->m.mb_height &&
ctx->m.avctx->height == 1080) {
768 if (
ctx->interlaced) {
769 ctx->get_pixels_8x4_sym(
ctx->blocks[4],
770 ptr_y + dct_y_offset,
772 ctx->get_pixels_8x4_sym(
ctx->blocks[5],
773 ptr_y + dct_y_offset + bw,
775 ctx->get_pixels_8x4_sym(
ctx->blocks[6],
776 ptr_u + dct_uv_offset,
778 ctx->get_pixels_8x4_sym(
ctx->blocks[7],
779 ptr_v + dct_uv_offset,
782 ctx->bdsp.clear_block(
ctx->blocks[4]);
783 ctx->bdsp.clear_block(
ctx->blocks[5]);
784 ctx->bdsp.clear_block(
ctx->blocks[6]);
785 ctx->bdsp.clear_block(
ctx->blocks[7]);
789 ptr_y + dct_y_offset, linesize);
791 ptr_y + dct_y_offset + bw, linesize);
793 ptr_u + dct_uv_offset, uvlinesize);
795 ptr_v + dct_uv_offset, uvlinesize);
800 pdsp->
get_pixels(
ctx->blocks[6], ptr_y + dct_y_offset, linesize);
801 pdsp->
get_pixels(
ctx->blocks[7], ptr_y + dct_y_offset + bw, linesize);
805 pdsp->
get_pixels(
ctx->blocks[8], ptr_u + dct_uv_offset, uvlinesize);
806 pdsp->
get_pixels(
ctx->blocks[9], ptr_u + dct_uv_offset + bw, uvlinesize);
810 pdsp->
get_pixels(
ctx->blocks[10], ptr_v + dct_uv_offset, uvlinesize);
811 pdsp->
get_pixels(
ctx->blocks[11], ptr_v + dct_uv_offset + bw, uvlinesize);
823 const static uint8_t component[8]={0,0,1,2,0,0,1,2};
830 int jobnr,
int threadnr)
833 int mb_y = jobnr, mb_x;
834 int qscale =
ctx->qscale;
836 ctx =
ctx->thread[threadnr];
840 ctx->m.last_dc[2] = 1 << (
ctx->bit_depth + 2);
842 for (mb_x = 0; mb_x <
ctx->m.mb_width; mb_x++) {
843 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
851 for (
i = 0;
i < 8 + 4 *
ctx->is_444;
i++) {
852 int16_t *src_block =
ctx->blocks[
i];
856 memcpy(
block, src_block, 64 *
sizeof(*
block));
858 ctx->is_444 ? 4 * (n > 0): 4 & (2*
i),
869 dc_bits +=
ctx->cid_table->dc_bits[nbits] + nbits;
879 ctx->mb_rc[(qscale *
ctx->m.mb_num) +
mb].ssd = ssd;
880 ctx->mb_rc[(qscale *
ctx->m.mb_num) +
mb].
bits = ac_bits + dc_bits + 12 +
881 (1 +
ctx->is_444) * 8 *
ctx->vlc_bits[0];
887 int jobnr,
int threadnr)
890 int mb_y = jobnr, mb_x;
891 ctx =
ctx->thread[threadnr];
893 ctx->slice_size[jobnr]);
897 ctx->m.last_dc[2] = 1 << (
ctx->bit_depth + 2);
898 for (mb_x = 0; mb_x <
ctx->m.mb_width; mb_x++) {
899 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
900 int qscale =
ctx->mb_qscale[
mb];
908 for (
i = 0;
i < 8 + 4 *
ctx->is_444;
i++) {
911 int last_index =
ctx->m.dct_quantize(&
ctx->m,
block,
912 ctx->is_444 ? (((
i >> 1) % 3) < 1 ? 0 : 4): 4 & (2*
i),
928 for (mb_y = 0; mb_y <
ctx->m.mb_height; mb_y++) {
931 ctx->slice_size[mb_y] = 0;
932 for (mb_x = 0; mb_x <
ctx->m.mb_width; mb_x++) {
933 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
934 ctx->slice_size[mb_y] +=
ctx->mb_bits[
mb];
936 ctx->slice_size[mb_y] = (
ctx->slice_size[mb_y] + 31) & ~31;
937 ctx->slice_size[mb_y] >>= 3;
938 thread_size =
ctx->slice_size[mb_y];
944 int jobnr,
int threadnr)
947 int mb_y = jobnr, mb_x, x, y;
948 int partial_last_row = (mb_y ==
ctx->m.mb_height - 1) &&
951 ctx =
ctx->thread[threadnr];
952 if (
ctx->bit_depth == 8) {
953 uint8_t *pix =
ctx->thread[0]->src[0] + ((mb_y << 4) *
ctx->m.linesize);
954 for (mb_x = 0; mb_x <
ctx->m.mb_width; ++mb_x, pix += 16) {
955 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
960 sum =
ctx->m.mpvencdsp.pix_sum(pix,
ctx->m.linesize);
961 varc =
ctx->m.mpvencdsp.pix_norm1(pix,
ctx->m.linesize);
966 for (y = 0; y < bh; y++) {
967 for (x = 0; x < bw; x++) {
974 varc = (varc - (((unsigned) sum * sum) >> 8) + 128) >> 8;
976 ctx->mb_cmp[
mb].value = varc;
980 const int linesize =
ctx->m.linesize >> 1;
981 for (mb_x = 0; mb_x <
ctx->m.mb_width; ++mb_x) {
982 uint16_t *pix = (uint16_t *)
ctx->thread[0]->src[0] +
983 ((mb_y << 4) * linesize) + (mb_x << 4);
984 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
992 for (
i = 0;
i < bh; ++
i) {
993 for (j = 0; j < bw; ++j) {
995 const int sample = (unsigned) pix[j] >> 6;
1003 sqmean = sqsum >> 8;
1013 int lambda, up_step, down_step;
1014 int last_lower = INT_MAX, last_higher = 0;
1023 lambda =
ctx->lambda;
1028 if (lambda == last_higher) {
1032 for (y = 0; y <
ctx->m.mb_height; y++) {
1033 for (x = 0; x <
ctx->m.mb_width; x++) {
1034 unsigned min = UINT_MAX;
1036 int mb = y *
ctx->m.mb_width + x;
1039 int i = (q*
ctx->m.mb_num) +
mb;
1040 unsigned score =
ctx->mb_rc[
i].bits * lambda +
1049 ctx->mb_qscale[
mb] = qscale;
1050 ctx->mb_bits[
mb] =
ctx->mb_rc[rc].bits;
1061 if (bits < ctx->frame_bits) {
1062 last_lower =
FFMIN(lambda, last_lower);
1063 if (last_higher != 0)
1064 lambda = (lambda+last_higher)>>1;
1066 lambda -= down_step;
1067 down_step =
FFMIN((int64_t)down_step*5, INT_MAX);
1069 lambda =
FFMAX(1, lambda);
1070 if (lambda == last_lower)
1073 last_higher =
FFMAX(lambda, last_higher);
1074 if (last_lower != INT_MAX)
1075 lambda = (lambda+last_lower)>>1;
1076 else if ((int64_t)lambda + up_step > INT_MAX)
1080 up_step =
FFMIN((int64_t)up_step*5, INT_MAX);
1084 ctx->lambda = lambda;
1093 int last_higher = 0;
1094 int last_lower = INT_MAX;
1098 qscale =
ctx->qscale;
1101 ctx->qscale = qscale;
1105 for (y = 0; y <
ctx->m.mb_height; y++) {
1106 for (x = 0; x <
ctx->m.mb_width; x++)
1112 if (bits < ctx->frame_bits) {
1115 if (last_higher == qscale - 1) {
1116 qscale = last_higher;
1119 last_lower =
FFMIN(qscale, last_lower);
1120 if (last_higher != 0)
1121 qscale = (qscale + last_higher) >> 1;
1123 qscale -= down_step++;
1128 if (last_lower == qscale + 1)
1130 last_higher =
FFMAX(qscale, last_higher);
1131 if (last_lower != INT_MAX)
1132 qscale = (qscale + last_lower) >> 1;
1134 qscale += up_step++;
1136 if (qscale >=
ctx->m.avctx->qmax)
1140 ctx->qscale = qscale;
1144 #define BUCKET_BITS 8
1145 #define RADIX_PASSES 4
1146 #define NBUCKETS (1 << BUCKET_BITS)
1161 int v =
data[
i].value;
1171 buckets[j][
i] =
offset -= buckets[j][
i];
1183 int pos = buckets[v]++;
1206 for (y = 0; y <
ctx->m.mb_height; y++) {
1207 for (x = 0; x <
ctx->m.mb_width; x++) {
1208 int mb = y *
ctx->m.mb_width + x;
1209 int rc = (
ctx->qscale *
ctx->m.mb_num ) +
mb;
1212 ctx->mb_bits[
mb] =
ctx->mb_rc[rc].bits;
1213 max_bits +=
ctx->mb_rc[rc].bits;
1215 delta_bits =
ctx->mb_rc[rc].bits -
1216 ctx->mb_rc[rc +
ctx->m.mb_num].bits;
1218 ctx->mb_cmp[
mb].value =
1219 delta_bits ? ((
ctx->mb_rc[rc].ssd -
1220 ctx->mb_rc[rc +
ctx->m.mb_num].ssd) * 100) /
1232 for (x = 0; x <
ctx->m.mb_num && max_bits >
ctx->frame_bits; x++) {
1233 int mb =
ctx->mb_cmp[x].mb;
1234 int rc = (
ctx->qscale *
ctx->m.mb_num ) +
mb;
1235 max_bits -=
ctx->mb_rc[rc].bits -
1236 ctx->mb_rc[rc +
ctx->m.mb_num].bits;
1237 ctx->mb_qscale[
mb] =
ctx->qscale + 1;
1238 ctx->mb_bits[
mb] =
ctx->mb_rc[rc +
ctx->m.mb_num].bits;
1248 for (
i = 0;
i <
ctx->m.avctx->thread_count;
i++) {
1249 ctx->thread[
i]->m.linesize =
frame->linesize[0] <<
ctx->interlaced;
1250 ctx->thread[
i]->m.uvlinesize =
frame->linesize[1] <<
ctx->interlaced;
1251 ctx->thread[
i]->dct_y_offset =
ctx->m.linesize *8;
1252 ctx->thread[
i]->dct_uv_offset =
ctx->m.uvlinesize*8;
1255 #if FF_API_CODED_FRAME
1257 ctx->m.avctx->coded_frame->interlaced_frame =
frame->interlaced_frame;
1260 ctx->cur_field =
frame->interlaced_frame && !
frame->top_field_first;
1278 for (
i = 0;
i < 3;
i++) {
1280 if (
ctx->interlaced &&
ctx->cur_field)
1292 "picture could not fit ratecontrol constraints, increase qmax\n");
1299 for (
i = 0;
i <
ctx->m.mb_height;
i++) {
1309 ctx->coding_unit_size - 4 -
offset -
ctx->data_offset);
1315 ctx->cur_field ^= 1;
1316 buf +=
ctx->coding_unit_size;
1317 goto encode_coding_unit;
1320 #if FF_API_CODED_FRAME
1356 if (
ctx->thread[1]) {
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
#define FF_ENABLE_DEPRECATION_WARNINGS
static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
static const AVOption options[]
AVPixelFormat
Pixel format.
static av_cold int init(AVCodecContext *avctx)
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. These lists are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, int16_t *block, int last_index, int n)
static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, int16_t *block, int last_index)
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
#define MKTAG(a, b, c, d)
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block, int n, int qscale, int *overflow)
static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
int av_log2_16bit(unsigned v)
static void dnxhd_8bit_get_pixels_8x4_sym(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t line_size)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
#define DNX10BIT_QMAT_SHIFT
#define MASK_ABS(mask, level)
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
int mb_decision
macroblock decision mode
int qmax
maximum quantizer
static av_cold int dnxhd_encode_end(AVCodecContext *avctx)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
#define AV_PIX_FMT_GBRP10
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
void ff_dnxhd_print_profiles(AVCodecContext *avctx, int loglevel)
int key_frame
1 -> keyframe, 0-> not
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
int avpriv_dnxhd_get_hr_frame_size(int cid, int w, int h)
#define FF_PROFILE_DNXHR_LB
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
#define AV_PIX_FMT_YUV444P10
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
#define LOCAL_ALIGNED_16(t, v,...)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void radix_sort(RCCMPEntry *data, RCCMPEntry *tmp, int size)
const AVProfile ff_dnxhd_profiles[]
static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
#define FF_PROFILE_DNXHR_HQ
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
av_cold void ff_mpv_idct_init(MpegEncContext *s)
static void radix_sort_pass(RCCMPEntry *dst, const RCCMPEntry *data, int size, int buckets[NBUCKETS], int pass)
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
#define DNXHD_VARIABLE
Indicate that a CIDEntry value must be read in the bitstream.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
AVIOContext * pb
I/O context.
static void radix_count(const RCCMPEntry *data, int size, int buckets[RADIX_PASSES][NBUCKETS])
static const AVClass dnxhd_class
#define AV_PIX_FMT_YUV422P10
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block, int n, int qscale, int *overflow)
static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
enum AVPictureType pict_type
Picture type of the frame.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx)
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
static av_cold int dnxhd_init_vlc(DNXHDEncContext *ctx)
static av_always_inline int dnxhd_ssd_block(int16_t *qblock, int16_t *block)
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf offset
int flags
A combination of AV_PKT_FLAG values.
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
const CIDEntry * ff_dnxhd_get_cid_table(int cid)
#define FF_PROFILE_DNXHR_SQ
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
static int put_bits_count(PutBitContext *s)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf default value
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const uint8_t ff_zigzag_direct[64]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate method, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. It must return, or at least make progress towards producing, a frame.
static int get_bucket(int value, int shift)
int ff_dct_encode_init(MpegEncContext *s)
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth)
static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
static float mean(const float *input, int size)
#define FF_PROFILE_DNXHR_HQX
#define FF_MB_DECISION_RD
rate distortion
static int shift(int a, int b)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Undefined behavior: in the C language, some operations are undefined, such as signed integer overflow.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
This structure stores compressed data.
#define FF_PROFILE_DNXHR_444
int width
picture width / height.
The exact code depends on how similar the blocks are and how related they are to the block
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static int first_field(const struct video_data *s)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
static av_always_inline void dnxhd_10bit_get_pixels_8x4_sym(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t line_size)
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, int16_t *block, int n, int qscale, int last_index)
static const AVCodecDefault dnxhd_defaults[]