uint32_t v32 = v * 0x01010101;

uint64_t v64 = v * 0x0101010101010101ULL;

uint32_t v32 = v * 0x01010101;
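/* These splat constants come from setctx_2d()'s w == 4 and w == 8 paths
 * (the second v32 being the fallback when fast 64-bit stores are
 * unavailable): multiplying a byte by 0x01010101 replicates it into every
 * byte of a word, e.g. v = 0x05 yields v32 = 0x05050505, so one aligned
 * store fills four context bytes at once. */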
static const uint8_t left_ctx[N_BS_SIZES] = {
    0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
};
static const uint8_t above_ctx[N_BS_SIZES] = {
    0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
};
int row = s->row, col = s->col, row7 = s->row7;
enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
for (y = 0; y < h4; y++) {
    int idx_base = (y + row) * 8 * s->sb_cols + col;
    for (x = 0; x < w4; x++)
        pred = FFMIN(pred, refsegmap[idx_base + x]);
}
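/* Temporal segment-ID prediction: scan the reference frame's segmentation
 * map (one entry per 8x8 block, stride 8 * sb_cols) over the area this
 * block covers and keep the smallest segment ID found there as the
 * predicted ID. */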
if (have_a && have_l) {
} else if (b->intra) {
static const uint8_t size_group[10] = {
    3, 3, 3, 3, 2, 2, 2, 1, 1, 1
};
int sz = size_group[b->bs];
static const uint8_t inter_mode_ctx_lut[14][14] = {
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
};
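/* inter_mode_ctx_lut[above][left]: probability context for coding this
 * block's inter mode, looked up from the modes of the above and left
 * neighbors. Indices 0-9 are the intra modes and 10-13 the four inter
 * modes (NEARESTMV through NEWMV), which is why the first ten rows and
 * columns are uniform. */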
c = (refa == refl) ? 3 : 1;

c = (refl == refa) ? 4 : 2;
static const uint8_t off[10] = {
    3, 0, 0, 1, 0, 0, 0, 0, 0, 0
};
#if HAVE_FAST_64BIT
#define SPLAT_CTX(var, val, n) \
    switch (n) { \
    case 1:  var = val;                                   break; \
    case 2:  AV_WN16A(&var, val *             0x0101);    break; \
    case 4:  AV_WN32A(&var, val *         0x01010101);    break; \
    case 8:  AV_WN64A(&var, val * 0x0101010101010101ULL); break; \
    default: { \
        uint64_t v64 = val * 0x0101010101010101ULL; \
        AV_WN64A(              &var,     v64); \
        AV_WN64A(&((uint8_t *) &var)[8], v64); \
        break; \
    } \
    }
#else
#define SPLAT_CTX(var, val, n) \
    switch (n) { \
    case 1:  var = val;                        break; \
    case 2:  AV_WN16A(&var, val *     0x0101); break; \
    case 4:  AV_WN32A(&var, val * 0x01010101); break; \
    case 8: { \
        uint32_t v32 = val * 0x01010101; \
        AV_WN32A(              &var,     v32); \
        AV_WN32A(&((uint8_t *) &var)[4], v32); \
        break; \
    } \
    default: { \
        uint32_t v32 = val * 0x01010101; \
        AV_WN32A(              &var,      v32); \
        AV_WN32A(&((uint8_t *) &var)[4],  v32); \
        AV_WN32A(&((uint8_t *) &var)[8],  v32); \
        AV_WN32A(&((uint8_t *) &var)[12], v32); \
        break; \
    } \
    }
#endif
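/* Same byte-splat as setctx_2d(), selected at build time: with fast 64-bit
 * stores a single AV_WN64A covers n == 8, otherwise pairs of 32-bit writes
 * are used. Example: SET_CTXS below expands to
 * SPLAT_CTX(s->above_skip_ctx[col], b->skip, 2) for a block two context
 * units wide, storing b->skip into both bytes with one 16-bit write. */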
#define SET_CTXS(dir, off, n) \
    do { \
        SPLAT_CTX(s->dir##_skip_ctx[off],      b->skip,          n); \
        SPLAT_CTX(s->dir##_txfm_ctx[off],      b->tx,            n); \
        SPLAT_CTX(s->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
        if (!s->s.h.keyframe && !s->s.h.intraonly) { \
            SPLAT_CTX(s->dir##_intra_ctx[off], b->intra,   n); \
            SPLAT_CTX(s->dir##_comp_ctx[off],  b->comp,    n); \
            SPLAT_CTX(s->dir##_mode_ctx[off],  b->mode[3], n); \
            if (!b->intra) { \
                SPLAT_CTX(s->dir##_ref_ctx[off], vref, n); \
                if (s->s.h.filtermode == FILTER_SWITCHABLE) { \
                    SPLAT_CTX(s->dir##_filter_ctx[off], filter_id, n); \
                } \
            } \
        } \
    } while (0)
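/* Once a block is decoded, SET_CTXS publishes everything later neighbors
 * need for context modeling (skip flag, tx size, partition context, plus
 * the intra/comp/mode/ref/filter choices on inter frames), splatted via
 * SPLAT_CTX across every context unit the block covers. */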
switch (w4) {
case 1:
    SET_CTXS(above, col, 1);
    break;
case 2:
    SET_CTXS(above, col, 2);
    break;
case 4:
    SET_CTXS(above, col, 4);
    break;
case 8:
    SET_CTXS(above, col, 8);
    break;
}
switch (h4) {
case 1:
    SET_CTXS(left, row7, 1);
    break;
case 2:
    SET_CTXS(left, row7, 2);
    break;
case 4:
    SET_CTXS(left, row7, 4);
    break;
case 8:
    SET_CTXS(left, row7, 8);
    break;
}
for (n = 0; n < w4 * 2; n++) {
    /* ... */
}
for (n = 0; n < h4 * 2; n++) {
    /* ... */
}

for (y = 0; y < h4; y++) {
    int x, o = (row + y) * s->sb_cols * 8 + col;
    /* ... */
    if (b->intra) {
        for (x = 0; x < w4; x++) {
            /* ... */
        }
    } else if (b->comp) {
        for (x = 0; x < w4; x++) {
            mv[x].ref[0] = b->ref[0];
            mv[x].ref[1] = b->ref[1];
            /* ... */
        }
    } else {
        for (x = 0; x < w4; x++) {
            mv[x].ref[0] = b->ref[0];
            /* ... */
        }
    }
}
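/* The chosen reference indices (and, in the elided lines, the motion
 * vectors themselves) are written into the frame-wide per-4x4 buffer;
 * the next frame reads this buffer back for temporal motion-vector
 * prediction. */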
static av_always_inline int decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef,
                                                    int n_coeffs, int is_tx32x32,
                                                    int is8bitsperpixel, int bpp,
                                                    unsigned (*cnt)[6][3],
                                                    unsigned (*eob)[6][2],
                                                    uint8_t (*p)[6][11], int nnz,
                                                    const int16_t *scan,
                                                    const int16_t (*nb)[2],
                                                    const int16_t *band_counts,
                                                    const int16_t *qmul)
{
    int i = 0, band = 0, band_left = band_counts[band];
    /* ... */
    band_left = band_counts[++band];
    /* ... */
    nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
if (!is8bitsperpixel) {
#define STORE_COEF(c, i, v) do { \
    if (is8bitsperpixel) { \
        c[i] = v; \
    } else { \
        AV_WN32A(&c[i * 2], v); \
    } \
} while (0)
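/* At 8 bpp a coefficient fits in one int16_t; at 10/12 bpp dequantized
 * values can overflow 16 bits, so each is written as 32 bits across two
 * int16_t slots with a single aligned store. */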
    band_left = band_counts[++band];
    /* ... */
    nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
} while (++i < n_coeffs);
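/* Band/context bookkeeping: band_counts[] splits the scan order into the
 * six probability bands of the VP9 spec, and band_left counts down until
 * the next band starts. The context for coefficient i is the rounded
 * average of the magnitudes cached for its two already-decoded scan-order
 * neighbors nb[i][0] and nb[i][1]. */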
static int decode_coeffs_b_8bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                uint8_t (*p)[6][11], int nnz,
                                const int16_t *scan, const int16_t (*nb)[2],
                                const int16_t *band_counts, const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 0, 1, 8, cnt, eob, p,
                                   nnz, scan, nb, band_counts, qmul);
}

static int decode_coeffs_b32_8bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                  unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                  uint8_t (*p)[6][11], int nnz,
                                  const int16_t *scan, const int16_t (*nb)[2],
                                  const int16_t *band_counts, const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 1, 1, 8, cnt, eob, p,
                                   nnz, scan, nb, band_counts, qmul);
}

static int decode_coeffs_b_16bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                 unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                 uint8_t (*p)[6][11], int nnz,
                                 const int16_t *scan, const int16_t (*nb)[2],
                                 const int16_t *band_counts, const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 0, 0, s->s.h.bpp, cnt,
                                   eob, p, nnz, scan, nb, band_counts, qmul);
}

static int decode_coeffs_b32_16bpp(VP9Context *s, int16_t *coef, int n_coeffs,
                                   unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                   uint8_t (*p)[6][11], int nnz,
                                   const int16_t *scan, const int16_t (*nb)[2],
                                   const int16_t *band_counts, const int16_t *qmul)
{
    return decode_coeffs_b_generic(&s->c, coef, n_coeffs, 1, 0, s->s.h.bpp, cnt,
                                   eob, p, nnz, scan, nb, band_counts, qmul);
}
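/* These four thin wrappers pin is_tx32x32 and is8bitsperpixel to
 * compile-time constants; since decode_coeffs_b_generic() is
 * av_always_inline, the compiler emits four specialized coefficient
 * decoders with those branches folded away instead of testing them per
 * coefficient. */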
int row = s->row, col = s->col;
/* ... */
int end_x = FFMIN(2 * (s->cols - col), w4);
int end_y = FFMIN(2 * (s->rows - row), h4);
int n, pl, x, y, ret;
static const int16_t band_counts[4][8] = {
    { 1, 2, 3, 4,  3,   16 - 13 },
    { 1, 2, 3, 4, 11,   64 - 21 },
    { 1, 2, 3, 4, 11,  256 - 21 },
    { 1, 2, 3, 4, 11, 1024 - 21 },
};
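/* One row per transform size. The first five entries are the coefficient
 * counts of bands 0-4 and the last entry is whatever remains of the block:
 * for TX_4X4, bands 0-4 cover 1 + 2 + 3 + 4 + 3 = 13 of the 16
 * coefficients, leaving 16 - 13 = 3 for the final band. */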
const int16_t *y_band_counts  = band_counts[b->tx];
const int16_t *uv_band_counts = band_counts[b->uvtx];
int bytesperpixel = is8bitsperpixel ? 1 : 2;
#define MERGE(la, end, step, rd) \
    for (n = 0; n < end; n += step) \
        la[n] = !!rd(&la[n])
#define MERGE_CTX(step, rd) \
    do { \
        MERGE(l, end_y, step, rd); \
        MERGE(a, end_x, step, rd); \
    } while (0)
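/* For transforms larger than 4x4, the per-4x4 nonzero-context bytes under
 * the transform are collapsed into one boolean: rd is an aligned multi-byte
 * read (AV_RN16A and friends in the callers), so la[n] becomes 1 if any of
 * the covered units had coefficients. */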
#define DECODE_Y_COEF_LOOP(step, mode_index, v) \
    for (n = 0, y = 0; y < end_y; y += step) { \
        for (x = 0; x < end_x; x += step, n += step * step) { \
            enum TxfmType txtp = ff_vp9_intra_txfm_type[b->mode[mode_index]]; \
            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                      (s, s->block + 16 * n * bytesperpixel, 16 * step * step, \
                       c, e, p, a[x] + l[y], yscans[txtp], \
                       ynbs[txtp], y_band_counts, qmul[0]); \
            a[x] = l[y] = !!ret; \
            total_coeff |= !!ret; \
            if (step >= 4) { \
                AV_WN16A(&s->eob[n], ret); \
            } else { \
                s->eob[n] = ret; \
            } \
        } \
    }
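/* Luma coefficient pass: iterate over tx-sized sub-blocks in raster order.
 * The scan and neighbor tables depend on the intra prediction mode via
 * ff_vp9_intra_txfm_type, the coding context is the sum of the above and
 * left nonzero flags, and each sub-block's end-of-block count is recorded
 * (two bytes once step >= 4, where it can exceed 255). */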
#define SPLAT(la, end, step, cond) \
    if (step == 2) { \
        for (n = 1; n < end; n += step) \
            la[n] = la[n - 1]; \
    } else if (step == 4) { \
        if (cond) { \
            for (n = 0; n < end; n += step) \
                AV_WN32A(&la[n], la[n] * 0x01010101); \
        } else { \
            for (n = 0; n < end; n += step) \
                memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
        } \
    } else /* step == 8 */ { \
        if (cond) { \
            if (HAVE_FAST_64BIT) { \
                for (n = 0; n < end; n += step) \
                    AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
            } else { \
                for (n = 0; n < end; n += step) { \
                    uint32_t v32 = la[n] * 0x01010101; \
                    AV_WN32A(&la[n], v32); \
                    AV_WN32A(&la[n + 4], v32); \
                } \
            } \
        } else { \
            for (n = 0; n < end; n += step) \
                memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
        } \
    }
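/* Inverse of MERGE: the single nonzero flag of a large transform is
 * splatted back over every 4x4 unit it covers. When the block is not
 * clipped by the frame edge (cond), whole words can be written with the
 * 0x01010101 broadcast trick; otherwise a bounded memset stops at the
 * edge. */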
#define SPLAT_CTX(step) \
    do { \
        SPLAT(a, end_x, step, end_x == w4); \
        SPLAT(l, end_y, step, end_y == h4); \
    } while (0)
#define DECODE_UV_COEF_LOOP(step, v) \
    for (n = 0, y = 0; y < end_y; y += step) { \
        for (x = 0; x < end_x; x += step, n += step * step) { \
            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                      (s, s->uvblock[pl] + 16 * n * bytesperpixel, \
                       16 * step * step, c, e, p, a[x] + l[y], \
                       uvscan, uvnb, uv_band_counts, qmul[1]); \
            a[x] = l[y] = !!ret; \
            total_coeff |= !!ret; \
            if (step >= 4) { \
                AV_WN16A(&s->uveob[pl][n], ret); \
            } else { \
                s->uveob[pl][n] = ret; \
            } \
        } \
    }
for (pl = 0; pl < 2; pl++) {
static av_always_inline void mask_edges(uint8_t (*mask)[8][4], int ss_h, int ss_v,
                                        int row_and_7, int col_and_7,
                                        int w, int h, int col_end, int row_end,
                                        enum TxfmMode tx, int skip_inter)
{
    static const unsigned wide_filter_col_mask[2] = { 0x11, 0x01 };
    static const unsigned wide_filter_row_mask[2] = { 0x03, 0x07 };
    if (tx == TX_4X4 && (ss_v | ss_h)) {
        /* ... */
    }

    if (tx == TX_4X4 && !skip_inter) {
        int t = 1 << col_and_7, m_col = (t << w) - t, y;
        /* ... */
        int m_row_8 = m_col & wide_filter_col_mask[ss_h], m_row_4 = m_col - m_row_8;

        for (y = row_and_7; y < h + row_and_7; y++) {
            int col_mask_id = 2 - !(y & wide_filter_row_mask[ss_v]);

            mask[0][y][1] |= m_row_8;
            mask[0][y][2] |= m_row_4;
            /* ... */
            if ((ss_h & ss_v) && (col_end & 1) && (y & 1)) {
                mask[1][y][col_mask_id] |= (t << (w - 1)) - t;
            } else {
                mask[1][y][col_mask_id] |= m_col;
            }
            /* ... */
            mask[0][y][3] |= m_col;
            /* ... */
            if (ss_h && (col_end & 1))
                mask[1][y][3] |= (t << (w - 1)) - t;
            else
                mask[1][y][3] |= m_col;
        }
    } else {
        int y, t = 1 << col_and_7, m_col = (t << w) - t;

        if (!skip_inter) {
            int mask_id = (tx == TX_8X8);
            int l2 = tx + ss_h - 1, step1d;
            static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
            int m_row = m_col & masks[l2];

            if (ss_h && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
                int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
                int m_row_8 = m_row - m_row_16;

                for (y = row_and_7; y < h + row_and_7; y++) {
                    mask[0][y][0] |= m_row_16;
                    mask[0][y][1] |= m_row_8;
                }
            } else {
                for (y = row_and_7; y < h + row_and_7; y++)
                    mask[0][y][mask_id] |= m_row;
            }

            l2 = tx + ss_v - 1;
            step1d = 1 << l2;
            if (ss_v && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
                for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
                    mask[1][y][0] |= m_col;
                if (y - row_and_7 == h - 1)
                    mask[1][y][1] |= m_col;
            } else {
                for (y = row_and_7; y < h + row_and_7; y += step1d)
                    mask[1][y][mask_id] |= m_col;
            }
        } else if (tx != TX_4X4) {
            int mask_id;

            mask_id = (tx == TX_8X8) || (h == ss_v);
            mask[1][row_and_7][mask_id] |= m_col;
            mask_id = (tx == TX_8X8) || (w == ss_h);
            for (y = row_and_7; y < h + row_and_7; y++)
                mask[0][y][mask_id] |= t;
        } else {
            int t8 = t & wide_filter_col_mask[ss_h], t4 = t - t8;

            for (y = row_and_7; y < h + row_and_7; y++) {
                /* ... */
            }
            mask[1][row_and_7][2 - !(row_and_7 & wide_filter_row_mask[ss_v])] |= m_col;
        }
    }
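/* mask_edges() accumulates the loop-filter edge masks for one block:
 * mask[0][y][sz] collects column (vertical-edge) bits and mask[1][y][sz]
 * row (horizontal-edge) bits for superblock row y, with sz selecting the
 * 16/8/4-pixel filter width; the callers fill lflvl->mask[0] for luma and
 * lflvl->mask[1] for chroma. */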
void ff_vp9_decode_block(AVCodecContext *avctx, int row, int col,
                         VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
                         enum BlockLevel bl, enum BlockPartition bp)
s->min_mv.x = -(128 + col * 64);
s->min_mv.y = -(128 + row * 64);
b->uvtx = b->tx - ((s->ss_h && w4 * 2 == (1 << b->tx)) ||
                   (s->ss_v && h4 * 2 == (1 << b->tx)));
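/* Chroma transform size: drop one step from the luma tx when the tx spans
 * the full block in a dimension that chroma subsampling halves, so the
 * chroma transform still fits its plane. */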
if (bytesperpixel == 1) {
#define SPLAT_ZERO_CTX(v, n) \
    switch (n) { \
    case 1:  v = 0;          break; \
    case 2:  AV_ZERO16(&v);  break; \
    case 4:  AV_ZERO32(&v);  break; \
    case 8:  AV_ZERO64(&v);  break; \
    case 16: AV_ZERO128(&v); break; \
    }
#define SPLAT_ZERO_YUV(dir, var, off, n, dir2) \
    do { \
        SPLAT_ZERO_CTX(s->dir##_y_##var[off * 2], n * 2); \
        if (s->ss_##dir2) { \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off], n); \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off], n); \
        } else { \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[0][off * 2], n * 2); \
            SPLAT_ZERO_CTX(s->dir##_uv_##var[1][off * 2], n * 2); \
        } \
    } while (0)
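/* Zeroes the nonzero-coefficient contexts of a skipped block: the offset
 * and count are in 8x8 units while the luma context is kept per 4x4 unit
 * (hence off * 2 and n * 2); chroma matches luma granularity only when
 * that direction is not subsampled. */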
s->block += w4 * h4 * 64 * bytesperpixel;
/* ... */
s->eob += 4 * w4 * h4;
emu[0] = (col + w4) * 8 * bytesperpixel > f->linesize[0] ||
         (row + h4) > s->rows;
emu[1] = ((col + w4) * 8 >> s->ss_h) * bytesperpixel > f->linesize[1] ||
         (row + h4) > s->rows;
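/* Edge emulation: if the block's right edge would run past the allocated
 * line width, or its bottom edge past the last row, reconstruction is done
 * into the fixed-stride s->tmp_y / s->tmp_uv buffers and copied back into
 * the frame afterwards (see the copy loops below). */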
s->dst[0] = f->data[0] + yoff;
/* ... */
s->dst[1] = f->data[1] + uvoff;
s->dst[2] = f->data[2] + uvoff;
if (s->s.h.bpp > 8) {
    /* ... */
}

if (s->s.h.bpp > 8) {
    /* ... */
}
for (n = 0; o < w; n++) {
    int bw = 64 >> n;

    av_assert2(n <= 4);
    if (w & bw) {
        s->dsp.mc[n][0][0][0][0](f->data[0] + yoff + o * bytesperpixel, f->linesize[0],
                                 s->tmp_y + o * bytesperpixel, 128, h, 0, 0);
        o += bw;
    }
}

for (n = s->ss_h; o < w; n++) {
    int bw = 64 >> n;

    av_assert2(n <= 4);
    if (w & bw) {
        s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o * bytesperpixel, f->linesize[1],
                                 s->tmp_uv[0] + o * bytesperpixel, 128, h, 0, 0);
        s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o * bytesperpixel, f->linesize[2],
                                 s->tmp_uv[1] + o * bytesperpixel, 128, h, 0, 0);
        o += bw;
    }
}
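/* The copy-back walks the block width in decreasing power-of-two strips
 * (64 >> n), using the unfiltered full-pel mc[n][0][0][0][0] functions as
 * plain copies from the 128-byte-stride temporary buffers into the frame. */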
mask_edges(lflvl->mask[0], 0, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
if (s->ss_h || s->ss_v)
    mask_edges(lflvl->mask[1], s->ss_h, s->ss_v, row7, col7, x_end, y_end, /* ... */
               b->uvtx, skip_inter);
if (sharp > 0) {
    limit >>= (sharp + 3) >> 2;
    limit = FFMIN(limit, 9 - sharp);
}
limit = FFMAX(limit, 1);
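/* Worked example: starting from limit = 32 with sharpness 4, the shift
 * gives 32 >> ((4 + 3) >> 2) = 16, FFMIN clamps that to 9 - 4 = 5, and
 * FFMAX keeps the final limit at least 1 so the filter never degenerates. */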
s->block += w4 * h4 * 64 * bytesperpixel;
/* ... */
s->eob += 4 * w4 * h4;