for (y = 0; y < size; y++) {
    for (x = 0; x < size; x++)
for (y = 0; y < 4; y++) {
    for (x = 0; x < 4; x++) {

for (y = 0; y < 8; y++) {
    for (x = 0; x < 8; x++) {

for (y = 0; y < 16; y++) {
    for (x = 0; x < 16; x++) {

for (y = 0; y < 32; y++) {
    for (x = 0; x < 32; x++) {
int offset = 1 << (shift - 1);

for (y = 0; y < 4 * 4; y += 4) {
    for (x = 0; x < 4; x++)
#define SET(dst, x)           (dst) = (x)
#define SCALE(dst, x)         (dst) = av_clip_int16(((x) + add) >> shift)
#define ADD_AND_SCALE(dst, x) \
    (dst) = av_clip_pixel((dst) + av_clip_int16(((x) + add) >> shift))
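/*
 * Editorial note: TR_4x4_LUMA below appears to implement the 4-point
 * DST-style transform HEVC uses for 4x4 intra luma residuals, factored so
 * only the multipliers 29, 55 and 74 are needed; the remaining basis value
 * 84 falls out of the shared terms, e.g.
 *   dst[0] = 29*(s0 + s2) + 55*(s2 + s3) + 74*s1
 *          = 29*s0 + 74*s1 + 84*s2 + 55*s3.
 * The `assign` argument selects SET, SCALE or ADD_AND_SCALE, so the same
 * butterfly serves the intermediate pass and the final add to the
 * prediction.
 */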
#define TR_4x4_LUMA(dst, src, step, assign)              \
    int c0 = src[0 * step] + src[2 * step];              \
    int c1 = src[2 * step] + src[3 * step];              \
    int c2 = src[0 * step] - src[3 * step];              \
    int c3 = 74 * src[1 * step];                         \
                                                         \
    assign(dst[2 * step], 74 * (src[0 * step] -          \
                                src[2 * step] +          \
                                src[3 * step]));         \
    assign(dst[0 * step], 29 * c0 + 55 * c1 + c3);       \
    assign(dst[1 * step], 55 * c2 - 29 * c1 + c3);       \
    assign(dst[3 * step], 55 * c0 + 29 * c2 - c3);       \
int add = 1 << (shift - 1);

for (i = 0; i < 4; i++) {

add = 1 << (shift - 1);
for (i = 0; i < 4; i++) {
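/*
 * Editorial note: TR_4 below is the usual even/odd ("partial butterfly")
 * split of the 4-point inverse DCT. The even part e0/e1 comes from
 * coefficients 0 and 2, the odd part o0/o1 from coefficients 1 and 3, and
 * the four outputs are the sums and differences e +/- o. The rows are taken
 * from the shared 32-point `transform` table, which is why only every 8th
 * row (transform[8 * n]) is referenced here.
 */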
#define TR_4(dst, src, dstep, sstep, assign)                 \
    const int e0 = transform[8 * 0][0] * src[0 * sstep] +    \
                   transform[8 * 2][0] * src[2 * sstep];     \
    const int e1 = transform[8 * 0][1] * src[0 * sstep] +    \
                   transform[8 * 2][1] * src[2 * sstep];     \
    const int o0 = transform[8 * 1][0] * src[1 * sstep] +    \
                   transform[8 * 3][0] * src[3 * sstep];     \
    const int o1 = transform[8 * 1][1] * src[1 * sstep] +    \
                   transform[8 * 3][1] * src[3 * sstep];     \
    assign(dst[0 * dstep], e0 + o0);                         \
    assign(dst[1 * dstep], e1 + o1);                         \
    assign(dst[2 * dstep], e1 - o1);                         \
    assign(dst[3 * dstep], e0 - o0);                         \
int add = 1 << (shift - 1);

for (i = 0; i < 4; i++) {

add = 1 << (shift - 1);
for (i = 0; i < 4; i++) {
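/*
 * Editorial note: TR_8, TR_16 and TR_32 build the larger inverse transforms
 * recursively. Each level accumulates its odd output o_N[] from the
 * odd-indexed input coefficients, reuses the next smaller transform (TR_4,
 * TR_8, TR_16) on the even-indexed coefficients to obtain e_N[], and folds
 * the two halves as dst[i] = e + o and dst[N-1-i] = e - o. The row stride
 * into `transform` halves at each level (4*j, 2*j, j), so every size shares
 * the single 32-point coefficient matrix.
 */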
#define TR_8(dst, src, dstep, sstep, assign)                 \
    int o_8[4] = { 0 };                                      \
    for (i = 0; i < 4; i++)                                  \
        for (j = 1; j < 8; j += 2)                           \
            o_8[i] += transform[4 * j][i] * src[j * sstep];  \
    TR_4(e_8, src, 1, 2 * sstep, SET);                       \
    for (i = 0; i < 4; i++) {                                \
        assign(dst[i * dstep], e_8[i] + o_8[i]);             \
        assign(dst[(7 - i) * dstep], e_8[i] - o_8[i]);       \
#define TR_16(dst, src, dstep, sstep, assign)                \
    int o_16[8] = { 0 };                                     \
    for (i = 0; i < 8; i++)                                  \
        for (j = 1; j < 16; j += 2)                          \
            o_16[i] += transform[2 * j][i] * src[j * sstep]; \
    TR_8(e_16, src, 1, 2 * sstep, SET);                      \
    for (i = 0; i < 8; i++) {                                \
        assign(dst[i * dstep], e_16[i] + o_16[i]);           \
        assign(dst[(15 - i) * dstep], e_16[i] - o_16[i]);    \
#define TR_32(dst, src, dstep, sstep, assign)                \
    int o_32[16] = { 0 };                                    \
    for (i = 0; i < 16; i++)                                 \
        for (j = 1; j < 32; j += 2)                          \
            o_32[i] += transform[j][i] * src[j * sstep];     \
    TR_16(e_32, src, 1, 2 * sstep, SET);                     \
    for (i = 0; i < 16; i++) {                               \
        assign(dst[i * dstep], e_32[i] + o_32[i]);           \
        assign(dst[(31 - i) * dstep], e_32[i] - o_32[i]);    \
int add = 1 << (shift - 1);
for (i = 0; i < 8; i++) {
add = 1 << (shift - 1);
for (i = 0; i < 8; i++) {

int add = 1 << (shift - 1);
for (i = 0; i < 16; i++) {
add = 1 << (shift - 1);
for (i = 0; i < 16; i++) {

int add = 1 << (shift - 1);
for (i = 0; i < 32; i++) {
add = 1 << (shift - 1);
for (i = 0; i < 32; i++) {
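/*
 * Editorial note: the sao_band_filter fragments below implement SAO band
 * offset. The sample range is split into 32 equal bands (the band index is
 * src[x] >> shift, presumably with shift = BIT_DEPTH - 5, i.e. bands of 8
 * values for 8-bit content); only the four consecutive bands starting at
 * sao->band_position receive an offset, which is what the
 * offset_table[(k + sao_left_class) & 31] initialisation sets up. The
 * init_x/init_y/width/height adjustments, selected by the `class` argument,
 * appear to restrict each call to one stripe of the CTB so that samples
 * shared with not-yet-decoded neighbours can be handled later.
 */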
                             int c_idx, int class)

    int chroma = !!c_idx;
    int *sao_offset_val = sao->offset_val[c_idx];
    int sao_left_class  = sao->band_position[c_idx];
    int init_y = 0, init_x = 0;

    width  -= (8 >> chroma) + 2;
    height -= (4 >> chroma) + 2;

    init_y  = -(4 >> chroma) - 2;
    width  -= (8 >> chroma) + 2;
    height  = (4 >> chroma) + 2;

    init_x  = -(8 >> chroma) - 2;
    width   = (8 >> chroma) + 2;
    height -= (4 >> chroma) + 2;

    init_y = -(4 >> chroma) - 2;
    init_x = -(8 >> chroma) - 2;
    width  = (8 >> chroma) + 2;
    height = (4 >> chroma) + 2;

    dst = dst + (init_y * stride + init_x);
    src = src + (init_y * stride + init_x);
    for (k = 0; k < 4; k++)
        offset_table[(k + sao_left_class) & 31] = sao_offset_val[k + 1];
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            dst[x] = av_clip_pixel(src[x] + offset_table[src[x] >> shift]);
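/*
 * Editorial note: the four edge-offset variants below implement SAO edge
 * offset. sao->eo_class picks one of the four directions in pos[][]
 * (horizontal, vertical, 135-degree and 45-degree diagonals); every sample
 * is compared with its two neighbours along that direction via CMP
 * (-1/0/+1), and the two results select a category (presumably
 * offset_val = edge_idx[2 + diff0 + diff1] in the elided lines) whose
 * offset is added to the sample. The variants differ only in which CTB
 * border stripe they process and in which corner sample they leave for the
 * diagonally neighbouring CTB (the save_* flags at the end of each
 * function).
 */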
                             int *borders, int _width, int _height,

    int chroma = !!c_idx;
    int *sao_offset_val = sao->offset_val[c_idx];
    int sao_eo_class    = sao->eo_class[c_idx];
    int init_x = 0, init_y = 0, width = _width, height = _height;

    static const int8_t pos[4][2][2] = {
        { { -1,  0 }, {  1, 0 } },
        { {  0, -1 }, {  0, 1 } },
        { { -1, -1 }, {  1, 1 } },
        { {  1, -1 }, { -1, 1 } },
    };

    static const uint8_t edge_idx[] = { 1, 2, 0, 3, 4 };

#define CMP(a, b) ((a) > (b) ? 1 : ((a) == (b) ? 0 : -1))

    width  -= (8 >> chroma) + 2;
    height -= (4 >> chroma) + 2;

    dst = dst + (init_y * stride + init_x);
    src = src + (init_y * stride + init_x);

    for (y = 0; y < height; y++) {

    int x_stride = width - 1;
    for (x = 0; x < height; x++) {

    for (x = init_x; x < width; x++)

    int y_stride = stride * (height - 1);
    for (x = init_x; x < width; x++)
        dst[x + y_stride] = av_clip_pixel(src[x + y_stride] + offset_val);

    int y_stride = init_y * stride;
    int pos_0_0  = pos[sao_eo_class][0][0];
    int pos_0_1  = pos[sao_eo_class][0][1];
    int pos_1_0  = pos[sao_eo_class][1][0];
    int pos_1_1  = pos[sao_eo_class][1][1];

    int y_stride_0_1 = (init_y + pos_0_1) * stride;
    int y_stride_1_1 = (init_y + pos_1_1) * stride;
    for (y = init_y; y < height; y++) {
        for (x = init_x; x < width; x++) {
            int diff0 = CMP(src[x + y_stride], src[x + pos_0_0 + y_stride_0_1]);
            int diff1 = CMP(src[x + y_stride], src[x + pos_1_0 + y_stride_1_1]);

            dst[x + y_stride] = av_clip_pixel(src[x + y_stride] + sao_offset_val[offset_val]);

        y_stride_0_1 += stride;
        y_stride_1_1 += stride;

    int save_upper_left = !diag_edge && sao_eo_class == SAO_EO_135D &&
                          !borders[0] && !borders[1];
    for (y = init_y + save_upper_left; y < height; y++)

    for (x = init_x + save_upper_left; x < width; x++)
                             int *borders, int _width, int _height,

    int chroma = !!c_idx;
    int *sao_offset_val = sao->offset_val[c_idx];
    int sao_eo_class    = sao->eo_class[c_idx];
    int init_x = 0, init_y = 0, width = _width, height = _height;

    static const int8_t pos[4][2][2] = {
        { { -1,  0 }, {  1, 0 } },
        { {  0, -1 }, {  0, 1 } },
        { { -1, -1 }, {  1, 1 } },
        { {  1, -1 }, { -1, 1 } },
    };

    static const uint8_t edge_idx[] = { 1, 2, 0, 3, 4 };

#define CMP(a, b) ((a) > (b) ? 1 : ((a) == (b) ? 0 : -1))

    init_y  = -(4 >> chroma) - 2;
    width  -= (8 >> chroma) + 2;
    height  = (4 >> chroma) + 2;

    dst = dst + (init_y * stride + init_x);
    src = src + (init_y * stride + init_x);

    for (y = 0; y < height; y++) {

    int x_stride = width - 1;
    for (x = 0; x < height; x++) {

    int y_stride = init_y * stride;
    int pos_0_0  = pos[sao_eo_class][0][0];
    int pos_0_1  = pos[sao_eo_class][0][1];
    int pos_1_0  = pos[sao_eo_class][1][0];
    int pos_1_1  = pos[sao_eo_class][1][1];

    int y_stride_0_1 = (init_y + pos_0_1) * stride;
    int y_stride_1_1 = (init_y + pos_1_1) * stride;
    for (y = init_y; y < height; y++) {
        for (x = init_x; x < width; x++) {
            int diff0 = CMP(src[x + y_stride], src[x + pos_0_0 + y_stride_0_1]);
            int diff1 = CMP(src[x + y_stride], src[x + pos_1_0 + y_stride_1_1]);

            dst[x + y_stride] = av_clip_pixel(src[x + y_stride] + sao_offset_val[offset_val]);

        y_stride_0_1 += stride;
        y_stride_1_1 += stride;

    int save_lower_left = !diag_edge && sao_eo_class == SAO_EO_45D && !borders[0];
    for (y = init_y; y < height - save_lower_left; y++)

    for (x = init_x + save_lower_left; x < width; x++)
                             int *borders, int _width, int _height,

    int chroma = !!c_idx;
    int *sao_offset_val = sao->offset_val[c_idx];
    int sao_eo_class    = sao->eo_class[c_idx];
    int init_x = 0, init_y = 0, width = _width, height = _height;

    static const int8_t pos[4][2][2] = {
        { { -1,  0 }, {  1, 0 } },
        { {  0, -1 }, {  0, 1 } },
        { { -1, -1 }, {  1, 1 } },
        { {  1, -1 }, { -1, 1 } },
    };

    static const uint8_t edge_idx[] = { 1, 2, 0, 3, 4 };

#define CMP(a, b) ((a) > (b) ? 1 : ((a) == (b) ? 0 : -1))

    init_x  = -(8 >> chroma) - 2;
    width   = (8 >> chroma) + 2;

    height -= (4 >> chroma) + 2;

    dst = dst + (init_y * stride + init_x);
    src = src + (init_y * stride + init_x);

    for (x = init_x; x < width; x++)

    int y_stride = stride * (height - 1);
    for (x = init_x; x < width; x++)
        dst[x + y_stride] = av_clip_pixel(src[x + y_stride] + offset_val);

    int y_stride = init_y * stride;
    int pos_0_0  = pos[sao_eo_class][0][0];
    int pos_0_1  = pos[sao_eo_class][0][1];
    int pos_1_0  = pos[sao_eo_class][1][0];
    int pos_1_1  = pos[sao_eo_class][1][1];

    int y_stride_0_1 = (init_y + pos_0_1) * stride;
    int y_stride_1_1 = (init_y + pos_1_1) * stride;
    for (y = init_y; y < height; y++) {
        for (x = init_x; x < width; x++) {
            int diff0 = CMP(src[x + y_stride], src[x + pos_0_0 + y_stride_0_1]);
            int diff1 = CMP(src[x + y_stride], src[x + pos_1_0 + y_stride_1_1]);

            dst[x + y_stride] = av_clip_pixel(src[x + y_stride] + sao_offset_val[offset_val]);

        y_stride_0_1 += stride;
        y_stride_1_1 += stride;

    int save_upper_right = !diag_edge && sao_eo_class == SAO_EO_45D && !borders[1];
    for (y = init_y + save_upper_right; y < height; y++)

    for (x = init_x; x < width - save_upper_right; x++)
                             int *borders, int _width, int _height,

    int chroma = !!c_idx;
    int *sao_offset_val = sao->offset_val[c_idx];
    int sao_eo_class    = sao->eo_class[c_idx];
    int init_x = 0, init_y = 0, width = _width, height = _height;

    static const int8_t pos[4][2][2] = {
        { { -1,  0 }, {  1, 0 } },
        { {  0, -1 }, {  0, 1 } },
        { { -1, -1 }, {  1, 1 } },
        { {  1, -1 }, { -1, 1 } },
    };

    static const uint8_t edge_idx[] = { 1, 2, 0, 3, 4 };

#define CMP(a, b) ((a) > (b) ? 1 : ((a) == (b) ? 0 : -1))

    init_y = -(4 >> chroma) - 2;
    init_x = -(8 >> chroma) - 2;
    width  = (8 >> chroma) + 2;
    height = (4 >> chroma) + 2;

    dst = dst + (init_y * stride + init_x);
    src = src + (init_y * stride + init_x);

    int y_stride = init_y * stride;
    int pos_0_0  = pos[sao_eo_class][0][0];
    int pos_0_1  = pos[sao_eo_class][0][1];
    int pos_1_0  = pos[sao_eo_class][1][0];
    int pos_1_1  = pos[sao_eo_class][1][1];

    int y_stride_0_1 = (init_y + pos_0_1) * stride;
    int y_stride_1_1 = (init_y + pos_1_1) * stride;

    for (y = init_y; y < height; y++) {
        for (x = init_x; x < width; x++) {
            int diff0 = CMP(src[x + y_stride], src[x + pos_0_0 + y_stride_0_1]);
            int diff1 = CMP(src[x + y_stride], src[x + pos_1_0 + y_stride_1_1]);

            dst[x + y_stride] = av_clip_pixel(src[x + y_stride] + sao_offset_val[offset_val]);

        y_stride_0_1 += stride;
        y_stride_1_1 += stride;

    int save_lower_right = !diag_edge && sao_eo_class == SAO_EO_135D;
    for (y = init_y; y < height - save_lower_right; y++)

    for (x = init_x; x < width - save_lower_right; x++)
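/*
 * Editorial note: the put_hevc_qpel_* functions below perform HEVC luma
 * quarter-sample interpolation. QPEL_FILTER_1/2/3 correspond to the three
 * fractional phases (1/4, 1/2, 3/4); the half-sample taps
 * {-1, 4, -11, 40, 40, -11, 4, -1} are symmetric and the two quarter phases
 * are mirror images of each other. The single-pass functions shift only by
 * (BIT_DEPTH - 8), keeping the result at the 14-bit intermediate precision
 * expected by the prediction output stage.
 */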
                                  uint8_t *_src, ptrdiff_t _srcstride,

    ptrdiff_t srcstride = _srcstride / sizeof(pixel);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
#define QPEL_FILTER_1(src, stride)     \
    (1 * -src[x - 3 * stride] +        \
     4 *  src[x - 2 * stride] -        \
    10 *  src[x -     stride] +        \
    58 *  src[x]              +        \
    17 *  src[x +     stride] -        \
     5 *  src[x + 2 * stride] +        \
     1 *  src[x + 3 * stride])

#define QPEL_FILTER_2(src, stride)     \
    (1 * -src[x - 3 * stride] +        \
     4 *  src[x - 2 * stride] -        \
    11 *  src[x -     stride] +        \
    40 *  src[x]              +        \
    40 *  src[x +     stride] -        \
    11 *  src[x + 2 * stride] +        \
     4 *  src[x + 3 * stride] -        \
     1 *  src[x + 4 * stride])

#define QPEL_FILTER_3(src, stride)     \
    (1 *  src[x - 2 * stride] -        \
     5 *  src[x -     stride] +        \
    17 *  src[x]              +        \
    58 *  src[x +     stride] -        \
    10 *  src[x + 2 * stride] +        \
     4 *  src[x + 3 * stride] -        \
     1 *  src[x + 4 * stride])
#define PUT_HEVC_QPEL_H(H)                                                  \
static void FUNC(put_hevc_qpel_h ## H)(int16_t *dst, ptrdiff_t dststride,   \
                                       uint8_t *_src, ptrdiff_t _srcstride, \
                                       int width, int height,               \
    pixel *src = (pixel*)_src;                                              \
    ptrdiff_t srcstride = _srcstride / sizeof(pixel);                       \
    for (y = 0; y < height; y++) {                                          \
        for (x = 0; x < width; x++)                                         \
            dst[x] = QPEL_FILTER_ ## H(src, 1) >> (BIT_DEPTH - 8);          \
#define PUT_HEVC_QPEL_V(V)                                                  \
static void FUNC(put_hevc_qpel_v ## V)(int16_t *dst, ptrdiff_t dststride,   \
                                       uint8_t *_src, ptrdiff_t _srcstride, \
                                       int width, int height,               \
    pixel *src = (pixel*)_src;                                              \
    ptrdiff_t srcstride = _srcstride / sizeof(pixel);                       \
    for (y = 0; y < height; y++) {                                          \
        for (x = 0; x < width; x++)                                         \
            dst[x] = QPEL_FILTER_ ## V(src, srcstride) >> (BIT_DEPTH - 8);  \
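/*
 * Editorial note: PUT_HEVC_QPEL_HV chains the two passes. The horizontal
 * filter is run first into the int16_t tmp_array buffer, starting
 * ff_hevc_qpel_extra_before[V] rows above the block so the vertical filter
 * has full support; the vertical pass then reads that buffer with a fixed
 * MAX_PB_SIZE stride and shifts by 6, since the intermediate already
 * carries the extra (BIT_DEPTH - 8) precision from the first pass.
 */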
#define PUT_HEVC_QPEL_HV(H, V)                                              \
static void FUNC(put_hevc_qpel_h ## H ## v ## V)(int16_t *dst,              \
                                                 ptrdiff_t dststride,       \
                                                 ptrdiff_t _srcstride,      \
                                                 int width, int height,     \
    pixel *src = (pixel*)_src;                                              \
    ptrdiff_t srcstride = _srcstride / sizeof(pixel);                       \
    int16_t tmp_array[(MAX_PB_SIZE + 7) * MAX_PB_SIZE];                     \
    int16_t *tmp = tmp_array;                                               \
    src -= ff_hevc_qpel_extra_before[V] * srcstride;                        \
    for (y = 0; y < height + ff_hevc_qpel_extra[V]; y++) {                  \
        for (x = 0; x < width; x++)                                         \
            tmp[x] = QPEL_FILTER_ ## H(src, 1) >> (BIT_DEPTH - 8);          \
        tmp += MAX_PB_SIZE;                                                 \
    tmp = tmp_array + ff_hevc_qpel_extra_before[V] * MAX_PB_SIZE;           \
    for (y = 0; y < height; y++) {                                          \
        for (x = 0; x < width; x++)                                         \
            dst[x] = QPEL_FILTER_ ## V(tmp, MAX_PB_SIZE) >> 6;              \
        tmp += MAX_PB_SIZE;                                                 \
PUT_HEVC_QPEL_HV(1, 2)
PUT_HEVC_QPEL_HV(1, 3)
PUT_HEVC_QPEL_HV(2, 1)
PUT_HEVC_QPEL_HV(2, 2)
PUT_HEVC_QPEL_HV(2, 3)
PUT_HEVC_QPEL_HV(3, 1)
PUT_HEVC_QPEL_HV(3, 2)
PUT_HEVC_QPEL_HV(3, 3)
                                  uint8_t *_src, ptrdiff_t _srcstride,

    ptrdiff_t srcstride = _srcstride / sizeof(pixel);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
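/*
 * Editorial note: the put_hevc_epel_* functions are the chroma
 * counterparts. HEVC chroma interpolation uses a 4-tap filter, so
 * EPEL_FILTER only touches src[x - stride], src[x], src[x + stride] and
 * src[x + 2 * stride]; the tap values filter_0..filter_3 are loaded at run
 * time (presumably from the fractional-position coefficient table) rather
 * than baked into per-phase macros as for luma.
 */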
#define EPEL_FILTER(src, stride)          \
    (filter_0 * src[x - stride] +         \
     filter_1 * src[x]          +         \
     filter_2 * src[x + stride] +         \
     filter_3 * src[x + 2 * stride])
                                 uint8_t *_src, ptrdiff_t _srcstride,

    ptrdiff_t srcstride = _srcstride / sizeof(pixel);

    int8_t filter_0 = filter[0];
    int8_t filter_1 = filter[1];
    int8_t filter_2 = filter[2];
    int8_t filter_3 = filter[3];
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
                                 uint8_t *_src, ptrdiff_t _srcstride,

    ptrdiff_t srcstride = _srcstride / sizeof(pixel);

    int8_t filter_0 = filter[0];
    int8_t filter_1 = filter[1];
    int8_t filter_2 = filter[2];
    int8_t filter_3 = filter[3];

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
                                  uint8_t *_src, ptrdiff_t _srcstride,

    ptrdiff_t srcstride = _srcstride / sizeof(pixel);

    int8_t filter_0 = filter_h[0];
    int8_t filter_1 = filter_h[1];
    int8_t filter_2 = filter_h[2];
    int8_t filter_3 = filter_h[3];

    int16_t *tmp = tmp_array;

    for (x = 0; x < width; x++)

    filter_0 = filter_v[0];
    filter_1 = filter_v[1];
    filter_2 = filter_v[2];
    filter_3 = filter_v[3];
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
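/*
 * Editorial note: the put_*_pred functions below turn the 14-bit
 * intermediate from the interpolation filters back into pixels. For the
 * unweighted cases the shift is presumably 14 - BIT_DEPTH (one more for the
 * bi-directional variant, which sums src1 and src2), and
 * offset = 1 << (shift - 1) is the usual round-to-nearest term applied
 * before av_clip_pixel.
 */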
                                 int16_t *src, ptrdiff_t srcstride,

    ptrdiff_t dststride = _dststride / sizeof(pixel);

    int offset = 1 << (shift - 1);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
                                     int16_t *src1, int16_t *src2,
                                     ptrdiff_t srcstride,

    ptrdiff_t dststride = _dststride / sizeof(pixel);

    int offset = 1 << (shift - 1);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
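/*
 * Editorial note: the weighted_pred variants apply explicit weighted
 * prediction with log2Wd = denom + shift and offset = 1 << (log2Wd - 1).
 * Each sample is presumably reconstructed as
 *   uni: clip(((src * w0 + offset) >> log2Wd) + o0)
 *   bi:  clip((src1 * w0 + src2 * w1 + ((o0 + o1 + 1) << log2Wd))
 *             >> (log2Wd + 1))
 * which matches the final expression visible below.
 */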
                               uint8_t *_dst, ptrdiff_t _dststride,
                               int16_t *src, ptrdiff_t srcstride,

    ptrdiff_t dststride = _dststride / sizeof(pixel);

    log2Wd = denom + shift;
    offset = 1 << (log2Wd - 1);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
                                   int16_t wl0Flag, int16_t wl1Flag,
                                   int16_t ol0Flag, int16_t ol1Flag,
                                   uint8_t *_dst, ptrdiff_t _dststride,
                                   int16_t *src1, int16_t *src2,
                                   ptrdiff_t srcstride,

    int shift, log2Wd, w0, w1, o0, o1, x, y;

    ptrdiff_t dststride = _dststride / sizeof(pixel);

    log2Wd = denom + shift;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++)
            dst[x] = av_clip_pixel((src1[x] * w0 + src2[x] * w1 +
                                    ((o0 + o1 + 1) << log2Wd)) >> (log2Wd + 1));
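/*
 * Editorial note: the deblocking filter follows. P0..P3 / Q0..Q3 address
 * the four samples on either side of the edge via xstride, and the
 * T-prefixed TP/TQ variants the same samples three lines further along the
 * edge, so d0/d3 measure activity on the first and fourth line of each
 * 4-sample segment; per the HEVC rules, segments with d0 + d3 >= beta are
 * left unfiltered. beta and tc are the bitstream-derived thresholds scaled
 * to the coding bit depth. The strong filter is chosen when local activity
 * is well below beta and |p0 - q0| < tc25 = (5 * tc + 1) >> 1, with
 * corrections clipped to +/- 2 * tc; the weak filter applies
 * delta0 = (9 * (q0 - p0) - 3 * (q1 - p1) + 8) >> 4, clipped to +/- tc
 * (+/- tc/2 for the secondary taps), and skips the line entirely when
 * |delta0| >= 10 * tc.
 */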
#define P3 pix[-4 * xstride]
#define P2 pix[-3 * xstride]
#define P1 pix[-2 * xstride]
#define P0 pix[-1 * xstride]
#define Q0 pix[ 0 * xstride]
#define Q1 pix[ 1 * xstride]
#define Q2 pix[ 2 * xstride]
#define Q3 pix[ 3 * xstride]

#define TP3 pix[-4 * xstride + 3 * ystride]
#define TP2 pix[-3 * xstride + 3 * ystride]
#define TP1 pix[-2 * xstride + 3 * ystride]
#define TP0 pix[-1 * xstride + 3 * ystride]
#define TQ0 pix[ 0 * xstride + 3 * ystride]
#define TQ1 pix[ 1 * xstride + 3 * ystride]
#define TQ2 pix[ 2 * xstride + 3 * ystride]
#define TQ3 pix[ 3 * xstride + 3 * ystride]
                                        ptrdiff_t _xstride, ptrdiff_t _ystride,
                                        int *_beta, int *_tc,

    ptrdiff_t xstride = _xstride / sizeof(pixel);
    ptrdiff_t ystride = _ystride / sizeof(pixel);

    for (j = 0; j < 2; j++) {
        const int dp0  = abs(P2  - 2 * P1  + P0);
        const int dq0  = abs(Q2  - 2 * Q1  + Q0);
        const int dp3  = abs(TP2 - 2 * TP1 + TP0);
        const int dq3  = abs(TQ2 - 2 * TQ1 + TQ0);
        const int d0   = dp0 + dq0;
        const int d3   = dp3 + dq3;
        const int beta = _beta[j] << (BIT_DEPTH - 8);

        const int no_p = _no_p[j];
        const int no_q = _no_q[j];

        if (d0 + d3 >= beta) {

            const int beta_3 = beta >> 3;
            const int beta_2 = beta >> 2;
            const int tc25   = ((tc * 5 + 1) >> 1);

            if (abs(P3 - P0) + abs(Q3 - Q0) < beta_3 && abs(P0 - Q0) < tc25 &&
                (d0 << 1) < beta_2 && (d3 << 1) < beta_2) {

                const int tc2 = tc << 1;
                for (d = 0; d < 4; d++) {
                    P0 = p0 + av_clip(((p2 + 2 * p1 + 2 * p0 + 2 * q0 + q1 + 4) >> 3) - p0, -tc2, tc2);
                    P1 = p1 + av_clip(((p2 + p1 + p0 + q0 + 2) >> 2) - p1, -tc2, tc2);
                    P2 = p2 + av_clip(((2 * p3 + 3 * p2 + p1 + p0 + q0 + 4) >> 3) - p2, -tc2, tc2);

                    Q0 = q0 + av_clip(((p1 + 2 * p0 + 2 * q0 + 2 * q1 + q2 + 4) >> 3) - q0, -tc2, tc2);
                    Q1 = q1 + av_clip(((p0 + q0 + q1 + q2 + 2) >> 2) - q1, -tc2, tc2);
                    Q2 = q2 + av_clip(((2 * q3 + 3 * q2 + q1 + q0 + p0 + 4) >> 3) - q2, -tc2, tc2);

                const int tc_2 = tc >> 1;
                if (dp0 + dp3 < ((beta + (beta >> 1)) >> 3))
                if (dq0 + dq3 < ((beta + (beta >> 1)) >> 3))

                for (d = 0; d < 4; d++) {
                    int delta0 = (9 * (q0 - p0) - 3 * (q1 - p1) + 8) >> 4;
                    if (abs(delta0) < 10 * tc) {
                        delta0 = av_clip(delta0, -tc, tc);

                        if (!no_p && nd_p > 1) {
                            const int deltap1 = av_clip((((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1, -tc_2, tc_2);
                        if (!no_q && nd_q > 1) {
                            const int deltaq1 = av_clip((((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1, -tc_2, tc_2);
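/*
 * Editorial note: the chroma deblocking filter is the simplified one-sample
 * variant. Only p0 and q0 are touched, using
 *   delta0 = av_clip((((q0 - p0) * 4) + p1 - q1 + 4) >> 3, -tc, tc)
 * presumably added to p0 and subtracted from q0 (subject to the no_p/no_q
 * flags); there is no strong/weak decision and no beta threshold.
 */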
                                          ptrdiff_t _ystride, int *_tc,

    int d, j, no_p, no_q;

    ptrdiff_t xstride = _xstride / sizeof(pixel);
    ptrdiff_t ystride = _ystride / sizeof(pixel);

    for (j = 0; j < 2; j++) {

        for (d = 0; d < 4; d++) {

            delta0 = av_clip((((q0 - p0) * 4) + p1 - q1 + 4) >> 3, -tc, tc);
                                  beta, tc, no_p, no_q);

                                  beta, tc, no_p, no_q);