FFmpeg
mathops.h
/*
 * simple math operations
 * Copyright (c) 2001, 2002 Fabrice Bellard
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef AVCODEC_MATHOPS_H
#define AVCODEC_MATHOPS_H

#include <stdint.h>

#include "libavutil/attributes_internal.h"
#include "libavutil/common.h"
#include "config.h"

#define MAX_NEG_CROP 1024

extern const uint32_t ff_inverse[257];
extern const uint8_t ff_log2_run[41];
extern const uint8_t ff_sqrt_tab[256];
extern const uint8_t attribute_visibility_hidden ff_crop_tab[256 + 2 * MAX_NEG_CROP];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag_scan[16+1];

#if   ARCH_ARM
#   include "arm/mathops.h"
#elif ARCH_AVR32
#   include "avr32/mathops.h"
#elif ARCH_MIPS
#   include "mips/mathops.h"
#elif ARCH_PPC
#   include "ppc/mathops.h"
#elif ARCH_X86
#   include "x86/mathops.h"
#endif

/* generic implementation */

#ifndef MUL64
#   define MUL64(a,b) ((int64_t)(a) * (int64_t)(b))
#endif

#ifndef MULL
#   define MULL(a,b,s) (MUL64(a, b) >> (s))
#endif

#ifndef MULH
static av_always_inline int MULH(int a, int b)
{
    return MUL64(a, b) >> 32;
}
#endif

#ifndef UMULH
static av_always_inline unsigned UMULH(unsigned a, unsigned b)
{
    return ((uint64_t)(a) * (uint64_t)(b)) >> 32;
}
#endif

#ifndef MAC64
#   define MAC64(d, a, b) ((d) += MUL64(a, b))
#endif

#ifndef MLS64
#   define MLS64(d, a, b) ((d) -= MUL64(a, b))
#endif
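
/*
 * Illustrative sketch (not part of the original header): MUL64/MULL/MAC64
 * keep the full 64-bit product of two 32-bit operands, which is the usual
 * way to do fixed-point multiplication and accumulation without overflow.
 * The example_* helpers below are hypothetical and kept under #if 0.
 */
#if 0
static inline int example_q15_multiply(int a_q15, int b_q15)
{
    /* multiply two Q15 fixed-point values: 64-bit product, then shift back */
    return (int)MULL(a_q15, b_q15, 15);
}

static inline int64_t example_dot_product(const int16_t *x, const int16_t *y, int n)
{
    int64_t acc = 0;
    for (int i = 0; i < n; i++)
        MAC64(acc, x[i], y[i]);  /* acc += x[i] * y[i] with a 64-bit product */
    return acc;
}
#endif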

/* signed 16x16 -> 32 multiply add accumulate */
#ifndef MAC16
#   define MAC16(rt, ra, rb) rt += (ra) * (rb)
#endif

/* signed 16x16 -> 32 multiply */
#ifndef MUL16
#   define MUL16(ra, rb) ((ra) * (rb))
#endif

#ifndef MLS16
#   define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
#endif

/* median of 3 */
#ifndef mid_pred
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    if (a > b) {
        if (c > b) {
            if (c > a) b = a;
            else       b = c;
        }
    } else {
        if (b > c) {
            if (c > a) b = c;
            else       b = a;
        }
    }
    return b;
}
#endif
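
/*
 * Illustrative sketch (not part of the original header): mid_pred() returns
 * the median of three values, the classic median predictor used for motion
 * vectors and lossless pixel prediction. The example_* helper below is
 * hypothetical and kept under #if 0.
 */
#if 0
static inline int example_median_predictor(int left, int top, int topright)
{
    /* predict the current value as the median of three neighbours */
    return mid_pred(left, top, topright);
}
#endif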

#ifndef median4
#define median4 median4
static inline av_const int median4(int a, int b, int c, int d)
{
    if (a < b) {
        if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
        else       return (FFMIN(b, c) + FFMAX(a, d)) / 2;
    } else {
        if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
        else       return (FFMIN(a, c) + FFMAX(b, d)) / 2;
    }
}
#endif

#define FF_SIGNBIT(x) ((x) >> CHAR_BIT * sizeof(x) - 1)

#ifndef sign_extend
static inline av_const int sign_extend(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned) val << shift };
    return v.s >> shift;
}
#endif

#ifndef sign_extend64
static inline av_const int64_t sign_extend64(int64_t val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int64_t) - bits;
    union { uint64_t u; int64_t s; } v = { (uint64_t) val << shift };
    return v.s >> shift;
}
#endif

#ifndef zero_extend
static inline av_const unsigned zero_extend(unsigned val, unsigned bits)
{
    return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
}
#endif
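
/*
 * Illustrative sketch (not part of the original header): sign_extend() turns
 * the low 'bits' bits of a value into a properly signed int, e.g. when a
 * bitstream field is read as unsigned but is defined as two's complement.
 * The example_* helper below is hypothetical and kept under #if 0.
 */
#if 0
static inline int example_read_signed_field(unsigned raw_bits)
{
    /* a 5-bit two's-complement field: 0x1F becomes -1, 0x10 becomes -16 */
    return sign_extend(zero_extend(raw_bits, 5), 5);
}
#endif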

#ifndef COPY3_IF_LT
#define COPY3_IF_LT(x, y, a, b, c, d)\
if ((y) < (x)) {\
    (x) = (y);\
    (a) = (b);\
    (c) = (d);\
}
#endif
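
/*
 * Illustrative sketch (not part of the original header): COPY3_IF_LT() is a
 * conditional triple assignment, typically used to keep the best score
 * together with the data that produced it, e.g. during a motion search.
 * The example_* helper below is hypothetical and kept under #if 0.
 */
#if 0
static inline void example_keep_best(int *best_score, int *best_x, int *best_y,
                                     int score, int x, int y)
{
    /* if score < *best_score, copy score, x and y in one step */
    COPY3_IF_LT(*best_score, score, *best_x, x, *best_y, y);
}
#endif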

#ifndef MASK_ABS
#define MASK_ABS(mask, level) do {     \
        mask  = level >> 31;           \
        level = (level ^ mask) - mask; \
    } while (0)
#endif
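
/*
 * Illustrative sketch (not part of the original header): MASK_ABS() computes
 * a branch-free absolute value and keeps the sign mask (0 or -1), so the
 * original sign can be re-applied later, e.g. after quantization. The
 * example_* helper below is hypothetical and kept under #if 0.
 */
#if 0
static inline int example_abs_then_restore_sign(int level, int offset)
{
    int mask;
    MASK_ABS(mask, level);        /* mask = level >> 31, level = |level| */
    level += offset;              /* work on the magnitude */
    return (level ^ mask) - mask; /* re-apply the original sign */
}
#endif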

#ifndef NEG_SSR32
#   define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
#endif

#ifndef NEG_USR32
#   define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
#endif
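
/*
 * Illustrative sketch (not part of the original header): NEG_SSR32() and
 * NEG_USR32() shift a 32-bit value right by (32 - s), i.e. they extract the
 * top s bits, the core operation of the bitstream readers. The example_*
 * helper below is hypothetical, kept under #if 0, and assumes 1 <= s <= 32.
 */
#if 0
static inline unsigned example_show_top_bits(uint32_t cache, int s)
{
    /* return the s most significant bits of the 32-bit cache word */
    return NEG_USR32(cache, s);
}
#endif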

#if HAVE_BIGENDIAN
#   ifndef PACK_2U8
#       define PACK_2U8(a,b)     (((a) <<  8) |  (b))
#   endif
#   ifndef PACK_4U8
#       define PACK_4U8(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
#   endif
#   ifndef PACK_2U16
#       define PACK_2U16(a,b)    (((a) << 16) |  (b))
#   endif
#else
#   ifndef PACK_2U8
#       define PACK_2U8(a,b)     (((b) <<  8) |  (a))
#   endif
#   ifndef PACK_4U8
#       define PACK_4U8(a,b,c,d) (((d) << 24) | ((c) << 16) | ((b) << 8) | (a))
#   endif
#   ifndef PACK_2U16
#       define PACK_2U16(a,b)    (((b) << 16) |  (a))
#   endif
#endif

#ifndef PACK_2S8
#   define PACK_2S8(a,b)     PACK_2U8((a)&255, (b)&255)
#endif
#ifndef PACK_4S8
#   define PACK_4S8(a,b,c,d) PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
#endif
#ifndef PACK_2S16
#   define PACK_2S16(a,b)    PACK_2U16((a)&0xffff, (b)&0xffff)
#endif
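
/*
 * Illustrative sketch (not part of the original header): the PACK_* macros
 * combine small values into one word in native byte order, so the result
 * can be stored with a single write. The example_* helper below is
 * hypothetical and kept under #if 0.
 */
#if 0
static inline unsigned example_pack_two_bytes(uint8_t a, uint8_t b)
{
    /* pack two bytes into one 16-bit pattern in native byte order */
    return PACK_2U8(a, b);
}
#endif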

#ifndef FASTDIV
#   define FASTDIV(a,b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
#endif /* FASTDIV */
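
/*
 * Illustrative sketch (not part of the original header): FASTDIV() replaces
 * an integer division by a multiplication with a precomputed reciprocal from
 * ff_inverse[], so the divisor must index the table (at most 256) and the
 * dividend must stay within the range for which the reciprocal is exact.
 * The example_* helper below is hypothetical and kept under #if 0.
 */
#if 0
static inline unsigned example_average(unsigned sum, unsigned count)
{
    /* count assumed to be in [1, 256] and sum small enough for FASTDIV */
    return FASTDIV(sum, count);
}
#endif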

#ifndef ff_sqrt
#define ff_sqrt ff_sqrt
static inline av_const unsigned int ff_sqrt(unsigned int a)
{
    unsigned int b;

    if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
    else if (a < (1 << 12)) b = ff_sqrt_tab[a >> 4] >> 2;
#if !CONFIG_SMALL
    else if (a < (1 << 14)) b = ff_sqrt_tab[a >> 6] >> 1;
    else if (a < (1 << 16)) b = ff_sqrt_tab[a >> 8];
#endif
    else {
        int s = av_log2_16bit(a >> 16) >> 1;
        unsigned int c = a >> (s + 2);
        b = ff_sqrt_tab[c >> (s + 8)];
        b = FASTDIV(c, b) + (b << s);
    }

    return b - (a < b * b);
}
#endif
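
/*
 * Illustrative sketch (not part of the original header): ff_sqrt() is an
 * integer square root built from a small lookup table plus a FASTDIV
 * refinement step. The example_* helper below is hypothetical, kept under
 * #if 0, and assumes the squared length fits in an int.
 */
#if 0
static inline unsigned example_vector_length(int dx, int dy)
{
    /* integer length of a motion vector (square root rounded down) */
    return ff_sqrt((unsigned)(dx * dx + dy * dy));
}
#endif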

static inline av_const float ff_sqrf(float a)
{
    return a * a;
}

static inline int8_t ff_u8_to_s8(uint8_t a)
{
    union {
        uint8_t u8;
        int8_t  s8;
    } b;
    b.u8 = a;
    return b.s8;
}

#endif /* AVCODEC_MATHOPS_H */