path: root/linden/indra/llmath/llmath.h
author     Jacek Antonelli  2008-08-15 23:44:46 -0500
committer  Jacek Antonelli  2008-08-15 23:44:46 -0500
commit     38d6d37f2d982fa959e9e8a4a3f7e1ccfad7b5d4 (patch)
tree       adca584755d22ca041a2dbfc35d4eca01f70b32c /linden/indra/llmath/llmath.h
parent     README.txt (diff)
download   meta-impy-38d6d37f2d982fa959e9e8a4a3f7e1ccfad7b5d4.zip
           meta-impy-38d6d37f2d982fa959e9e8a4a3f7e1ccfad7b5d4.tar.gz
           meta-impy-38d6d37f2d982fa959e9e8a4a3f7e1ccfad7b5d4.tar.bz2
           meta-impy-38d6d37f2d982fa959e9e8a4a3f7e1ccfad7b5d4.tar.xz
Second Life viewer sources 1.13.2.12
Diffstat (limited to 'linden/indra/llmath/llmath.h')
-rw-r--r--    linden/indra/llmath/llmath.h    421
1 files changed, 421 insertions, 0 deletions
diff --git a/linden/indra/llmath/llmath.h b/linden/indra/llmath/llmath.h
new file mode 100644
index 0000000..7009557
--- /dev/null
+++ b/linden/indra/llmath/llmath.h
@@ -0,0 +1,421 @@
/**
 * @file llmath.h
 * @brief Useful math constants and macros.
 *
 * Copyright (c) 2000-2007, Linden Research, Inc.
 *
 * The source code in this file ("Source Code") is provided by Linden Lab
 * to you under the terms of the GNU General Public License, version 2.0
 * ("GPL"), unless you have obtained a separate licensing agreement
 * ("Other License"), formally executed by you and Linden Lab. Terms of
 * the GPL can be found in doc/GPL-license.txt in this distribution, or
 * online at http://secondlife.com/developers/opensource/gplv2
 *
 * There are special exceptions to the terms and conditions of the GPL as
 * it is applied to this Source Code. View the full text of the exception
 * in the file doc/FLOSS-exception.txt in this software distribution, or
 * online at http://secondlife.com/developers/opensource/flossexception
 *
 * By copying, modifying or distributing this software, you acknowledge
 * that you have read and understood your obligations described above,
 * and agree to abide by those obligations.
 *
 * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
 * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
 * COMPLETENESS OR PERFORMANCE.
 */

#ifndef LLMATH_H
#define LLMATH_H

#include <cmath>
#include <math.h>
#include <stdlib.h>

#include "lldefs.h"

// Workaround for Windows & older gcc non-standard function names.
#if LL_WINDOWS
#define llisnan(val) _isnan(val)
#define llfinite(val) _finite(val)
#elif (LL_LINUX && __GNUC__ <= 2)
#define llisnan(val) isnan(val)
#define llfinite(val) isfinite(val)
#else
#define llisnan(val) std::isnan(val)
#define llfinite(val) std::isfinite(val)
#endif
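
// Example (illustrative): these wrappers let callers reject non-finite values
// portably, e.g.  if (llisnan(x) || !llfinite(x)) x = 0.f;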

// Single Precision Floating Point Routines
#ifndef fsqrtf
#define fsqrtf(x) ((F32)sqrt((F64)(x)))
#endif
#ifndef sqrtf
#define sqrtf(x) ((F32)sqrt((F64)(x)))
#endif

#ifndef cosf
#define cosf(x) ((F32)cos((F64)(x)))
#endif
#ifndef sinf
#define sinf(x) ((F32)sin((F64)(x)))
#endif
#ifndef tanf
#define tanf(x) ((F32)tan((F64)(x)))
#endif
#ifndef acosf
#define acosf(x) ((F32)acos((F64)(x)))
#endif

#ifndef powf
#define powf(x,y) ((F32)pow((F64)(x),(F64)(y)))
#endif
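
// Example (illustrative): each wrapper evaluates the double-precision libm
// routine and casts the result back to F32, so powf(2.f, 10.f) yields 1024.f
// and sqrtf(2.f) yields F_SQRT2 (to single precision).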

const F32 GRAVITY = -9.8f;

// mathematical constants
const F32 F_PI = 3.1415926535897932384626433832795f;
const F32 F_TWO_PI = 6.283185307179586476925286766559f;
const F32 F_PI_BY_TWO = 1.5707963267948966192313216916398f;
const F32 F_SQRT2 = 1.4142135623730950488016887242097f;
const F32 F_SQRT3 = 1.73205080756888288657986402541f;
const F32 OO_SQRT2 = 0.7071067811865475244008443621049f;
const F32 DEG_TO_RAD = 0.017453292519943295769236907684886f;
const F32 RAD_TO_DEG = 57.295779513082320876798154814105f;
const F32 F_APPROXIMATELY_ZERO = 0.00001f;
const F32 F_LN2 = 0.69314718056f;
const F32 OO_LN2 = 1.4426950408889634073599246810019f;

// BUG: Eliminate in favor of F_APPROXIMATELY_ZERO above?
const F32 FP_MAG_THRESHOLD = 0.0000001f;

// TODO: Replace with logic like is_approx_equal
inline BOOL is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f < F_APPROXIMATELY_ZERO); }

inline BOOL is_approx_equal(F32 x, F32 y)
{
	const S32 COMPARE_MANTISSA_UP_TO_BIT = 0x02;
	return (abs((S32) ((U32&)x - (U32&)y) ) < COMPARE_MANTISSA_UP_TO_BIT);
}
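
// Note (illustrative): is_approx_equal() compares the raw IEEE-754 bit patterns,
// treating two floats as equal when they are within one representable step (ULP)
// of each other.  For example, is_approx_equal(1.f, 1.f + FLT_EPSILON) is TRUE,
// while is_approx_equal(1.f, 1.000001f) is FALSE.  Values on opposite sides of
// zero (e.g. +0.f and a tiny negative number) are not handled by this trick.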

inline S32 llabs(const S32 a)
{
	return S32(labs(a));
}

inline F32 llabs(const F32 a)
{
	return F32(fabs(a));
}

inline F64 llabs(const F64 a)
{
	return F64(fabs(a));
}

inline S32 lltrunc( F32 f )
{
#if LL_WINDOWS && !defined( __INTEL_COMPILER )
	// Avoids changing the floating point control word.
	// Add or subtract 0.5 - epsilon and then round
	const static U32 zpfp[] = { 0xBEFFFFFF, 0x3EFFFFFF };
	S32 result;
	__asm {
		fld f
		mov eax, f
		shr eax, 29
		and eax, 4
		fadd dword ptr [zpfp + eax]
		fistp result
	}
	return result;
#else
	return (S32)f;
#endif
}
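
// Example (illustrative): lltrunc() drops the fractional part, rounding toward
// zero on both code paths:  lltrunc(3.9f) == 3  and  lltrunc(-3.9f) == -3.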

inline S32 lltrunc( F64 f )
{
	return (S32)f;
}

inline S32 llfloor( F32 f )
{
#if LL_WINDOWS && !defined( __INTEL_COMPILER )
	// Avoids changing the floating point control word.
	// Accurate (unlike the Stereopsis version) for all values between S32_MIN and S32_MAX, and slightly faster than the Stereopsis version.
	// Add -(0.5 - epsilon) and then round
	const U32 zpfp = 0xBEFFFFFF;
	S32 result;
	__asm {
		fld f
		fadd dword ptr [zpfp]
		fistp result
	}
	return result;
#else
	return (S32)floor(f);
#endif
}
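
// Example (illustrative): llfloor() rounds toward negative infinity, unlike
// lltrunc():  llfloor(3.9f) == 3  but  llfloor(-3.1f) == -4.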


inline S32 llceil( F32 f )
{
	// This could probably be optimized, but this works.
	return (S32)ceil(f);
}


#ifndef BOGUS_ROUND
// Use this round. Does an arithmetic round (0.5 always rounds up)
inline S32 llround(const F32 val)
{
	return llfloor(val + 0.5f);
}

#else // BOGUS_ROUND
// Old llround implementation - does banker's rounding (toward the nearest even value in the case of a 0.5).
// Not using this because we don't have a consistent implementation on both platforms; use
// llfloor(val + 0.5f), which is consistent on all platforms.
inline S32 llround(const F32 val)
{
	#if LL_WINDOWS
		// Note: assumes that the floating point control word is set to rounding mode (the default)
		S32 ret_val;
		_asm fld val
		_asm fistp ret_val;
		return ret_val;
	#elif LL_LINUX
		// Note: assumes that the floating point control word is set
		// to rounding mode (the default)
		S32 ret_val;
		__asm__ __volatile__( "flds %1    \n\t"
							  "fistpl %0  \n\t"
							  : "=m" (ret_val)
							  : "m" (val) );
		return ret_val;
	#else
		return llfloor(val + 0.5f);
	#endif
}

// A fast arithmetic round on Intel, from Laurent de Soras http://ldesoras.free.fr
inline int round_int(double x)
{
	const float round_to_nearest = 0.5f;
	int i;
	__asm
	{
		fld x
		fadd st, st (0)
		fadd round_to_nearest
		fistp i
		sar i, 1
	}
	return (i);
}
#endif // BOGUS_ROUND
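
// Example (illustrative): the default llround() above is an arithmetic round,
// so the half-way case always moves up:  llround(2.5f) == 3, llround(-2.5f) == -2,
// and llround(2.4f) == 2.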

inline F32 llround( F32 val, F32 nearest )
{
	return F32(floor(val * (1.0f / nearest) + 0.5f)) * nearest;
}

inline F64 llround( F64 val, F64 nearest )
{
	return F64(floor(val * (1.0 / nearest) + 0.5)) * nearest;
}
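
// Example (illustrative): the two-argument llround() snaps to the closest
// multiple of 'nearest':  llround(3.27f, 0.25f) == 3.25f  and
// llround(1234.0, 50.0) == 1250.0.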

// these provide minimum peak error
//
// avg  error = -0.013049
// peak error = -31.4 dB
// RMS  error = -28.1 dB

const F32 FAST_MAG_ALPHA = 0.960433870103f;
const F32 FAST_MAG_BETA = 0.397824734759f;

// these provide minimum RMS error
//
// avg  error = 0.000003
// peak error = -32.6 dB
// RMS  error = -25.7 dB
//
//const F32 FAST_MAG_ALPHA = 0.948059448969f;
//const F32 FAST_MAG_BETA = 0.392699081699f;

inline F32 fastMagnitude(F32 a, F32 b)
{
	a = (a > 0) ? a : -a;
	b = (b > 0) ? b : -b;
	return(FAST_MAG_ALPHA * llmax(a,b) + FAST_MAG_BETA * llmin(a,b));
}
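
// Example (illustrative): fastMagnitude() approximates sqrt(a*a + b*b) with a
// single multiply-add instead of a square root.  fastMagnitude(3.f, 4.f) returns
// roughly 5.035 versus the exact 5.0, consistent with the peak error quoted above.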



////////////////////
//
// Fast F32/S32 conversions
//
// Culled from www.stereopsis.com/FPU.html

const F64 LL_DOUBLE_TO_FIX_MAGIC = 68719476736.0*1.5;	// 2^36 * 1.5, (52 - _shiftamt = 36) uses limited precision to floor
const S32 LL_SHIFT_AMOUNT = 16;		// 16.16 fixed point representation

// Endian dependent code
#ifdef LL_LITTLE_ENDIAN
	#define LL_EXP_INDEX 1
	#define LL_MAN_INDEX 0
#else
	#define LL_EXP_INDEX 0
	#define LL_MAN_INDEX 1
#endif

/* Deprecated: use llround(), lltrunc(), or llfloor() instead
// ================================================================================================
// Real2Int
// ================================================================================================
inline S32 F64toS32(F64 val)
{
	val = val + LL_DOUBLE_TO_FIX_MAGIC;
	return ((S32*)&val)[LL_MAN_INDEX] >> LL_SHIFT_AMOUNT;
}

// ================================================================================================
// Real2Int
// ================================================================================================
inline S32 F32toS32(F32 val)
{
	return F64toS32 ((F64)val);
}
*/
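
// Note (illustrative): the deprecated conversion above relies on the classic
// "magic number" trick: adding LL_DOUBLE_TO_FIX_MAGIC (1.5 * 2^36) pins the
// exponent so the low mantissa word ends up holding the value in 16.16 fixed
// point; shifting right by LL_SHIFT_AMOUNT then recovers the integer part.
// Roughly, 7.25 becomes 0x00074000 in the mantissa word and 7 after the shift.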

////////////////////////////////////////////////
//
// Fast exp and log
//

// Implementation of fast exp() approximation (from a paper by Nicol N. Schraudolph,
// http://www.inf.ethz.ch/~schraudo/pubs/exp.pdf)
static union
{
	double d;
	struct
	{
#ifdef LL_LITTLE_ENDIAN
		S32 j, i;
#else
		S32 i, j;
#endif
	} n;
} LLECO; // not sure what the name means

#define LL_EXP_A (1048576 * OO_LN2) // use 1512775 for integer
#define LL_EXP_C (60801)			// this value of C good for -4 < y < 4

#define LL_FAST_EXP(y) (LLECO.n.i = llround(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d)



inline F32 llfastpow(const F32 x, const F32 y)
{
	return (F32)(LL_FAST_EXP(y * log(x)));
}
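
// Example (illustrative): LL_FAST_EXP() writes a biased exponent/mantissa value
// into the high word of LLECO.d so that the double reads back as roughly e^y.
// LL_FAST_EXP(1.f) comes out near 2.718 and llfastpow(2.f, 3.f) close to 8.f,
// each typically within a few percent for arguments in the quoted -4 < y < 4 range.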


inline F32 snap_to_sig_figs(F32 foo, S32 sig_figs)
{
	// compute the power of ten
	F32 bar = 1.f;
	for (S32 i = 0; i < sig_figs; i++)
	{
		bar *= 10.f;
	}

	foo = (F32)llround(foo * bar);

	// shift back
	foo /= bar;
	return foo;
}
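
// Note (illustrative): despite the name, this rounds to 'sig_figs' decimal
// places rather than significant figures: snap_to_sig_figs(3.14159f, 2) == 3.14f,
// and snap_to_sig_figs(987.654f, 1) == 987.7f (four significant digits).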

inline F32 lerp(F32 a, F32 b, F32 u)
{
	return a + ((b - a) * u);
}

inline F32 lerp2d(F32 x00, F32 x01, F32 x10, F32 x11, F32 u, F32 v)
{
	F32 a = x00 + (x01-x00)*u;
	F32 b = x10 + (x11-x10)*u;
	F32 r = a + (b-a)*v;
	return r;
}
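
// Example (illustrative): lerp(0.f, 10.f, 0.25f) == 2.5f, and lerp2d() is the
// bilinear version, interpolating along u first and then v:
// lerp2d(0.f, 1.f, 2.f, 3.f, 0.5f, 0.5f) == 1.5f.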

inline F32 ramp(F32 x, F32 a, F32 b)
{
	return (a == b) ? 0.0f : ((a - x) / (a - b));
}

inline F32 rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2)
{
	return lerp(y1, y2, ramp(x, x1, x2));
}

inline F32 clamp_rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2)
{
	if (y1 < y2)
	{
		return llclamp(rescale(x,x1,x2,y1,y2),y1,y2);
	}
	else
	{
		return llclamp(rescale(x,x1,x2,y1,y2),y2,y1);
	}
}
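
// Example (illustrative): rescale() maps x from [x1, x2] onto [y1, y2], so
// rescale(5.f, 0.f, 10.f, 100.f, 200.f) == 150.f; clamp_rescale() additionally
// clamps the result, so clamp_rescale(15.f, 0.f, 10.f, 100.f, 200.f) == 200.f.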


inline F32 cubic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 )
{
	if (x <= x0)
		return s0;

	if (x >= x1)
		return s1;

	F32 f = (x - x0) / (x1 - x0);

	return s0 + (s1 - s0) * (f * f) * (3.0f - 2.0f * f);
}

inline F32 cubic_step( F32 x )
{
	x = llclampf(x);

	return (x * x) * (3.0f - 2.0f * x);
}
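
// Example (illustrative): the one-argument cubic_step() is the familiar
// smoothstep curve, flat at both ends: cubic_step(0.25f) == 0.15625f and
// cubic_step(0.5f) == 0.5f.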

inline F32 quadratic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 )
{
	if (x <= x0)
		return s0;

	if (x >= x1)
		return s1;

	F32 f = (x - x0) / (x1 - x0);
	F32 f_squared = f * f;

	return (s0 * (1.f - f_squared)) + ((s1 - s0) * f_squared);
}
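
// Example (illustrative): quadratic_step() blends s0 into s1 by the square of
// the normalized position, so quadratic_step(0.5f, 0.f, 1.f, 0.f, 1.f) == 0.25f.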

inline F32 llsimple_angle(F32 angle)
{
	while(angle <= -F_PI)
		angle += F_TWO_PI;
	while(angle > F_PI)
		angle -= F_TWO_PI;
	return angle;
}
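
// Example (illustrative): llsimple_angle() wraps an angle into (-F_PI, F_PI],
// e.g. llsimple_angle(4.f * F_PI + 0.5f) returns approximately 0.5f.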

#endif