/* --------------------------------------------------------------
 * (C)Copyright 2007,2008,
 * International Business Machines Corporation
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the
 * following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * - Neither the name of IBM Corporation nor the names of its
 *   contributors may be used to endorse or promote products
 *   derived from this software without specific prior written
 *   permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * --------------------------------------------------------------
 */
/* PROLOG END TAG zYx */
#ifdef __SPU__

#ifndef _LGAMMAD2_H_
#define _LGAMMAD2_H_ 1

#include <spu_intrinsics.h>
#include "divd2.h"
#include "recipd2.h"
#include "logd2.h"
#include "sind2.h"
#include "truncd2.h"

/*
 * FUNCTION
 *    vector double _lgammad2(vector double x) - Natural Log of Gamma Function
 *
 * DESCRIPTION
 *    _lgammad2 calculates the natural logarithm of the absolute value of the
 *    gamma function for the corresponding elements of the input vector.
 *
 * C99 Special Cases:
 *    lgamma(0) returns +infinity
 *    lgamma(1) returns +0
 *    lgamma(2) returns +0
 *    lgamma(negative integer) returns +infinity
 *    lgamma(+infinity) returns +infinity
 *    lgamma(-infinity) returns +infinity
 *
 * Other Cases:
 *    lgamma(NaN) returns NaN
 *    lgamma(denormal) is treated as lgamma(0) and returns +infinity
 *
 */
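
/*
 * Reference values (mathematical identities, useful for spot-checking this
 * routine; they are not derived from the code below):
 *    lgamma(0.5) = ln(sqrt(pi)) ~= 0.5723649429
 *    lgamma(6.0) = ln(5!) = ln(120) ~= 4.7874917428
 */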

#define PI 3.1415926535897932384626433832795028841971693993751058209749445923078164
#define HALFLOG2PI 9.1893853320467274178032973640561763986139747363778341281715154048276570E-1

#define EULER_MASCHERONI 0.5772156649015328606065

/*
 * Zeta constants for Maclaurin approx. near zero
 */
#define ZETA_02_DIV_02 8.2246703342411321823620758332301E-1
#define ZETA_03_DIV_03 -4.0068563438653142846657938717048E-1
#define ZETA_04_DIV_04 2.7058080842778454787900092413529E-1
#define ZETA_05_DIV_05 -2.0738555102867398526627309729141E-1
#define ZETA_06_DIV_06 1.6955717699740818995241965496515E-1

/*
 * More Maclaurin coefficients
 */
/*
#define ZETA_07_DIV_07 -1.4404989676884611811997107854997E-1
#define ZETA_08_DIV_08 1.2550966952474304242233565481358E-1
#define ZETA_09_DIV_09 -1.1133426586956469049087252991471E-1
#define ZETA_10_DIV_10 1.0009945751278180853371459589003E-1
#define ZETA_11_DIV_11 -9.0954017145829042232609298411497E-2
#define ZETA_12_DIV_12 8.3353840546109004024886499837312E-2
#define ZETA_13_DIV_13 -7.6932516411352191472827064348181E-2
#define ZETA_14_DIV_14 7.1432946295361336059232753221795E-2
#define ZETA_15_DIV_15 -6.6668705882420468032903448567376E-2
#define ZETA_16_DIV_16 6.2500955141213040741983285717977E-2
#define ZETA_17_DIV_17 -5.8823978658684582338957270605504E-2
#define ZETA_18_DIV_18 5.5555767627403611102214247869146E-2
#define ZETA_19_DIV_19 -5.2631679379616660733627666155673E-2
#define ZETA_20_DIV_20 5.0000047698101693639805657601934E-2
*/

/*
 * Coefficients for Stirling's Series for Lgamma()
 */
#define STIRLING_01 8.3333333333333333333333333333333333333333333333333333333333333333333333E-2
#define STIRLING_02 -2.7777777777777777777777777777777777777777777777777777777777777777777778E-3
#define STIRLING_03 7.9365079365079365079365079365079365079365079365079365079365079365079365E-4
#define STIRLING_04 -5.9523809523809523809523809523809523809523809523809523809523809523809524E-4
#define STIRLING_05 8.4175084175084175084175084175084175084175084175084175084175084175084175E-4
#define STIRLING_06 -1.9175269175269175269175269175269175269175269175269175269175269175269175E-3
#define STIRLING_07 6.4102564102564102564102564102564102564102564102564102564102564102564103E-3
#define STIRLING_08 -2.9550653594771241830065359477124183006535947712418300653594771241830065E-2
#define STIRLING_09 1.7964437236883057316493849001588939669435025472177174963552672531000704E-1
#define STIRLING_10 -1.3924322169059011164274322169059011164274322169059011164274322169059011E0
#define STIRLING_11 1.3402864044168391994478951000690131124913733609385783298826777087646653E1
#define STIRLING_12 -1.5684828462600201730636513245208897382810426288687158252375643679991506E2
#define STIRLING_13 2.1931033333333333333333333333333333333333333333333333333333333333333333E3
#define STIRLING_14 -3.6108771253724989357173265219242230736483610046828437633035334184759472E4
#define STIRLING_15 6.9147226885131306710839525077567346755333407168779805042318946657100161E5
/*
 * More Stirling's coefficients
 */
/*
#define STIRLING_16 -1.5238221539407416192283364958886780518659076533839342188488298545224541E7
#define STIRLING_17 3.8290075139141414141414141414141414141414141414141414141414141414141414E8
#define STIRLING_18 -1.0882266035784391089015149165525105374729434879810819660443720594096534E10
#define STIRLING_19 3.4732028376500225225225225225225225225225225225225225225225225225225225E11
#define STIRLING_20 -1.2369602142269274454251710349271324881080978641954251710349271324881081E13
#define STIRLING_21 4.8878806479307933507581516251802290210847053890567382180703629532735764E14
*/

static __inline vector double _lgammad2(vector double x)
{
  vec_uchar16 dup_even = ((vec_uchar16) { 0,1,2,3, 0,1,2,3, 8, 9,10,11, 8, 9,10,11 });
  vec_uchar16 dup_odd = ((vec_uchar16) { 4,5,6,7, 4,5,6,7, 12,13,14,15, 12,13,14,15 });
  vec_uchar16 swap_word = ((vec_uchar16) { 4,5,6,7, 0,1,2,3, 12,13,14,15, 8, 9,10,11 });
  vec_double2 infinited = (vec_double2)spu_splats(0x7FF0000000000000ull);
  vec_double2 zerod = spu_splats(0.0);
  vec_double2 oned = spu_splats(1.0);
  vec_double2 twod = spu_splats(2.0);
  vec_double2 pi = spu_splats(PI);
  vec_double2 sign_maskd = spu_splats(-0.0);

  /* This is where we switch from near zero approx. */
  vec_float4 zero_switch = spu_splats(0.001f);
  vec_float4 shift_switch = spu_splats(6.0f);

  vec_float4 xf;
  vec_double2 inv_x, inv_xsqu;
  vec_double2 xtrunc, xstirling;
  vec_double2 sum, xabs;
  vec_uint4 xhigh, xlow, xthigh, xtlow;
  vec_uint4 x1, isnaninf, isnposint, iszero, isint, isneg, isshifted, is1, is2;
  vec_double2 result, stresult, shresult, mresult, nresult;


  /* Force Denorms to 0 */
  x = spu_add(x, zerod);

  xabs = spu_andc(x, sign_maskd);
  xf = spu_roundtf(xabs);
  xf = spu_shuffle(xf, xf, dup_even);

  /*
   * For 0 < x <= 0.001.
   * Approximation Near Zero
   *
   * Use Maclaurin Expansion of lgamma()
   *
   * lgamma(z) = -ln(z) - z * EulerMascheroni + Sum[(-1)^n * z^n * Zeta(n)/n]
   */
  mresult = spu_madd(xabs, spu_splats(ZETA_06_DIV_06), spu_splats(ZETA_05_DIV_05));
  mresult = spu_madd(xabs, mresult, spu_splats(ZETA_04_DIV_04));
  mresult = spu_madd(xabs, mresult, spu_splats(ZETA_03_DIV_03));
  mresult = spu_madd(xabs, mresult, spu_splats(ZETA_02_DIV_02));
  mresult = spu_mul(xabs, spu_mul(xabs, mresult));
  mresult = spu_sub(mresult, spu_add(_logd2(xabs), spu_mul(xabs, spu_splats(EULER_MASCHERONI))));

  /*
   * For 0.001 < x <= 6.0, we push the value out to an area where
   * Stirling's approximation is accurate. Let's use a constant of 6.
   *
   * Use the recurrence relation:
   *    lgamma(x + 1) = ln(x) + lgamma(x)
   *
   * Note that we shift x here, before Stirling's calculation,
   * then after Stirling's, we adjust the result.
   */
  isshifted = spu_cmpgt(shift_switch, xf);
  xstirling = spu_sel(xabs, spu_add(xabs, spu_splats(6.0)), (vec_ullong2)isshifted);
  inv_x = _recipd2(xstirling);
  inv_xsqu = spu_mul(inv_x, inv_x);

  /*
   * For 6.0 < x < infinity
   *
   * Use Stirling's Series:
   *
   *    lgamma(x) = (1/2)ln(2*pi) + (x - 1/2)ln(x) - x
   *                + 1/(12x) - 1/(360x^3) + 1/(1260x^5) - ...
   *
   * Taking 10 terms of the sum gives good results for x > 6.0
   */
  sum = spu_madd(inv_xsqu, spu_splats(STIRLING_15), spu_splats(STIRLING_14));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_13));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_12));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_11));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_10));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_09));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_08));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_07));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_06));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_05));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_04));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_03));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_02));
  sum = spu_madd(sum, inv_xsqu, spu_splats(STIRLING_01));
  sum = spu_mul(sum, inv_x);

  stresult = spu_madd(spu_sub(xstirling, spu_splats(0.5)), _logd2(xstirling), spu_splats(HALFLOG2PI));
  stresult = spu_sub(stresult, xstirling);
  stresult = spu_add(stresult, sum);

  /*
   * Adjust the result if we shifted x into Stirling range:
   *
   *    lgamma(x) = lgamma(x + n) - ln(x(x+1)(x+2)...(x+n-1))
   */
  shresult = spu_mul(xabs, spu_add(xabs, spu_splats(1.0)));
  shresult = spu_mul(shresult, spu_add(xabs, spu_splats(2.0)));
  shresult = spu_mul(shresult, spu_add(xabs, spu_splats(3.0)));
  shresult = spu_mul(shresult, spu_add(xabs, spu_splats(4.0)));
  shresult = spu_mul(shresult, spu_add(xabs, spu_splats(5.0)));
  shresult = _logd2(shresult);
  shresult = spu_sub(stresult, shresult);
  stresult = spu_sel(stresult, shresult, (vec_ullong2)isshifted);


  /*
   * Select either Maclaurin or Stirling result before Negative X calc.
   */
  xf = spu_shuffle(xf, xf, dup_even);
  vec_uint4 useStirlings = spu_cmpgt(xf, zero_switch);
  result = spu_sel(mresult, stresult, (vec_ullong2)useStirlings);


  /*
   * Approximation for Negative X
   *
   * Use reflection relation
   *
   *    gamma(x) * gamma(-x) = -pi/(x sin(pi x))
   *
   *    lgamma(x) = log(pi/(-x sin(pi x))) - lgamma(-x)
   *
   * The sign bit of x*sin(pi*x) is cleared below so the log argument
   * is always positive.
   */
  nresult = spu_mul(x, _sind2(spu_mul(x, pi)));
  nresult = spu_andc(nresult, sign_maskd);
  nresult = _logd2(_divd2(pi, nresult));
  nresult = spu_sub(nresult, result);

  /*
   * Select between the negative or positive x approximations.
   */
  isneg = (vec_uint4)spu_shuffle(x, x, dup_even);
  isneg = spu_rlmaska(isneg, -32);
  result = spu_sel(result, nresult, (vec_ullong2)isneg);


  /*
   * Finally, special cases/errors.
   */
  xhigh = (vec_uint4)spu_shuffle(xabs, xabs, dup_even);
  xlow = (vec_uint4)spu_shuffle(xabs, xabs, dup_odd);

  /* x = zero, return infinity */
  x1 = spu_or(xhigh, xlow);
  iszero = spu_cmpeq(x1, 0);

  /* x = negative integer, return infinity */
  xtrunc = _truncd2(xabs);
  xthigh = (vec_uint4)spu_shuffle(xtrunc, xtrunc, dup_even);
  xtlow = (vec_uint4)spu_shuffle(xtrunc, xtrunc, dup_odd);
  isint = spu_and(spu_cmpeq(xthigh, xhigh), spu_cmpeq(xtlow, xlow));
  isnposint = spu_or(spu_and(isint, isneg), iszero);
  result = spu_sel(result, infinited, (vec_ullong2)isnposint);

  /* x = 1.0 or 2.0, return 0.0 (bit-exact compare of both 32-bit words) */
  is1 = spu_cmpeq((vec_uint4)x, (vec_uint4)oned);
  is1 = spu_and(is1, spu_shuffle(is1, is1, swap_word));
  is2 = spu_cmpeq((vec_uint4)x, (vec_uint4)twod);
  is2 = spu_and(is2, spu_shuffle(is2, is2, swap_word));
  result = spu_sel(result, zerod, (vec_ullong2)spu_or(is1, is2));

  /* x = +/- infinity or NaN, return |x| */
  isnaninf = spu_cmpgt(xhigh, 0x7FEFFFFF);
  result = spu_sel(result, xabs, (vec_ullong2)isnaninf);

  return result;
}
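
/*
 * Usage sketch (illustrative only; not part of this header):
 *
 *    vector double v = spu_splats(7.5);
 *    vector double r = _lgammad2(v);
 *
 * Each element of r then holds ln|gamma(7.5)|.
 */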

#endif /* _LGAMMAD2_H_ */
#endif /* __SPU__ */