/* -------------------------------------------------------------- */
/* (C)Copyright 2007,2008, */
/* International Business Machines Corporation */
/* All Rights Reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the */
/* following conditions are met: */
/* */
/* - Redistributions of source code must retain the above copyright*/
/* notice, this list of conditions and the following disclaimer. */
/* */
/* - Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* - Neither the name of IBM Corporation nor the names of its */
/* contributors may be used to endorse or promote products */
/* derived from this software without specific prior written */
/* permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */
/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR */
/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT */
/* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */
/* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN */
/* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR */
/* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, */
/* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/* -------------------------------------------------------------- */
/* PROLOG END TAG zYx */
#ifdef __SPU__
#ifndef _TANHD2_H_
#define _TANHD2_H_ 1

#include <spu_intrinsics.h>

#include "expd2.h"
#include "divd2.h"


/*
 * Taylor coefficients for tanh
 */
#define TANH_TAY01 1.000000000000000000000000000000E0
#define TANH_TAY02 -3.333333333333333333333333333333E-1
#define TANH_TAY03 1.333333333333333333333333333333E-1
#define TANH_TAY04 -5.396825396825396825396825396825E-2
#define TANH_TAY05 2.186948853615520282186948853616E-2
#define TANH_TAY06 -8.863235529902196568863235529902E-3
#define TANH_TAY07 3.592128036572481016925461369906E-3
#define TANH_TAY08 -1.455834387051318268249485180702E-3
#define TANH_TAY09 5.900274409455859813780759937000E-4
#define TANH_TAY10 -2.391291142435524814857314588851E-4
#define TANH_TAY11 9.691537956929450325595875000389E-5
#define TANH_TAY12 -3.927832388331683405337080809312E-5
#define TANH_TAY13 1.591890506932896474074427981657E-5
#define TANH_TAY14 -6.451689215655430763190842315303E-6
#define TANH_TAY15 2.614771151290754554263594256410E-6
#define TANH_TAY16 -1.059726832010465435091355394125E-6
#define TANH_TAY17 4.294911078273805854820351280397E-7

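/*
 * These constants are the leading Maclaurin coefficients of tanh,
 *
 *   tanh(x) = TANH_TAY01*x + TANH_TAY02*x^3 + TANH_TAY03*x^5 + ...
 *           = x - x^3/3 + 2*x^5/15 - 17*x^7/315 + ...
 *
 * The Taylor branch below evaluates this polynomial in x^2 with
 * Horner's rule (spu_madd) and multiplies by |x|; only the terms up
 * to TANH_TAY11 are actually used.
 */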

/*
 * FUNCTION
 *   vector double _tanhd2(vector double x)
 *
 * DESCRIPTION
 *   The _tanhd2 function computes the hyperbolic tangent for each
 *   element of the input vector.
 *
 *   We use the following to approximate tanh:
 *
 *     |x| <= 0.25: Taylor series
 *     |x| >  0.25: tanh(x) = (exp(2x) - 1)/(exp(2x) + 1)
 *
 * SPECIAL CASES:
 *   - tanh(+/- 0)        = +/- 0
 *   - tanh(+/- infinity) = +/- 1
 *   - tanh(NaN)          = NaN
 */
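
/*
 * The exponential form used for |x| > 0.25 follows directly from the
 * definition of tanh; multiplying numerator and denominator by exp(x)
 * gives
 *
 *   tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
 *           = (exp(2x) - 1) / (exp(2x) + 1)
 *
 * so only one call to _expd2 is needed per input.
 */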

static __inline vector double _tanhd2(vector double x)
{
  vector double signbit = spu_splats(-0.0);
  vector double oned = spu_splats(1.0);
  vector double twod = spu_splats(2.0);
  vector double infd = (vector double)spu_splats(0x7FF0000000000000ull);
  vector double xabs;
  vector double x2;
  vector unsigned long long gttaylor;
  vector double e;
  vector double tresult;
  vector double eresult;
  vector double result;

  xabs = spu_andc(x, signbit);

  /*
   * This is where we switch from the Taylor series
   * to the exponential formula.
   */
  gttaylor = spu_cmpgt(xabs, spu_splats(0.25));

  /*
   * Taylor Series Approximation
   */
  x2 = spu_mul(x, x);
  tresult = spu_madd(x2, spu_splats(TANH_TAY11), spu_splats(TANH_TAY10));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY09));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY08));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY07));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY06));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY05));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY04));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY03));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY02));
  tresult = spu_madd(x2, tresult, spu_splats(TANH_TAY01));
  tresult = spu_mul(xabs, tresult);

  /*
   * Exponential Formula
   * Our _expd2 function generally gives a more accurate result with
   * xabs rather than x for x < 0. We correct for the sign later.
   */
  e = _expd2(spu_mul(xabs, twod));
  eresult = _divd2(spu_sub(e, oned), spu_add(e, oned));

  /*
   * Select the Taylor or exponential result.
   */
  result = spu_sel(tresult, eresult, gttaylor);

  /*
   * Inf and NaN special cases. NaN is already in result
   * for x = NaN.
   */
  result = spu_sel(result, oned, spu_cmpeq(xabs, infd));

  /*
   * Antisymmetric function - preserve the sign bit of x
   * in the result.
   */
  result = spu_sel(result, x, (vec_ullong2)signbit);

  return result;
}
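
/*
 * Usage sketch (illustrative values only):
 *
 *   vector double v = spu_insert(-2.0, spu_splats(0.5), 1);  // { 0.5, -2.0 }
 *   vector double t = _tanhd2(v);   // approximately { 0.4621172, -0.9640276 }
 */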

#endif /* _TANHD2_H_ */
#endif /* __SPU__ */