/* --------------------------------------------------------------   */
/* (C)Copyright 2001,2008,                                           */
/* International Business Machines Corporation,                      */
/* Sony Computer Entertainment, Incorporated,                        */
/* Toshiba Corporation,                                              */
/*                                                                   */
/* All Rights Reserved.                                              */
/*                                                                   */
/* Redistribution and use in source and binary forms, with or        */
/* without modification, are permitted provided that the             */
/* following conditions are met:                                     */
/*                                                                   */
/* - Redistributions of source code must retain the above copyright  */
/*   notice, this list of conditions and the following disclaimer.   */
/*                                                                   */
/* - Redistributions in binary form must reproduce the above         */
/*   copyright notice, this list of conditions and the following     */
/*   disclaimer in the documentation and/or other materials          */
/*   provided with the distribution.                                 */
/*                                                                   */
/* - Neither the name of IBM Corporation nor the names of its        */
/*   contributors may be used to endorse or promote products         */
/*   derived from this software without specific prior written       */
/*   permission.                                                     */
/*                                                                   */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND            */
/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,       */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF          */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE          */
/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR              */
/* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,      */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT      */
/* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;      */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)          */
/* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN         */
/* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR      */
/* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,    */
/* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.                */
/* --------------------------------------------------------------   */
/* PROLOG END TAG zYx                                                */
#ifdef __SPU__
#ifndef _POWD2_H_
#define _POWD2_H_ 1

#include "exp2d2.h"
#include "log2d2.h"

/*
 * FUNCTION
 *   vector double _powd2(vector double x, vector double y)
 *
 * DESCRIPTION
 *   The _powd2 function computes x raised to the power y for each
 *   double-precision element of the input vectors. It is computed by
 *   decomposing the problem into:
 *
 *      x^y = 2^(y*log2(x))
 *
 */
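
/*
 * For intuition, a scalar sketch of the same decomposition follows.
 * The helper name powd_ref and the <math.h> calls are illustrative
 * only and are not part of this header; the vector code below adds
 * the sign handling and integer-exponent special cases on top of
 * this core identity.
 */
#if 0
#include <math.h>

static double powd_ref(double x, double y)
{
  /* Core identity: x^y = 2^(y*log2(x)), valid for x > 0. */
  return exp2(y * log2(fabs(x)));
}
#endif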

static __inline vector double _powd2(vector double x, vector double y)
{
  vec_uchar16 splat_hi = (vec_uchar16) { 0,1,2,3,0,1,2,3, 8,9,10,11, 8,9,10,11 };
  vec_int4 exp, shift;
  vec_uint4 sign = (vec_uint4) { 0x80000000, 0, 0x80000000, 0 };
  vec_uint4 or_mask, and_mask, evenmask, intmask;
  vec_double2 in_hi;
  vector double signmask = spu_splats(-0.0);
  vector signed int error = spu_splats(-1);
  vector double zero = spu_splats(0.0);
  vector unsigned int y_is_int, y_is_odd, y_is_even;
  vector unsigned int x_is_neg;
  vector double xabs, xsign;
  vector double out;
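
  /* -0.0 has only the sign bit set, so ANDing with it isolates the
     sign of x, and AND-with-complement clears it to give |x|. */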
  xsign = spu_and(x, signmask);
  xabs = spu_andc(x, signmask);
  x_is_neg = (vec_uint4)spu_cmpgt(zero, x);

  /* First we solve assuming x was non-negative */
  out = _exp2d2(spu_mul(y, _log2d2(xabs)));
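
  /* Splat the high word of each double across its half of the vector,
     then shift the 11-bit IEEE-754 exponent field down into the low
     bits and mask it off. */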
  in_hi = spu_shuffle(y, y, splat_hi);
  exp = spu_and(spu_rlmask((vec_int4)in_hi, -20), 0x7FF);
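
  /* y is an integer exactly when its mantissa carries no bits below
     the binary point. The constants 1023 and 1043 are the exponent
     bias adjusted for the high word (20 mantissa bits) and low word
     (32 mantissa bits) of each double; intmask ends up covering
     exactly the fraction bits of y, so clearing them changes y only
     when y has a fractional part. */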
  /* Determine if y is an integer */
  shift = spu_sub(((vec_int4) { 1023, 1043, 1023, 1043 }), exp);
  or_mask = spu_andc(spu_cmpgt(shift, 0), sign);
  and_mask = spu_rlmask(((vec_uint4) { 0xFFFFF, -1, 0xFFFFF, -1 }), shift);
  intmask = spu_or(spu_and(and_mask, spu_cmpgt(shift, -32)), or_mask);
  y_is_int = (vec_uint4)spu_cmpeq(y, spu_andc(y, (vec_double2)(intmask)));
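
  /* Same test with the mask widened by one bit (constants bumped from
     1023/1043 to 1024/1044) so that it also covers the units bit:
     clearing through the units bit leaves y unchanged only when y is
     an even integer. */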
  /* Determine if y is an even integer */
  shift = spu_sub(((vec_int4) { 1024, 1044, 1024, 1044 }), exp);
  or_mask = spu_andc(spu_cmpgt(shift, 0), sign);
  and_mask = spu_rlmask(((vec_uint4) { 0xFFFFF, -1, 0xFFFFF, -1 }), shift);
  evenmask = spu_or(spu_and(and_mask, spu_cmpgt(shift, -32)), or_mask);
  y_is_even = (vec_uint4)spu_cmpeq(y, spu_andc(y, (vec_double2)(evenmask)));

  y_is_odd = spu_andc(y_is_int, y_is_even);

  /* Special Cases */
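
  /* The error pattern is all ones, which reinterpreted as a double is
     a NaN; it is selected wherever x is negative but y is not an
     integer, since the real-valued result is undefined there. */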
  /* x < 0 is only ok when y is an integer */
  out = spu_sel(out, (vec_double2)error, (vec_ullong2)spu_andc(x_is_neg, y_is_int));
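
  /* For negative x with odd integer y, the magnitude computed from |x|
     is already correct; only the sign bit needs to be copied back in. */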
  /* Preserve the sign of x if y is an odd integer */
  out = spu_sel(out, spu_or(out, xsign), (vec_ullong2)y_is_odd);
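
  /* spu_cmpabseq compares magnitudes, so this test matches both +0.0
     and -0.0 in y. */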
  /* x = anything, y = +/- 0, returns 1 */
  out = spu_sel(out, spu_splats(1.0), spu_cmpabseq(y, zero));

  return(out);
}
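
/*
 * Usage sketch (illustrative only, not part of this header): each call
 * computes two results, one per double-precision element.
 *
 *   vector double base = spu_splats(2.0);
 *   vector double expo = spu_splats(10.0);
 *   vector double r    = _powd2(base, expo);   // both elements: 1024.0
 */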

#endif /* _POWD2_H_ */
#endif /* __SPU__ */