/*
  (C) Copyright 2001,2006,
  International Business Machines Corporation,
  Sony Computer Entertainment, Incorporated,
  Toshiba Corporation,

  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
    * Neither the names of the copyright holders nor the names of their
  contributors may be used to endorse or promote products derived from this
  software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
*/
#include <spu_intrinsics.h>
#include "vec_literal.h"
36 |
|
|
/* Compare the two strings s1 and s2. Return an integer less than, equal
|
37 |
|
|
* to, or greater than zero if s1 is found, respectively, to be less than,
|
38 |
|
|
* to match, or be greater than s2.
|
39 |
|
|
*/
|
40 |
|
|
|
41 |
|
|
int strcmp(const char *s1, const char *s2)
|
42 |
|
|
{
|
43 |
|
|
unsigned int offset1, offset2;
|
44 |
|
|
vec_uint4 gt_v, lt_v, mask_v;
|
45 |
|
|
vec_uint4 cnt1_v, cnt2_v;
|
46 |
|
|
vec_uint4 end1_v, end2_v, end_v, neq_v;
|
47 |
|
|
vec_uchar16 shuffle1, shuffle2;
|
48 |
|
|
vec_uchar16 data1A, data1B, data1, data2A, data2B, data2;
|
49 |
|
|
vec_uchar16 *ptr1, *ptr2;
|
50 |
|
|
|
51 |
|
|
ptr1 = (vec_uchar16 *)s1;
|
52 |
|
|
ptr2 = (vec_uchar16 *)s2;
|
53 |
|
|
|
54 |
|
|
offset1 = (unsigned int)(ptr1) & 15;
|
55 |
|
|
offset2 = (unsigned int)(ptr2) & 15;
|
56 |
|
|
|
57 |
|
|
shuffle1 = (vec_uchar16)spu_add((vec_uint4)spu_splats((unsigned char)offset1),
|
58 |
|
|
VEC_LITERAL(vec_uint4, 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F));
|
59 |
|
|
shuffle2 = (vec_uchar16)spu_add((vec_uint4)spu_splats((unsigned char)offset2),
|
60 |
|
|
VEC_LITERAL(vec_uint4, 0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F));
|
61 |
|
|
|
62 |
|
|
data1A = *ptr1++;
|
63 |
|
|
data2A = *ptr2++;
|
64 |
|
|
|
65 |
|
|
do {
|
66 |
|
|
data1B = *ptr1++;
|
67 |
|
|
data2B = *ptr2++;
|
68 |
|
|
|
69 |
|
|
data1 = spu_shuffle(data1A, data1B, shuffle1);
|
70 |
|
|
data2 = spu_shuffle(data2A, data2B, shuffle2);
|
71 |
|
|
|
72 |
|
|
data1A = data1B;
|
73 |
|
|
data2A = data2B;
|
74 |
|
|
|
75 |
|
|
neq_v = spu_gather(spu_xor(spu_cmpeq(data1, data2), -1));
|
76 |
|
|
|
77 |
|
|
end1_v = spu_gather(spu_cmpeq(data1, 0));
|
78 |
|
|
end2_v = spu_gather(spu_cmpeq(data2, 0));
|
79 |
|
|
end_v = spu_or(end1_v, end2_v), 0;
|
80 |
|
|
} while (spu_extract(spu_or(end_v, neq_v), 0) == 0);
|
81 |
|
|
|
82 |
|
|
cnt1_v = spu_cntlz(end1_v);
|
83 |
|
|
cnt2_v = spu_cntlz(end2_v);
|
84 |
|
|
|
85 |
|
|
gt_v = spu_gather(spu_cmpgt(data1, data2));
|
86 |
|
|
lt_v = spu_gather(spu_cmpgt(data2, data1));
|
87 |
|
|
|
88 |
|
|
mask_v = spu_and(spu_cmpeq(cnt1_v, cnt2_v),
|
89 |
|
|
spu_cmpeq(spu_rlmask(neq_v, (vec_int4)spu_add((vec_uint4)cnt1_v, -32)), 0));
|
90 |
|
|
|
91 |
|
|
gt_v = spu_sub(-1, spu_sl(spu_cmpgt(gt_v, lt_v), 1));
|
92 |
|
|
|
93 |
|
|
return (spu_extract(spu_andc(gt_v, mask_v), 0));
|
94 |
|
|
}
|