/* { dg-do run { target { powerpc*-*-* && vmx_hw } } } */
/* { dg-do compile { target { powerpc*-*-* && { ! vmx_hw } } } } */
/* { dg-require-effective-target powerpc_altivec_ok } */
/* { dg-options "-maltivec -O2" } */

#include <altivec.h>

int printf(const char *, ...);
extern void abort();

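/* foo() builds H.264-style deblocking boundary strengths (bS) for the
   vertical and horizontal edges of a macroblock using AltiVec.  All data
   loads come from the uninitialized mv_const scratch buffer; the only
   correctness check is the vec_all_eq block on constant vectors below.  */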
void foo(char *bS, char *bS_edge, int field_MBAFF, int top){
  char intra[16] __attribute__ ((aligned(16)));
  signed short mv_const[8] __attribute__ ((aligned(16)));

  vector signed short v_three, v_ref_mask00, v_ref_mask01, v_vec_maskv, v_vec_maskh;
  vector unsigned char v_permv, v_permh, v_bS, v_bSh, v_bSv, v_cbp_maskv, v_cbp_maskvn,
    v_cbp_maskh, v_cbp_maskhn, v_intra_maskh, v_intra_maskv, v_intra_maskhn, v_intra_maskvn;
  vector unsigned char tmp7, tmp8, tmp9, tmp10, v_c1, v_cbp1, v_cbp2, v_pocl, v_poch;
  vector signed short v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
    tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
  vector signed short idx0;
  vector signed short tmp00, tmp01, tmp02, tmp03;
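
  /* Note: despite its name, v_zero holds the printable bytes 'a'..'p';
     it is also used to build all-ones masks via vec_nor (v_zero, v_zero).  */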
  vector unsigned char v_zero =
    (vector unsigned char) {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p'};
  v_three = (vector signed short) vec_ld (0, (vector signed short *) mv_const);

  vector unsigned char v_coef_mask = vec_ld (0, (vector unsigned char *) mv_const);
  vector unsigned char v_coef_mask_hi = vec_splat (v_coef_mask, 0);
  vector unsigned char v_coef_mask_lo = vec_splat (v_coef_mask, 1);
  v_coef_mask = vec_sld (v_coef_mask_hi, v_coef_mask_lo, 8);
  vector unsigned char v_bit_mask = vec_sub (vec_splat_u8 (7), vec_lvsl (0, (unsigned char *) 0));
  v_bit_mask = vec_sld (vec_sld (v_bit_mask, v_bit_mask, 8), v_bit_mask, 8);
  v_bit_mask = vec_sl (vec_splat_u8 (1), v_bit_mask);
  tmp5 = (vector signed short) vec_and (v_coef_mask, v_bit_mask);
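
  /* Intra masks: splat the flag bytes, shift in the neighbouring column,
     and cmpgt against v_zero to get 0xff lanes for edges that touch an
     intra block (vertical and horizontal variants).  */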
  intra[0] = 1;
  tmp8 = vec_ld (0, (vector unsigned char *) intra);
  tmp9 = vec_ld (0, (vector unsigned char *) mv_const);
  tmp10 = vec_ld (0, (vector unsigned char *) mv_const);
  v_permv = vec_ld (0, (vector unsigned char *) mv_const);
  v_permh = vec_ld (0, (vector unsigned char *) mv_const);
  tmp6 = vec_ld (0, (vector signed short *) mv_const);

  tmp8 = vec_splat ((vector unsigned char) tmp8, 0);
  tmp9 = vec_splat ((vector unsigned char) tmp9, 12);
  tmp10 = vec_splat ((vector unsigned char) tmp10, 12);
  tmp9 = vec_sld ((vector unsigned char) tmp9, (vector unsigned char) tmp8, 12);
  tmp10 = vec_sld ((vector unsigned char) tmp10, (vector unsigned char) tmp8, 12);
  v_intra_maskv = vec_or (tmp9, tmp8);
  v_intra_maskh = vec_or (tmp10, tmp8);
  v_intra_maskv = (vector unsigned char) vec_cmpgt ((vector unsigned char) v_intra_maskv, (vector unsigned char) v_zero);
  v_intra_maskh = (vector unsigned char) vec_cmpgt ((vector unsigned char) v_intra_maskh, (vector unsigned char) v_zero);
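
  /* CBP masks: pick coded-block-pattern bytes with an lvsl-generated
     permute (top selects the offset) plus the preloaded v_permv/v_permh,
     then test against v_zero as above.  */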
  tmp9 = vec_lvsl (4 + (top<<2), (unsigned char *) 0x0);
  v_cbp1 = vec_perm ((vector unsigned char) tmp6, (vector unsigned char) tmp6, tmp9);
  v_cbp2 = (vector unsigned char) vec_perm ((vector unsigned char) tmp5, (vector unsigned char) tmp5, (vector unsigned char) v_permv);
  v_cbp1 = (vector unsigned char) vec_sld ((vector unsigned char) v_cbp1, (vector unsigned char) v_cbp2, 12);
  v_cbp_maskv = vec_or (v_cbp1, v_cbp2);

  tmp9 = vec_lvsl (12 + (top<<2), (unsigned char *) 0x0);
  v_cbp1 = vec_perm ((vector unsigned char) tmp6, (vector unsigned char) tmp6, tmp9);
  v_cbp2 = (vector unsigned char) vec_perm ((vector unsigned char) tmp5, (vector unsigned char) tmp5, (vector unsigned char) v_permh);
  v_cbp1 = (vector unsigned char) vec_sld ((vector unsigned char) v_cbp1, (vector unsigned char) v_cbp2, 12);
  v_cbp_maskh = vec_or (v_cbp1, v_cbp2);

  v_cbp_maskv = (vector unsigned char) vec_cmpgt ((vector unsigned char) v_cbp_maskv, (vector unsigned char) v_zero);
  v_cbp_maskh = (vector unsigned char) vec_cmpgt ((vector unsigned char) v_cbp_maskh, (vector unsigned char) v_zero);
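
  /* Reference-index table for the vertical pass; 0xff flags a lane whose
     reference is unavailable (caught by the cmpeq-with-all-ones select
     further down).  */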
  intra[0] = 0;
  intra[1] = 1;
  intra[2] = 2;
  intra[3] = 3;
  intra[4] = 4;
  intra[5] = 5;
  intra[6] = 6;
  intra[7] = 7;
  intra[8] = 8;
  intra[9] = 9;
  intra[10] = 9;
  intra[11] = 9;
  intra[12] = 0xff;

  idx0 = vec_ld (0, (signed short *) intra);

  v_c1 = (vector unsigned char) {'1','2','3','4','5','6','7','8','1','2','3','4','5','6','7','8'};
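
  /* MBAFF only: mask and shift the indices with v_c1 (arbitrary values in
     this reduced test) and compare shifted copies pairwise; in frame mode
     the three equality masks are simply all-ones.  */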
  if (field_MBAFF) {
    v0 = (vector signed short) vec_and ((vector unsigned char) idx0, v_c1);
    idx0 = (vector signed short) vec_sra ((vector unsigned char) idx0, v_c1);

    v1 = vec_sld (v0, v0, 15);
    v1 = (vector signed short) vec_pack (v1, v0);

    v2 = vec_sld (v1, v1, 2);
    v3 = vec_sld (v1, v1, 10);

    v4 = (vector signed short) vec_cmpeq ((vector signed char) v1, (vector signed char) v2);
    v5 = (vector signed short) vec_cmpeq ((vector signed char) v1, (vector signed char) v3);
    v6 = (vector signed short) vec_cmpeq ((vector signed char) v2, (vector signed char) v3);
  }
  else {
    v4 = v5 = v6 = (vector signed short) vec_nor (v_zero, v_zero);
  }
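
  /* Gather POC-style words through tmp1 as a vec_perm index, patch the
     first two halfwords from the alternate load, and substitute the
     splatted byte 12 wherever idx0 is 0xff (unavailable).  */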
  tmp1 = (vector signed short) vec_sl ((vector unsigned char) idx0, v_c1);
  v_c1 = vec_mergeh ((vector unsigned char) v_zero, v_c1);
  tmp1 = (vector signed short) vec_add (tmp1, (vector signed short) v_c1);

  v_pocl = vec_ld (0, (vector unsigned char *) mv_const);
  v_poch = vec_ld (0, (vector unsigned char *) mv_const);
  tmp2 = (vector signed short) vec_perm (v_pocl, v_poch, (vector unsigned char) tmp1);

  v_pocl = vec_ld (0, (vector unsigned char *) mv_const);
  v_poch = vec_ld (16, (vector unsigned char *) mv_const);
  tmp1 = (vector signed short) vec_perm (v_pocl, v_poch, (vector unsigned char) tmp1);
  tmp1 = vec_sel (tmp1, tmp2, (vector unsigned short) {0xffff,0xffff,0,0,0,0,0,0});

  tmp3 = (vector signed short) vec_splat ((vector unsigned char) idx0, 12);
  v_c1 = (vector unsigned char) vec_nor (v_zero, v_zero);
  tmp0 = (vector signed short) vec_cmpeq ((vector signed char) idx0, (vector signed char) v_c1);
  tmp1 = vec_sel (tmp1, (vector signed short) tmp3, (vector unsigned short) tmp0);

  tmp2 = vec_sld (tmp1, tmp1, 15);
  tmp1 = (vector signed short) vec_pack (tmp2, tmp1);

  tmp2 = vec_sld (tmp1, tmp1, 2);
  tmp3 = vec_sld (tmp1, tmp1, 10);

  tmp0 = (vector signed short) vec_cmpeq ((vector signed char) tmp1, (vector signed char) tmp2);
  tmp4 = (vector signed short) vec_cmpeq ((vector signed char) tmp1, (vector signed char) tmp3);
  tmp1 = (vector signed short) vec_cmpeq ((vector signed char) tmp2, (vector signed char) tmp3);
  tmp0 = vec_and (tmp0, v4);
  tmp4 = vec_and (tmp4, v5);
  tmp1 = vec_and (tmp1, v6);
  tmp2 = vec_sld ((vector signed short) tmp0, (vector signed short) tmp0, 8);
  tmp3 = vec_sld ((vector signed short) tmp4, (vector signed short) tmp4, 8);
  tmp5 = vec_sld ((vector signed short) tmp1, (vector signed short) tmp1, 8);
  tmp0 = vec_and (tmp0, tmp2);
  tmp4 = vec_and (tmp4, tmp3);
  tmp1 = vec_and (tmp1, tmp5);
  v_ref_mask00 = vec_mergeh ((vector signed short) tmp0, (vector signed short) v_c1);
  v_ref_mask01 = vec_mergeh ((vector signed short) tmp4, (vector signed short) tmp1);
  v_ref_mask00 = (vector signed short) vec_mergeh ((vector unsigned char) v_ref_mask00, (vector unsigned char) v_ref_mask00);
  v_ref_mask01 = (vector signed short) vec_mergeh ((vector unsigned char) v_ref_mask01, (vector unsigned char) v_ref_mask01);
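
  /* Vertical edges: |mv_a - mv_b| via max-min, vec_cmpgt against the
     v_three threshold, then fold the component results together and pack
     the halfword masks down to bytes in tmp00..tmp03.  */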
  v0 = vec_ld (0, (vector signed short *) mv_const);
  v1 = vec_ld (16, (vector signed short *) mv_const);
  v4 = vec_ld (64, (vector signed short *) mv_const);
  v5 = vec_ld (80, (vector signed short *) mv_const);
  v8 = vec_ld (0, (vector signed short *) mv_const);
  v9 = vec_ld (16, (vector signed short *) mv_const);

  tmp0 = (vector signed short) vec_perm ((vector unsigned char) v8, (vector unsigned char) v8,
                                         (vector unsigned char) {0,1,2,3,8,9,10,11,4,5,6,7,12,13,14,15});
  tmp1 = (vector signed short) vec_mergeh ((vector signed int) v0, (vector signed int) v1);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp3 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp4 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp0 = (vector signed short) vec_perm ((vector unsigned char) v9, (vector unsigned char) v9,
                                         (vector unsigned char) {0,1,2,3,8,9,10,11,4,5,6,7,12,13,14,15});
  tmp1 = (vector signed short) vec_mergeh ((vector signed int) v4, (vector signed int) v5);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp5 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp6 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 12);
  tmp6 = vec_sld (tmp4, tmp4, 12);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp00 = (vector signed short) vec_pack ((vector unsigned short) tmp3, (vector unsigned short) tmp4);

  tmp0 = (vector signed short) vec_mergeh ((vector signed int) v0, (vector signed int) v1);
  tmp1 = (vector signed short) vec_mergel ((vector signed int) v0, (vector signed int) v1);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp3 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp4 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);

  tmp0 = (vector signed short) vec_mergeh ((vector signed int) v4, (vector signed int) v5);
  tmp1 = (vector signed short) vec_mergel ((vector signed int) v4, (vector signed int) v5);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp5 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp6 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 12);
  tmp6 = vec_sld (tmp4, tmp4, 12);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp01 = (vector signed short) vec_pack ((vector unsigned short) tmp3, (vector unsigned short) tmp4);

  v2 = vec_ld (32, (vector signed short *) mv_const);
  v3 = vec_ld (48, (vector signed short *) mv_const);
  v6 = vec_ld (96, (vector signed short *) mv_const);
  v7 = vec_ld (112, (vector signed short *) mv_const);

  tmp0 = (vector signed short) vec_mergel ((vector signed int) v0, (vector signed int) v1);
  tmp1 = (vector signed short) vec_mergeh ((vector signed int) v2, (vector signed int) v3);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp3 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp4 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);

  tmp0 = (vector signed short) vec_mergel ((vector signed int) v4, (vector signed int) v5);
  tmp1 = (vector signed short) vec_mergeh ((vector signed int) v6, (vector signed int) v7);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp5 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp6 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 12);
  tmp6 = vec_sld (tmp4, tmp4, 12);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp02 = (vector signed short) vec_pack ((vector unsigned short) tmp3, (vector unsigned short) tmp4);

  tmp0 = (vector signed short) vec_mergeh ((vector signed int) v2, (vector signed int) v3);
  tmp1 = (vector signed short) vec_mergel ((vector signed int) v2, (vector signed int) v3);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp3 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp4 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);

  tmp0 = (vector signed short) vec_mergeh ((vector signed int) v6, (vector signed int) v7);
  tmp1 = (vector signed short) vec_mergel ((vector signed int) v6, (vector signed int) v7);
  tmp2 = vec_sld (tmp1, tmp1, 8);
  tmp5 = vec_sub (vec_max (tmp0, tmp1), vec_min (tmp0, tmp1));
  tmp6 = vec_sub (vec_max (tmp0, tmp2), vec_min (tmp0, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 12);
  tmp6 = vec_sld (tmp4, tmp4, 12);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp03 = (vector signed short) vec_pack ((vector unsigned short) tmp3, (vector unsigned short) tmp4);

  tmp0 = (vector signed short) vec_pack ((vector unsigned int) tmp00, (vector unsigned int) tmp01);
  tmp1 = (vector signed short) vec_pack ((vector unsigned int) tmp02, (vector unsigned int) tmp03);
  tmp2 = (vector signed short) vec_mergeh ((vector signed int) tmp0, (vector signed int) tmp1);
  tmp3 = (vector signed short) vec_mergel ((vector signed int) tmp0, (vector signed int) tmp1);
  tmp4 = (vector signed short) vec_mergeh ((vector signed int) tmp2, (vector signed int) tmp3);
  tmp5 = (vector signed short) vec_mergel ((vector signed int) tmp2, (vector signed int) tmp3);
  tmp4 = vec_and (v_ref_mask00, tmp4);
  tmp5 = vec_and (v_ref_mask01, tmp5);
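
  /* Combine the two reference-pairing masks with the MV comparison
     results (both-match, one-match, no-match cases) into the final
     vertical mask.  */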
  tmp0 = vec_nor (v_ref_mask00, v_ref_mask01);
  tmp1 = vec_and (v_ref_mask00, v_ref_mask01);
  tmp2 = vec_and (tmp4, tmp5);
  tmp2 = vec_and (tmp2, tmp1);
  tmp3 = vec_nor (tmp4, tmp5);
  tmp3 = vec_nor (tmp3, tmp1);
  v_vec_maskv = vec_or (tmp0, tmp2);
  v_vec_maskv = vec_or (v_vec_maskv, tmp3);
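
  /* Second pass, horizontal edges: rebuild the index table (note that
     intra[12] is not written here) and redo the reference comparison.  */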
  intra[0] = 1;
  intra[1] = 1;
  intra[2] = 2;
  intra[3] = 3;
  intra[4] = 2;
  intra[5] = 2;
  intra[6] = 2;
  intra[7] = 1;
  intra[8] = 1;
  intra[9] = 5;
  intra[10] = 5;
  intra[11] = 5;

  intra[13] = 0;
  intra[14] = 0;
  intra[15] = 0;

  idx0 = vec_ld (0, (signed short *) intra);

  v_c1 = (vector unsigned char) {'1','2','3','4','5','6','7','8','1','2','3','4','5','6','7','8'};

  if (field_MBAFF) {
    v8 = (vector signed short) vec_and ((vector unsigned char) idx0, v_c1);
    idx0 = (vector signed short) vec_sra ((vector unsigned char) idx0, v_c1);

    v9 = vec_sld (v8, v8, 15);
    v9 = (vector signed short) vec_pack (v9, v8);

    v10 = vec_sld (v9, v9, 2);
    v11 = vec_sld (v9, v9, 10);

    v8 = (vector signed short) vec_cmpeq ((vector signed char) v9, (vector signed char) v10);
    v9 = (vector signed short) vec_cmpeq ((vector signed char) v9, (vector signed char) v11);
    v10 = (vector signed short) vec_cmpeq ((vector signed char) v10, (vector signed char) v11);
  }
  else {
    v8 = v9 = v10 = (vector signed short) vec_nor (v_zero, v_zero);
  }

  tmp1 = (vector signed short) vec_sl ((vector unsigned char) idx0, v_c1);
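
  /* Debug dump: print v_c1 and v_zero bytewise; the elements are
     printable ASCII, hence %c.  */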
  if (1) {
    int m;
    unsigned char toto2[16] __attribute__ ((aligned(16)));

    printf("vc1\n");
    vec_st (v_c1, 0, (unsigned char *) toto2);
    for (m = 0; m < 16; m++) { printf("%c ", toto2[m]); }

    printf("\nv_zero\n");

    vec_st (v_zero, 0, (unsigned char *) toto2);
    for (m = 0; m < 16; m++) { printf("%c ", toto2[m]); }
    printf("\n");
  }

  v_c1 = vec_mergeh ((vector unsigned char) v_zero, v_c1);
  tmp1 = (vector signed short) vec_add (tmp1, (vector signed short) v_c1);
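
  /* The actual correctness check: vec_mergeh of v_zero ('a'..'p') with
     v_c1 ('1'..'8','1'..'8') must interleave to 'a','1','b','2',...,'h','8';
     anything else aborts.  */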
  if (1) {
    vector unsigned char vres =
      (vector unsigned char) {'a','1','b','2','c','3','d','4','e','5','f','6','g','7','h','8'};
    unsigned char toto2[16] __attribute__ ((aligned(16)));
    int m;

    printf("vc1\n");
    vec_st (v_c1, 0, (unsigned char *) toto2);
    for (m = 0; m < 16; m++) { printf("%c ", toto2[m]); }
    printf("\n");
    if (!vec_all_eq (vres, v_c1))
      abort();
  }

  v_pocl = vec_ld (32, (vector unsigned char *) mv_const);
  v_poch = vec_ld (48, (vector unsigned char *) mv_const);
  tmp2 = (vector signed short) vec_perm (v_pocl, v_poch, (vector unsigned char) tmp1);

  v_pocl = vec_ld (0, (vector unsigned char *) mv_const);
  v_poch = vec_ld (16, (vector unsigned char *) mv_const);
  tmp1 = (vector signed short) vec_perm (v_pocl, v_poch, (vector unsigned char) tmp1);
  tmp1 = vec_sel (tmp1, tmp2, (vector unsigned short) {0xffff,0xffff,0,0,0,0,0,0});

  tmp3 = (vector signed short) vec_splat ((vector unsigned char) idx0, 12);
  v_c1 = (vector unsigned char) vec_nor (v_zero, v_zero);
  tmp0 = (vector signed short) vec_cmpeq ((vector signed char) idx0, (vector signed char) v_c1);
  tmp1 = vec_sel (tmp1, (vector signed short) tmp3, (vector unsigned short) tmp0);

  tmp2 = vec_sld (tmp1, tmp1, 15);
  tmp1 = (vector signed short) vec_pack (tmp2, tmp1);

  tmp2 = vec_sld (tmp1, tmp1, 2);
  tmp3 = vec_sld (tmp1, tmp1, 10);

  tmp0 = (vector signed short) vec_cmpeq ((vector signed char) tmp1, (vector signed char) tmp2);
  tmp4 = (vector signed short) vec_cmpeq ((vector signed char) tmp1, (vector signed char) tmp3);
  tmp1 = (vector signed short) vec_cmpeq ((vector signed char) tmp2, (vector signed char) tmp3);
  tmp0 = vec_and (tmp0, v8);
  tmp4 = vec_and (tmp4, v9);
  tmp1 = vec_and (tmp1, v10);
  tmp2 = vec_sld ((vector signed short) tmp0, (vector signed short) tmp0, 8);
  tmp3 = vec_sld ((vector signed short) tmp4, (vector signed short) tmp4, 8);
  tmp5 = vec_sld ((vector signed short) tmp1, (vector signed short) tmp1, 8);
  tmp0 = vec_and (tmp0, tmp2);
  tmp4 = vec_and (tmp4, tmp3);
  tmp1 = vec_and (tmp1, tmp5);
  v_ref_mask00 = vec_mergeh ((vector signed short) tmp0, (vector signed short) v_c1);
  v_ref_mask01 = vec_mergeh ((vector signed short) tmp4, (vector signed short) tmp1);
  v_ref_mask00 = (vector signed short) vec_mergeh ((vector unsigned char) v_ref_mask00, (vector unsigned char) v_ref_mask00);
  v_ref_mask01 = (vector signed short) vec_mergeh ((vector unsigned char) v_ref_mask01, (vector unsigned char) v_ref_mask01);
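
  /* Horizontal edges: same max-min / cmpgt / fold pattern as above, but
     the neighbouring row is fetched by permuting with v_permv.  */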
  v_permv = vec_ld (0, (vector unsigned char *) mv_const);
  v8 = vec_ld (0, (vector signed short *) mv_const);
  v9 = vec_ld (16, (vector signed short *) mv_const);
  tmp2 = vec_perm (v0, v0, v_permv);
  tmp3 = vec_sub (vec_max (v8, v0), vec_min (v8, v0));
  tmp4 = vec_sub (vec_max (v8, tmp2), vec_min (v8, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);

  tmp2 = vec_perm (v2, v2, v_permv);
  tmp5 = vec_sub (vec_max (v9, v2), vec_min (v9, v2));
  tmp6 = vec_sub (vec_max (v9, tmp2), vec_min (v9, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp00 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp4);

  tmp2 = vec_perm (v1, v1, v_permv);
  tmp3 = vec_sub (vec_max (v0, v1), vec_min (v0, v1));
  tmp4 = vec_sub (vec_max (v0, tmp2), vec_min (v0, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);

  tmp2 = vec_perm (v3, v3, v_permv);
  tmp5 = vec_sub (vec_max (v2, v3), vec_min (v2, v3));
  tmp6 = vec_sub (vec_max (v2, tmp2), vec_min (v2, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp01 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp4);

  tmp2 = vec_perm (v4, v4, v_permv);
  tmp3 = vec_sub (vec_max (v1, v4), vec_min (v1, v4));
  tmp4 = vec_sub (vec_max (v1, tmp2), vec_min (v1, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);

  tmp2 = vec_perm (v6, v6, v_permv);
  tmp5 = vec_sub (vec_max (v3, v6), vec_min (v3, v6));
  tmp6 = vec_sub (vec_max (v3, tmp2), vec_min (v3, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp02 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp4);

  tmp2 = vec_perm (v5, v5, v_permv);
  tmp3 = vec_sub (vec_max (v4, v5), vec_min (v4, v5));
  tmp4 = vec_sub (vec_max (v4, tmp2), vec_min (v4, tmp2));
  tmp3 = (vector signed short) vec_cmpgt (tmp3, v_three);
  tmp4 = (vector signed short) vec_cmpgt (tmp4, v_three);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);

  tmp2 = vec_perm (v7, v7, v_permv);
  tmp5 = vec_sub (vec_max (v6, v7), vec_min (v6, v7));
  tmp6 = vec_sub (vec_max (v6, tmp2), vec_min (v6, tmp2));
  tmp5 = (vector signed short) vec_cmpgt (tmp5, v_three);
  tmp6 = (vector signed short) vec_cmpgt (tmp6, v_three);
  tmp0 = vec_sld (tmp5, tmp5, 14);
  tmp1 = vec_sld (tmp6, tmp6, 14);
  tmp5 = vec_or (tmp0, tmp5);
  tmp6 = vec_or (tmp1, tmp6);

  tmp3 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp5);
  tmp4 = (vector signed short) vec_pack ((vector unsigned int) tmp4, (vector unsigned int) tmp6);
  tmp5 = vec_sld (tmp3, tmp3, 14);
  tmp6 = vec_sld (tmp4, tmp4, 14);
  tmp3 = vec_or (tmp3, tmp5);
  tmp4 = vec_or (tmp4, tmp6);
  tmp03 = (vector signed short) vec_pack ((vector unsigned int) tmp3, (vector unsigned int) tmp4);

  tmp0 = (vector signed short) vec_pack ((vector unsigned short) tmp00, (vector unsigned short) tmp01);
  tmp1 = (vector signed short) vec_pack ((vector unsigned short) tmp02, (vector unsigned short) tmp03);
  tmp2 = (vector signed short) vec_mergeh ((vector signed int) tmp0, (vector signed int) tmp1);
  tmp3 = (vector signed short) vec_mergel ((vector signed int) tmp0, (vector signed int) tmp1);
  tmp4 = (vector signed short) vec_mergeh ((vector signed int) tmp2, (vector signed int) tmp3);
  tmp5 = (vector signed short) vec_mergel ((vector signed int) tmp2, (vector signed int) tmp3);
  tmp4 = vec_and (v_ref_mask00, tmp4);
  tmp5 = vec_and (v_ref_mask01, tmp5);

  tmp0 = vec_nor (v_ref_mask00, v_ref_mask01);
  tmp1 = vec_and (v_ref_mask00, v_ref_mask01);
  tmp2 = vec_and (tmp4, tmp5);
  tmp2 = vec_and (tmp2, tmp1);
  tmp3 = vec_nor (tmp4, tmp5);
  tmp3 = vec_nor (tmp3, tmp1);
  v_vec_maskh = vec_or (tmp0, tmp2);
  v_vec_maskh = vec_or (v_vec_maskh, tmp3);
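
  /* Priority among the three conditions: intra overrides cbp, which
     overrides the MV test, so the negated higher-priority masks knock
     the lower-priority ones out.  */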
  v_intra_maskvn = vec_nor (v_intra_maskv, v_intra_maskv);
  v_intra_maskhn = vec_nor (v_intra_maskh, v_intra_maskh);
  v_cbp_maskvn = (vector unsigned char) vec_cmpeq ((vector unsigned char) v_cbp_maskv, (vector unsigned char) v_zero);
  v_cbp_maskhn = (vector unsigned char) vec_cmpeq ((vector unsigned char) v_cbp_maskh, (vector unsigned char) v_zero);

  v_cbp_maskv = vec_and (v_cbp_maskv, v_intra_maskvn);
  v_cbp_maskh = vec_and (v_cbp_maskh, v_intra_maskhn);
  v_vec_maskv = vec_and (v_vec_maskv, (vector signed short) v_intra_maskvn);
  v_vec_maskv = vec_and (v_vec_maskv, (vector signed short) v_cbp_maskvn);
  v_vec_maskh = vec_and (v_vec_maskh, (vector signed short) v_intra_maskhn);
  v_vec_maskh = vec_and (v_vec_maskh, (vector signed short) v_cbp_maskhn);
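
  /* Assemble the boundary strengths: intra edges keep the loaded bS,
     cbp edges get 2, MV edges get 1; store, then widen each byte 4x
     with two mergeh passes and store again.  */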
  tmp9 = vec_splat_u8 (2);
  tmp8 = vec_splat_u8 (1);
  v_bS = vec_ld (0, (vector unsigned char *) mv_const);

  v_bSv = vec_and ((vector unsigned char) v_bS, (vector unsigned char) v_intra_maskv);
  tmp7 = vec_and ((vector unsigned char) tmp9, (vector unsigned char) v_cbp_maskv);
  tmp6 = (vector signed short) vec_and ((vector unsigned char) tmp8, (vector unsigned char) v_vec_maskv);
  tmp7 = vec_or ((vector unsigned char) tmp7, (vector unsigned char) tmp6);
  v_bSv = vec_or ((vector unsigned char) tmp7, (vector unsigned char) v_bSv);

  v_bS = vec_ld (0, (vector unsigned char *) mv_const);
  v_bSh = vec_and ((vector unsigned char) v_bS, (vector unsigned char) v_intra_maskh);
  tmp7 = vec_and ((vector unsigned char) tmp9, (vector unsigned char) v_cbp_maskh);
  tmp6 = (vector signed short) vec_and ((vector unsigned char) tmp8, (vector unsigned char) v_vec_maskh);
  tmp7 = vec_or ((vector unsigned char) tmp7, (vector unsigned char) tmp6);
  v_bSh = vec_or ((vector unsigned char) tmp7, (vector unsigned char) v_bSh);

  v_permh = (vector unsigned char) vec_ld (0, (vector unsigned char *) mv_const);
  v_permv = (vector unsigned char) vec_ld (0, (vector unsigned char *) mv_const);
  v_bSv = vec_and (v_bSv, v_permv);
  v_bSh = vec_and (v_bSh, v_permh);

  vec_st (v_bSv, 0, (unsigned char *) mv_const);
  vec_st (v_bSh, 0, (unsigned char *) mv_const);

  v_bSv = vec_mergeh (v_bSv, v_bSv);
  v_bSv = vec_mergeh (v_bSv, v_bSv);
  v_bSh = vec_mergeh (v_bSh, v_bSh);
  v_bSh = vec_mergeh (v_bSh, v_bSh);

  vec_st (v_bSv, 0, (vector unsigned char *) mv_const);
  vec_st (v_bSh, 0, (vector unsigned char *) mv_const);
}

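/* Driver: one aligned scratch buffer serves as both bS arrays; frame
   mode (field_MBAFF = 0) and top = 0.  */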
int main(int argc, char **argv)
{
  char toto[32] __attribute__ ((aligned(16)));

  foo(toto, toto, 0, 0);
  return 0;
}