;; Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

;; This file is free software; you can redistribute it and/or modify it under
;; the terms of the GNU General Public License as published by the Free
;; Software Foundation; either version 3 of the License, or (at your option)
;; any later version.

;; This file is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
;; FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
;; for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.


;; Define an insn type attribute.  This is used in function unit delay
;; computations.
;; multi0 is a multiple insn rtl whose first insn is in pipe0
;; multi1 is a multiple insn rtl whose first insn is in pipe1
(define_attr "type" "fx2,shuf,fx3,load,store,br,spr,lnop,nop,fxb,fp6,fp7,fpd,iprefetch,multi0,multi1,hbr,convert"
  (const_string "fx2"))

;; Length (in bytes).
(define_attr "length" ""
  (const_int 4))

(define_attr "tune" "cell,celledp" (const (symbol_ref "spu_tune")))
;; Processor type -- this attribute must exactly match the processor_type
;; enumeration in spu.h.

(define_attr "cpu" "spu"
  (const (symbol_ref "spu_cpu_attr")))

; (define_function_unit NAME MULTIPLICITY SIMULTANEITY
;  TEST READY-DELAY ISSUE-DELAY [CONFLICT-LIST])

(define_cpu_unit "pipe0,pipe1,fp,ls")

(define_insn_reservation "NOP" 1 (eq_attr "type" "nop")
    "pipe0")

(define_insn_reservation "FX2" 2 (eq_attr "type" "fx2")
    "pipe0, nothing")

(define_insn_reservation "FX3" 4 (eq_attr "type" "fx3,fxb")
    "pipe0, nothing*3")

(define_insn_reservation "FP6" 6 (eq_attr "type" "fp6")
    "pipe0 + fp, nothing*5")

(define_insn_reservation "FP7" 7 (eq_attr "type" "fp7")
    "pipe0, fp, nothing*5")

;; The behavior of the double precision is that both pipes stall
;; for 6 cycles and the rest of the operation pipelines for
;; 7 cycles.  The simplest way to model this is to simply ignore
;; the 6 cycle stall.
(define_insn_reservation "FPD" 7
  (and (eq_attr "tune" "cell")
       (eq_attr "type" "fpd"))
    "pipe0 + pipe1, fp, nothing*5")

;; Tune for CELLEDP, 9 cycles, dual-issuable, fully pipelined
(define_insn_reservation "FPD_CELLEDP" 9
  (and (eq_attr "tune" "celledp")
       (eq_attr "type" "fpd"))
  "pipe0 + fp, nothing*8")

(define_insn_reservation "LNOP" 1 (eq_attr "type" "lnop")
    "pipe1")

(define_insn_reservation "STORE" 1 (eq_attr "type" "store")
    "pipe1 + ls")

(define_insn_reservation "IPREFETCH" 1 (eq_attr "type" "iprefetch")
    "pipe1 + ls")

(define_insn_reservation "SHUF" 4 (eq_attr "type" "shuf,br,spr")
    "pipe1, nothing*3")

(define_insn_reservation "LOAD" 6 (eq_attr "type" "load")
    "pipe1 + ls, nothing*5")

(define_insn_reservation "HBR" 18 (eq_attr "type" "hbr")
    "pipe1, nothing*15")

(define_insn_reservation "MULTI0" 4 (eq_attr "type" "multi0")
    "pipe0+pipe1, nothing*3")

(define_insn_reservation "MULTI1" 4 (eq_attr "type" "multi1")
    "pipe1, nothing*3")

(define_insn_reservation "CONVERT" 0 (eq_attr "type" "convert")
    "nothing")

;; Force pipe0 to occur before pipe 1 in a cycle.
(absence_set "pipe0" "pipe1")

(define_c_enum "unspec" [
  UNSPEC_IPREFETCH
  UNSPEC_FREST
  UNSPEC_FRSQEST
  UNSPEC_FI
  UNSPEC_EXTEND_CMP
  UNSPEC_CG
  UNSPEC_CGX
  UNSPEC_ADDX
  UNSPEC_BG
  UNSPEC_BGX
  UNSPEC_SFX
  UNSPEC_FSM
  UNSPEC_HBR
  UNSPEC_NOP
  UNSPEC_CONVERT
  UNSPEC_SELB
  UNSPEC_SHUFB
  UNSPEC_CPAT
  UNSPEC_CNTB
  UNSPEC_SUMB
  UNSPEC_FSMB
  UNSPEC_FSMH
  UNSPEC_GBB
  UNSPEC_GBH
  UNSPEC_GB
  UNSPEC_AVGB
  UNSPEC_ABSDB
  UNSPEC_ORX
  UNSPEC_HEQ
  UNSPEC_HGT
  UNSPEC_HLGT
  UNSPEC_STOP
  UNSPEC_STOPD
  UNSPEC_SET_INTR
  UNSPEC_FSCRRD
  UNSPEC_FSCRWR
  UNSPEC_MFSPR
  UNSPEC_MTSPR
  UNSPEC_RDCH
  UNSPEC_RCHCNT
  UNSPEC_WRCH
  UNSPEC_SPU_REALIGN_LOAD
  UNSPEC_SPU_MASK_FOR_LOAD
  UNSPEC_DFTSV
  UNSPEC_FLOAT_EXTEND
  UNSPEC_FLOAT_TRUNCATE
  UNSPEC_SP_SET
  UNSPEC_SP_TEST
])

(define_c_enum "unspecv" [
  UNSPECV_BLOCKAGE
  UNSPECV_LNOP
  UNSPECV_NOP
  UNSPECV_SYNC
])

(include "predicates.md")
(include "constraints.md")


;; Mode iterators

(define_mode_iterator ALL [QI V16QI
                           HI V8HI
                           SI V4SI
                           DI V2DI
                           TI
                           SF V4SF
                           DF V2DF])

; Everything except DI and TI which are handled separately because
; they need different constraints to correctly test VOIDmode constants
(define_mode_iterator MOV [QI V16QI
                           HI V8HI
                           SI V4SI
                           V2DI
                           SF V4SF
                           DF V2DF])

(define_mode_iterator QHSI  [QI HI SI])
(define_mode_iterator QHSDI [QI HI SI DI])
(define_mode_iterator DTI   [DI TI])

(define_mode_iterator VINT [QI V16QI
                            HI V8HI
                            SI V4SI
                            DI V2DI
                            TI])

(define_mode_iterator VQHSI [QI V16QI
                             HI V8HI
                             SI V4SI])

(define_mode_iterator VHSI [HI V8HI
                            SI V4SI])

(define_mode_iterator VSDF [SF V4SF
                            DF V2DF])

(define_mode_iterator VSI [SI V4SI])
(define_mode_iterator VDI [DI V2DI])
(define_mode_iterator VSF [SF V4SF])
(define_mode_iterator VDF [DF V2DF])

(define_mode_iterator VCMP [V16QI
                            V8HI
                            V4SI
                            V4SF
                            V2DF])

(define_mode_iterator VCMPU [V16QI
                             V8HI
                             V4SI])

(define_mode_attr v   [(V8HI "v") (V4SI "v")
                       (HI "")    (SI "")])

(define_mode_attr bh  [(QI "b") (V16QI "b")
                       (HI "h") (V8HI "h")
                       (SI "")  (V4SI "")])

(define_mode_attr d   [(SF "")  (V4SF "")
                       (DF "d") (V2DF "d")])
(define_mode_attr d6  [(SF "6") (V4SF "6")
                       (DF "d") (V2DF "d")])

(define_mode_attr f2i [(SF "si") (V4SF "v4si")
                       (DF "di") (V2DF "v2di")])
(define_mode_attr F2I [(SF "SI") (V4SF "V4SI")
                       (DF "DI") (V2DF "V2DI")])
(define_mode_attr i2f [(SI "sf") (V4SI "v4sf")
                       (DI "df") (V2DI "v2df")])
(define_mode_attr I2F [(SI "SF") (V4SI "V4SF")
                       (DI "DF") (V2DI "V2DF")])

(define_mode_attr DF2I [(DF "SI") (V2DF "V2DI")])

(define_mode_attr umask [(HI "f") (V8HI "f")
                         (SI "g") (V4SI "g")])
(define_mode_attr nmask [(HI "F") (V8HI "F")
                         (SI "G") (V4SI "G")])

;; Used for carry and borrow instructions.
(define_mode_iterator CBOP [SI DI V4SI V2DI])

;; Used in vec_set and vec_extract
(define_mode_iterator V [V2DI V4SI V8HI V16QI V2DF V4SF])
(define_mode_attr inner [(V16QI "QI")
                         (V8HI "HI")
                         (V4SI "SI")
                         (V2DI "DI")
                         (V4SF "SF")
                         (V2DF "DF")])
(define_mode_attr vmult [(V16QI "1")
                         (V8HI "2")
                         (V4SI "4")
                         (V2DI "8")
                         (V4SF "4")
                         (V2DF "8")])
(define_mode_attr voff  [(V16QI "13")
                         (V8HI "14")
                         (V4SI "0")
                         (V2DI "0")
                         (V4SF "0")
                         (V2DF "0")])

;; mov

(define_expand "mov<mode>"
  [(set (match_operand:ALL 0 "nonimmediate_operand" "")
        (match_operand:ALL 1 "general_operand" ""))]
  ""
  {
    if (spu_expand_mov(operands, <MODE>mode))
      DONE;
  })

(define_split
  [(set (match_operand 0 "spu_reg_operand")
        (match_operand 1 "immediate_operand"))]

  ""
  [(set (match_dup 0)
        (high (match_dup 1)))
   (set (match_dup 0)
        (lo_sum (match_dup 0)
                (match_dup 1)))]
  {
    if (spu_split_immediate (operands))
      DONE;
    FAIL;
  })

(define_insn "pic"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (match_operand:SI 1 "immediate_operand" "s"))
   (use (const_int 0))]
  "flag_pic"
  "ila\t%0,%%pic(%1)")

;; Whenever a function generates the 'pic' pattern above we need to
;; load the pic_offset_table register.
;; GCC doesn't deal well with labels in the middle of a block so we
;; hardcode the offsets in the asm here.
(define_insn "load_pic_offset"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (unspec:SI [(const_int 0)] 0))
   (set (match_operand:SI 1 "spu_reg_operand" "=r")
        (unspec:SI [(const_int 0)] 0))]
  "flag_pic"
  "ila\t%1,.+8\;brsl\t%0,4"
  [(set_attr "length" "8")
   (set_attr "type" "multi0")])


;; move internal

(define_insn "_mov<mode>"
  [(set (match_operand:MOV 0 "spu_dest_operand" "=r,r,r,r,r,m")
        (match_operand:MOV 1 "spu_mov_operand" "r,A,f,j,m,r"))]
  "register_operand(operands[0], <MODE>mode)
   || register_operand(operands[1], <MODE>mode)"
  "@
   ori\t%0,%1,0
   il%s1\t%0,%S1
   fsmbi\t%0,%S1
   c%s1d\t%0,%S1($sp)
   lq%p1\t%0,%1
   stq%p0\t%1,%0"
  [(set_attr "type" "fx2,fx2,shuf,shuf,load,store")])

(define_insn "low_<mode>"
  [(set (match_operand:VSI 0 "spu_reg_operand" "=r")
        (lo_sum:VSI (match_operand:VSI 1 "spu_reg_operand" "0")
                    (match_operand:VSI 2 "immediate_operand" "i")))]
  ""
  "iohl\t%0,%2@l")

(define_insn "_movdi"
  [(set (match_operand:DI 0 "spu_dest_operand" "=r,r,r,r,r,m")
        (match_operand:DI 1 "spu_mov_operand" "r,a,f,k,m,r"))]
  "register_operand(operands[0], DImode)
   || register_operand(operands[1], DImode)"
  "@
   ori\t%0,%1,0
   il%d1\t%0,%D1
   fsmbi\t%0,%D1
   c%d1d\t%0,%D1($sp)
   lq%p1\t%0,%1
   stq%p0\t%1,%0"
  [(set_attr "type" "fx2,fx2,shuf,shuf,load,store")])

(define_insn "_movti"
  [(set (match_operand:TI 0 "spu_dest_operand" "=r,r,r,r,r,m")
        (match_operand:TI 1 "spu_mov_operand" "r,U,f,l,m,r"))]
  "register_operand(operands[0], TImode)
   || register_operand(operands[1], TImode)"
  "@
   ori\t%0,%1,0
   il%t1\t%0,%T1
   fsmbi\t%0,%T1
   c%t1d\t%0,%T1($sp)
   lq%p1\t%0,%1
   stq%p0\t%1,%0"
  [(set_attr "type" "fx2,fx2,shuf,shuf,load,store")])

(define_split
  [(set (match_operand 0 "spu_reg_operand")
        (match_operand 1 "memory_operand"))]
  "GET_MODE_SIZE (GET_MODE (operands[0])) < 16
   && GET_MODE(operands[0]) == GET_MODE(operands[1])
   && !reload_in_progress && !reload_completed"
  [(set (match_dup 0)
        (match_dup 1))]
  { if (spu_split_load(operands))
      DONE;
  })

(define_split
  [(set (match_operand 0 "memory_operand")
        (match_operand 1 "spu_reg_operand"))]
  "GET_MODE_SIZE (GET_MODE (operands[0])) < 16
   && GET_MODE(operands[0]) == GET_MODE(operands[1])
   && !reload_in_progress && !reload_completed"
  [(set (match_dup 0)
        (match_dup 1))]
  { if (spu_split_store(operands))
      DONE;
  })
;; Operand 3 is the number of bytes. 1:b 2:h 4:w 8:d

(define_expand "cpat"
  [(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
        (unspec:TI [(match_operand:SI 1 "spu_reg_operand" "r,r")
                    (match_operand:SI 2 "spu_nonmem_operand" "r,n")
                    (match_operand:SI 3 "immediate_operand" "i,i")] UNSPEC_CPAT))]
  ""
  {
    rtx x = gen_cpat_const (operands);
    if (x)
      {
        emit_move_insn (operands[0], x);
        DONE;
      }
  })

(define_insn "_cpat"
  [(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
        (unspec:TI [(match_operand:SI 1 "spu_reg_operand" "r,r")
                    (match_operand:SI 2 "spu_nonmem_operand" "r,n")
                    (match_operand:SI 3 "immediate_operand" "i,i")] UNSPEC_CPAT))]
  ""
  "@
   c%M3x\t%0,%1,%2
   c%M3d\t%0,%C2(%1)"
  [(set_attr "type" "shuf")])

(define_split
  [(set (match_operand:TI 0 "spu_reg_operand")
        (unspec:TI [(match_operand:SI 1 "spu_nonmem_operand")
                    (match_operand:SI 2 "immediate_operand")
                    (match_operand:SI 3 "immediate_operand")] UNSPEC_CPAT))]
  ""
  [(set (match_dup:TI 0)
        (match_dup:TI 4))]
  {
    operands[4] = gen_cpat_const (operands);
    if (!operands[4])
      FAIL;
  })
;; extend

(define_insn "extendqihi2"
  [(set (match_operand:HI 0 "spu_reg_operand" "=r")
        (sign_extend:HI (match_operand:QI 1 "spu_reg_operand" "r")))]
  ""
  "xsbh\t%0,%1")

(define_insn "extendhisi2"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (sign_extend:SI (match_operand:HI 1 "spu_reg_operand" "r")))]
  ""
  "xshw\t%0,%1")

(define_expand "extendsidi2"
  [(set (match_dup:DI 2)
        (zero_extend:DI (match_operand:SI 1 "spu_reg_operand" "")))
   (set (match_operand:DI 0 "spu_reg_operand" "")
        (sign_extend:DI (vec_select:SI (match_dup:V2SI 3)
                                       (parallel [(const_int 1)]))))]
  ""
  {
    operands[2] = gen_reg_rtx (DImode);
    operands[3] = spu_gen_subreg (V2SImode, operands[2]);
  })

(define_insn "xswd"
  [(set (match_operand:DI 0 "spu_reg_operand" "=r")
        (sign_extend:DI
          (vec_select:SI
            (match_operand:V2SI 1 "spu_reg_operand" "r")
            (parallel [(const_int 1) ]))))]
  ""
  "xswd\t%0,%1");

;; By splitting this late we don't allow much opportunity for sharing of
;; constants.  That's ok because this should really be optimized away.
(define_insn_and_split "extend<mode>ti2"
  [(set (match_operand:TI 0 "register_operand" "")
        (sign_extend:TI (match_operand:QHSDI 1 "register_operand" "")))]
  ""
  "#"
  ""
  [(set (match_dup:TI 0)
        (sign_extend:TI (match_dup:QHSDI 1)))]
  {
    spu_expand_sign_extend(operands);
    DONE;
  })

;; zero_extend

(define_insn "zero_extendqihi2"
  [(set (match_operand:HI 0 "spu_reg_operand" "=r")
        (zero_extend:HI (match_operand:QI 1 "spu_reg_operand" "r")))]
  ""
  "andi\t%0,%1,0x00ff")

(define_insn "zero_extendqisi2"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (zero_extend:SI (match_operand:QI 1 "spu_reg_operand" "r")))]
  ""
  "andi\t%0,%1,0x00ff")

(define_expand "zero_extendhisi2"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (zero_extend:SI (match_operand:HI 1 "spu_reg_operand" "r")))
   (clobber (match_scratch:SI 2 "=&r"))]
  ""
  {
    rtx mask = gen_reg_rtx (SImode);
    rtx op1 = simplify_gen_subreg (SImode, operands[1], HImode, 0);
    emit_move_insn (mask, GEN_INT (0xffff));
    emit_insn (gen_andsi3(operands[0], op1, mask));
    DONE;
  })

(define_insn "zero_extendsidi2"
  [(set (match_operand:DI 0 "spu_reg_operand" "=r")
        (zero_extend:DI (match_operand:SI 1 "spu_reg_operand" "r")))]
  ""
  "rotqmbyi\t%0,%1,-4"
  [(set_attr "type" "shuf")])

(define_insn "zero_extendqiti2"
  [(set (match_operand:TI 0 "spu_reg_operand" "=r")
        (zero_extend:TI (match_operand:QI 1 "spu_reg_operand" "r")))]
  ""
  "andi\t%0,%1,0x00ff\;rotqmbyi\t%0,%0,-12"
  [(set_attr "type" "multi0")
   (set_attr "length" "8")])

(define_insn "zero_extendhiti2"
  [(set (match_operand:TI 0 "spu_reg_operand" "=r")
        (zero_extend:TI (match_operand:HI 1 "spu_reg_operand" "r")))]
  ""
  "shli\t%0,%1,16\;rotqmbyi\t%0,%0,-14"
  [(set_attr "type" "multi1")
   (set_attr "length" "8")])

(define_insn "zero_extendsiti2"
  [(set (match_operand:TI 0 "spu_reg_operand" "=r")
        (zero_extend:TI (match_operand:SI 1 "spu_reg_operand" "r")))]
  ""
  "rotqmbyi\t%0,%1,-12"
  [(set_attr "type" "shuf")])

(define_insn "zero_extendditi2"
  [(set (match_operand:TI 0 "spu_reg_operand" "=r")
        (zero_extend:TI (match_operand:DI 1 "spu_reg_operand" "r")))]
  ""
  "rotqmbyi\t%0,%1,-8"
  [(set_attr "type" "shuf")])

;; trunc

(define_insn "truncdiqi2"
  [(set (match_operand:QI 0 "spu_reg_operand" "=r")
        (truncate:QI (match_operand:DI 1 "spu_reg_operand" "r")))]
  ""
  "shlqbyi\t%0,%1,4"
  [(set_attr "type" "shuf")])

(define_insn "truncdihi2"
  [(set (match_operand:HI 0 "spu_reg_operand" "=r")
        (truncate:HI (match_operand:DI 1 "spu_reg_operand" "r")))]
  ""
  "shlqbyi\t%0,%1,4"
  [(set_attr "type" "shuf")])

(define_insn "truncdisi2"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (truncate:SI (match_operand:DI 1 "spu_reg_operand" "r")))]
  ""
  "shlqbyi\t%0,%1,4"
  [(set_attr "type" "shuf")])

(define_insn "trunctiqi2"
  [(set (match_operand:QI 0 "spu_reg_operand" "=r")
        (truncate:QI (match_operand:TI 1 "spu_reg_operand" "r")))]
  ""
  "shlqbyi\t%0,%1,12"
  [(set_attr "type" "shuf")])

(define_insn "trunctihi2"
  [(set (match_operand:HI 0 "spu_reg_operand" "=r")
        (truncate:HI (match_operand:TI 1 "spu_reg_operand" "r")))]
  ""
  "shlqbyi\t%0,%1,12"
  [(set_attr "type" "shuf")])

(define_insn "trunctisi2"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (truncate:SI (match_operand:TI 1 "spu_reg_operand" "r")))]
  ""
  "shlqbyi\t%0,%1,12"
  [(set_attr "type" "shuf")])

(define_insn "trunctidi2"
  [(set (match_operand:DI 0 "spu_reg_operand" "=r")
        (truncate:DI (match_operand:TI 1 "spu_reg_operand" "r")))]
  ""
  "shlqbyi\t%0,%1,8"
  [(set_attr "type" "shuf")])

;; float conversions

(define_insn "float<mode><i2f>2"
  [(set (match_operand:<I2F> 0 "spu_reg_operand" "=r")
        (float:<I2F> (match_operand:VSI 1 "spu_reg_operand" "r")))]
  ""
  "csflt\t%0,%1,0"
  [(set_attr "type" "fp7")])

(define_insn "fix_trunc<mode><f2i>2"
  [(set (match_operand:<F2I> 0 "spu_reg_operand" "=r")
        (fix:<F2I> (match_operand:VSF 1 "spu_reg_operand" "r")))]
  ""
  "cflts\t%0,%1,0"
  [(set_attr "type" "fp7")])

(define_insn "floatuns<mode><i2f>2"
  [(set (match_operand:<I2F> 0 "spu_reg_operand" "=r")
        (unsigned_float:<I2F> (match_operand:VSI 1 "spu_reg_operand" "r")))]
  ""
  "cuflt\t%0,%1,0"
  [(set_attr "type" "fp7")])

(define_insn "fixuns_trunc<mode><f2i>2"
  [(set (match_operand:<F2I> 0 "spu_reg_operand" "=r")
        (unsigned_fix:<F2I> (match_operand:VSF 1 "spu_reg_operand" "r")))]
  ""
  "cfltu\t%0,%1,0"
  [(set_attr "type" "fp7")])

(define_insn "float<mode><i2f>2_mul"
  [(set (match_operand:<I2F> 0 "spu_reg_operand" "=r")
        (mult:<I2F> (float:<I2F> (match_operand:VSI 1 "spu_reg_operand" "r"))
                    (match_operand:<I2F> 2 "spu_inv_exp2_operand" "w")))]
  ""
  "csflt\t%0,%1,%w2"
  [(set_attr "type" "fp7")])

(define_insn "float<mode><i2f>2_div"
  [(set (match_operand:<I2F> 0 "spu_reg_operand" "=r")
        (div:<I2F> (float:<I2F> (match_operand:VSI 1 "spu_reg_operand" "r"))
                   (match_operand:<I2F> 2 "spu_exp2_operand" "v")))]
  ""
  "csflt\t%0,%1,%v2"
  [(set_attr "type" "fp7")])


(define_insn "fix_trunc<mode><f2i>2_mul"
  [(set (match_operand:<F2I> 0 "spu_reg_operand" "=r")
        (fix:<F2I> (mult:VSF (match_operand:VSF 1 "spu_reg_operand" "r")
                             (match_operand:VSF 2 "spu_exp2_operand" "v"))))]
  ""
  "cflts\t%0,%1,%v2"
  [(set_attr "type" "fp7")])

(define_insn "floatuns<mode><i2f>2_mul"
  [(set (match_operand:<I2F> 0 "spu_reg_operand" "=r")
        (mult:<I2F> (unsigned_float:<I2F> (match_operand:VSI 1 "spu_reg_operand" "r"))
                    (match_operand:<I2F> 2 "spu_inv_exp2_operand" "w")))]
  ""
  "cuflt\t%0,%1,%w2"
  [(set_attr "type" "fp7")])

(define_insn "floatuns<mode><i2f>2_div"
  [(set (match_operand:<I2F> 0 "spu_reg_operand" "=r")
        (div:<I2F> (unsigned_float:<I2F> (match_operand:VSI 1 "spu_reg_operand" "r"))
                   (match_operand:<I2F> 2 "spu_exp2_operand" "v")))]
  ""
  "cuflt\t%0,%1,%v2"
  [(set_attr "type" "fp7")])

(define_insn "fixuns_trunc<mode><f2i>2_mul"
  [(set (match_operand:<F2I> 0 "spu_reg_operand" "=r")
        (unsigned_fix:<F2I> (mult:VSF (match_operand:VSF 1 "spu_reg_operand" "r")
                                      (match_operand:VSF 2 "spu_exp2_operand" "v"))))]
  ""
  "cfltu\t%0,%1,%v2"
  [(set_attr "type" "fp7")])

(define_insn "extendsfdf2"
  [(set (match_operand:DF 0 "spu_reg_operand" "=r")
        (unspec:DF [(match_operand:SF 1 "spu_reg_operand" "r")]
                   UNSPEC_FLOAT_EXTEND))]
  ""
  "fesd\t%0,%1"
  [(set_attr "type" "fpd")])

(define_insn "truncdfsf2"
  [(set (match_operand:SF 0 "spu_reg_operand" "=r")
        (unspec:SF [(match_operand:DF 1 "spu_reg_operand" "r")]
                   UNSPEC_FLOAT_TRUNCATE))]
  ""
  "frds\t%0,%1"
  [(set_attr "type" "fpd")])

(define_expand "floatdisf2"
  [(set (match_operand:SF 0 "register_operand" "")
        (float:SF (match_operand:DI 1 "register_operand" "")))]
  ""
  {
    rtx c0 = gen_reg_rtx (SImode);
    rtx r0 = gen_reg_rtx (DImode);
    rtx r1 = gen_reg_rtx (SFmode);
    rtx r2 = gen_reg_rtx (SImode);
    rtx setneg = gen_reg_rtx (SImode);
    rtx isneg = gen_reg_rtx (SImode);
    rtx neg = gen_reg_rtx (DImode);
    rtx mask = gen_reg_rtx (DImode);

    emit_move_insn (c0, GEN_INT (-0x80000000ll));

    emit_insn (gen_negdi2 (neg, operands[1]));
    emit_insn (gen_cgt_di_m1 (isneg, operands[1]));
    emit_insn (gen_extend_compare (mask, isneg));
    emit_insn (gen_selb (r0, neg, operands[1], mask));
    emit_insn (gen_andc_si (setneg, c0, isneg));

    emit_insn (gen_floatunsdisf2 (r1, r0));

    emit_insn (gen_iorsi3 (r2, gen_rtx_SUBREG (SImode, r1, 0), setneg));
    emit_move_insn (operands[0], gen_rtx_SUBREG (SFmode, r2, 0));
    DONE;
  })

(define_insn_and_split "floatunsdisf2"
  [(set (match_operand:SF 0 "register_operand" "=r")
        (unsigned_float:SF (match_operand:DI 1 "register_operand" "r")))
   (clobber (match_scratch:SF 2 "=r"))
   (clobber (match_scratch:SF 3 "=r"))
   (clobber (match_scratch:SF 4 "=r"))]
  ""
  "#"
  "reload_completed"
  [(set (match_dup:SF 0)
        (unsigned_float:SF (match_dup:DI 1)))]
  {
    rtx op1_v4si = gen_rtx_REG (V4SImode, REGNO (operands[1]));
    rtx op2_v4sf = gen_rtx_REG (V4SFmode, REGNO (operands[2]));
    rtx op2_ti = gen_rtx_REG (TImode, REGNO (operands[2]));
    rtx op3_ti = gen_rtx_REG (TImode, REGNO (operands[3]));

    REAL_VALUE_TYPE scale;
    real_2expN (&scale, 32, SFmode);

    emit_insn (gen_floatunsv4siv4sf2 (op2_v4sf, op1_v4si));
    emit_insn (gen_shlqby_ti (op3_ti, op2_ti, GEN_INT (4)));

    emit_move_insn (operands[4],
                    CONST_DOUBLE_FROM_REAL_VALUE (scale, SFmode));
    emit_insn (gen_fmasf4 (operands[0],
                           operands[2], operands[4], operands[3]));
    DONE;
  })

(define_expand "floattisf2"
  [(set (match_operand:SF 0 "register_operand" "")
        (float:SF (match_operand:TI 1 "register_operand" "")))]
  ""
  {
    rtx c0 = gen_reg_rtx (SImode);
    rtx r0 = gen_reg_rtx (TImode);
    rtx r1 = gen_reg_rtx (SFmode);
    rtx r2 = gen_reg_rtx (SImode);
    rtx setneg = gen_reg_rtx (SImode);
    rtx isneg = gen_reg_rtx (SImode);
    rtx neg = gen_reg_rtx (TImode);
    rtx mask = gen_reg_rtx (TImode);

    emit_move_insn (c0, GEN_INT (-0x80000000ll));

    emit_insn (gen_negti2 (neg, operands[1]));
    emit_insn (gen_cgt_ti_m1 (isneg, operands[1]));
    emit_insn (gen_extend_compare (mask, isneg));
    emit_insn (gen_selb (r0, neg, operands[1], mask));
    emit_insn (gen_andc_si (setneg, c0, isneg));

    emit_insn (gen_floatunstisf2 (r1, r0));

    emit_insn (gen_iorsi3 (r2, gen_rtx_SUBREG (SImode, r1, 0), setneg));
    emit_move_insn (operands[0], gen_rtx_SUBREG (SFmode, r2, 0));
    DONE;
  })

(define_insn_and_split "floatunstisf2"
  [(set (match_operand:SF 0 "register_operand" "=r")
        (unsigned_float:SF (match_operand:TI 1 "register_operand" "r")))
   (clobber (match_scratch:SF 2 "=r"))
   (clobber (match_scratch:SF 3 "=r"))
   (clobber (match_scratch:SF 4 "=r"))]
  ""
  "#"
  "reload_completed"
  [(set (match_dup:SF 0)
        (unsigned_float:SF (match_dup:TI 1)))]
  {
    rtx op1_v4si = gen_rtx_REG (V4SImode, REGNO (operands[1]));
    rtx op2_v4sf = gen_rtx_REG (V4SFmode, REGNO (operands[2]));
    rtx op2_ti = gen_rtx_REG (TImode, REGNO (operands[2]));
    rtx op3_ti = gen_rtx_REG (TImode, REGNO (operands[3]));

    REAL_VALUE_TYPE scale;
    real_2expN (&scale, 32, SFmode);

    emit_insn (gen_floatunsv4siv4sf2 (op2_v4sf, op1_v4si));
    emit_insn (gen_shlqby_ti (op3_ti, op2_ti, GEN_INT (4)));

    emit_move_insn (operands[4],
                    CONST_DOUBLE_FROM_REAL_VALUE (scale, SFmode));
    emit_insn (gen_fmasf4 (operands[2],
                           operands[2], operands[4], operands[3]));

    emit_insn (gen_shlqby_ti (op3_ti, op3_ti, GEN_INT (4)));
    emit_insn (gen_fmasf4 (operands[2],
                           operands[2], operands[4], operands[3]));

    emit_insn (gen_shlqby_ti (op3_ti, op3_ti, GEN_INT (4)));
    emit_insn (gen_fmasf4 (operands[0],
                           operands[2], operands[4], operands[3]));
    DONE;
  })

;; Do (double)(operands[1]+0x80000000u)-(double)0x80000000
(define_expand "floatsidf2"
  [(set (match_operand:DF 0 "register_operand" "")
        (float:DF (match_operand:SI 1 "register_operand" "")))]
  ""
  {
    rtx c0 = gen_reg_rtx (SImode);
    rtx c1 = gen_reg_rtx (DFmode);
    rtx r0 = gen_reg_rtx (SImode);
    rtx r1 = gen_reg_rtx (DFmode);

    emit_move_insn (c0, GEN_INT (-0x80000000ll));
    emit_move_insn (c1, spu_float_const ("2147483648", DFmode));
    emit_insn (gen_xorsi3 (r0, operands[1], c0));
    emit_insn (gen_floatunssidf2 (r1, r0));
    emit_insn (gen_subdf3 (operands[0], r1, c1));
    DONE;
  })
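
;; A worked example of the bias trick above (added for illustration,
;; not from the original sources): xor with 0x80000000 flips the sign
;; bit, which is the same as adding 0x80000000 modulo 2^32.  For
;; operands[1] = -5 the xor gives 0x7ffffffb = 2147483643; converting
;; that value to double and subtracting 2147483648.0 yields -5.0.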

(define_expand "floatunssidf2"
  [(set (match_operand:DF 0 "register_operand" "=r")
        (unsigned_float:DF (match_operand:SI 1 "register_operand" "r")))]
  ""
  "{
    rtx value, insns;
    rtx c0 = spu_const_from_ints (V16QImode, 0x02031011, 0x12138080,
                                  0x06071415, 0x16178080);
    rtx r0 = gen_reg_rtx (V16QImode);

    if (optimize_size)
      {
        start_sequence ();
        value =
          emit_library_call_value (convert_optab_libfunc (ufloat_optab,
                                                          DFmode, SImode),
                   NULL_RTX, LCT_NORMAL, DFmode, 1, operands[1], SImode);
        insns = get_insns ();
        end_sequence ();
        emit_libcall_block (insns, operands[0], value,
                            gen_rtx_UNSIGNED_FLOAT (DFmode, operands[1]));
      }
    else
      {
        emit_move_insn (r0, c0);
        emit_insn (gen_floatunssidf2_internal (operands[0], operands[1], r0));
      }
    DONE;
  }")

(define_insn_and_split "floatunssidf2_internal"
  [(set (match_operand:DF 0 "register_operand" "=r")
        (unsigned_float:DF (match_operand:SI 1 "register_operand" "r")))
   (use (match_operand:V16QI 2 "register_operand" "r"))
   (clobber (match_scratch:V4SI 3 "=&r"))
   (clobber (match_scratch:V4SI 4 "=&r"))
   (clobber (match_scratch:V4SI 5 "=&r"))
   (clobber (match_scratch:V4SI 6 "=&r"))]
  ""
  "clz\t%3,%1\;il\t%6,1023+31\;shl\t%4,%1,%3\;ceqi\t%5,%3,32\;sf\t%6,%3,%6\;a\t%4,%4,%4\;andc\t%6,%6,%5\;shufb\t%6,%6,%4,%2\;shlqbii\t%0,%6,4"
  "reload_completed"
  [(set (match_dup:DF 0)
        (unsigned_float:DF (match_dup:SI 1)))]
  "{
    rtx *ops = operands;
    rtx op1_v4si = gen_rtx_REG(V4SImode, REGNO(ops[1]));
    rtx op0_ti = gen_rtx_REG (TImode, REGNO (ops[0]));
    rtx op2_ti = gen_rtx_REG (TImode, REGNO (ops[2]));
    rtx op6_ti = gen_rtx_REG (TImode, REGNO (ops[6]));
    emit_insn (gen_clzv4si2 (ops[3],op1_v4si));
    emit_move_insn (ops[6], spu_const (V4SImode, 1023+31));
    emit_insn (gen_vashlv4si3 (ops[4],op1_v4si,ops[3]));
    emit_insn (gen_ceq_v4si (ops[5],ops[3],spu_const (V4SImode, 32)));
    emit_insn (gen_subv4si3 (ops[6],ops[6],ops[3]));
    emit_insn (gen_addv4si3 (ops[4],ops[4],ops[4]));
    emit_insn (gen_andc_v4si (ops[6],ops[6],ops[5]));
    emit_insn (gen_shufb (ops[6],ops[6],ops[4],op2_ti));
    emit_insn (gen_shlqbi_ti (op0_ti,op6_ti,GEN_INT(4)));
    DONE;
  }"
  [(set_attr "length" "32")])

(define_expand "floatdidf2"
  [(set (match_operand:DF 0 "register_operand" "")
        (float:DF (match_operand:DI 1 "register_operand" "")))]
  ""
  {
    rtx c0 = gen_reg_rtx (DImode);
    rtx r0 = gen_reg_rtx (DImode);
    rtx r1 = gen_reg_rtx (DFmode);
    rtx r2 = gen_reg_rtx (DImode);
    rtx setneg = gen_reg_rtx (DImode);
    rtx isneg = gen_reg_rtx (SImode);
    rtx neg = gen_reg_rtx (DImode);
    rtx mask = gen_reg_rtx (DImode);

    emit_move_insn (c0, GEN_INT (0x8000000000000000ull));

    emit_insn (gen_negdi2 (neg, operands[1]));
    emit_insn (gen_cgt_di_m1 (isneg, operands[1]));
    emit_insn (gen_extend_compare (mask, isneg));
    emit_insn (gen_selb (r0, neg, operands[1], mask));
    emit_insn (gen_andc_di (setneg, c0, mask));

    emit_insn (gen_floatunsdidf2 (r1, r0));

    emit_insn (gen_iordi3 (r2, gen_rtx_SUBREG (DImode, r1, 0), setneg));
    emit_move_insn (operands[0], gen_rtx_SUBREG (DFmode, r2, 0));
    DONE;
  })

(define_expand "floatunsdidf2"
  [(set (match_operand:DF 0 "register_operand" "=r")
        (unsigned_float:DF (match_operand:DI 1 "register_operand" "r")))]
  ""
  "{
    rtx value, insns;
    rtx c0 = spu_const_from_ints (V16QImode, 0x02031011, 0x12138080,
                                  0x06071415, 0x16178080);
    rtx c1 = spu_const_from_ints (V4SImode, 1023+63, 1023+31, 0, 0);
    rtx r0 = gen_reg_rtx (V16QImode);
    rtx r1 = gen_reg_rtx (V4SImode);

    if (optimize_size)
      {
        start_sequence ();
        value =
          emit_library_call_value (convert_optab_libfunc (ufloat_optab,
                                                          DFmode, DImode),
                   NULL_RTX, LCT_NORMAL, DFmode, 1, operands[1], DImode);
        insns = get_insns ();
        end_sequence ();
        emit_libcall_block (insns, operands[0], value,
                            gen_rtx_UNSIGNED_FLOAT (DFmode, operands[1]));
      }
    else
      {
        emit_move_insn (r1, c1);
        emit_move_insn (r0, c0);
        emit_insn (gen_floatunsdidf2_internal (operands[0], operands[1], r0, r1));
      }
    DONE;
  }")

(define_insn_and_split "floatunsdidf2_internal"
  [(set (match_operand:DF 0 "register_operand" "=r")
        (unsigned_float:DF (match_operand:DI 1 "register_operand" "r")))
   (use (match_operand:V16QI 2 "register_operand" "r"))
   (use (match_operand:V4SI 3 "register_operand" "r"))
   (clobber (match_scratch:V4SI 4 "=&r"))
   (clobber (match_scratch:V4SI 5 "=&r"))
   (clobber (match_scratch:V4SI 6 "=&r"))]
  ""
  "clz\t%4,%1\;shl\t%5,%1,%4\;ceqi\t%6,%4,32\;sf\t%4,%4,%3\;a\t%5,%5,%5\;andc\t%4,%4,%6\;shufb\t%4,%4,%5,%2\;shlqbii\t%4,%4,4\;shlqbyi\t%5,%4,8\;dfa\t%0,%4,%5"
  "reload_completed"
  [(set (match_operand:DF 0 "register_operand" "=r")
        (unsigned_float:DF (match_operand:DI 1 "register_operand" "r")))]
  "{
    rtx *ops = operands;
    rtx op1_v4si = gen_rtx_REG (V4SImode, REGNO(ops[1]));
    rtx op2_ti = gen_rtx_REG (TImode, REGNO(ops[2]));
    rtx op4_ti = gen_rtx_REG (TImode, REGNO(ops[4]));
    rtx op5_ti = gen_rtx_REG (TImode, REGNO(ops[5]));
    rtx op4_df = gen_rtx_REG (DFmode, REGNO(ops[4]));
    rtx op5_df = gen_rtx_REG (DFmode, REGNO(ops[5]));
    emit_insn (gen_clzv4si2 (ops[4],op1_v4si));
    emit_insn (gen_vashlv4si3 (ops[5],op1_v4si,ops[4]));
    emit_insn (gen_ceq_v4si (ops[6],ops[4],spu_const (V4SImode, 32)));
    emit_insn (gen_subv4si3 (ops[4],ops[3],ops[4]));
    emit_insn (gen_addv4si3 (ops[5],ops[5],ops[5]));
    emit_insn (gen_andc_v4si (ops[4],ops[4],ops[6]));
    emit_insn (gen_shufb (ops[4],ops[4],ops[5],op2_ti));
    emit_insn (gen_shlqbi_ti (op4_ti,op4_ti,GEN_INT(4)));
    emit_insn (gen_shlqby_ti (op5_ti,op4_ti,GEN_INT(8)));
    emit_insn (gen_adddf3 (ops[0],op4_df,op5_df));
    DONE;
  }"
  [(set_attr "length" "40")])


;; add

(define_expand "addv16qi3"
  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
        (plus:V16QI (match_operand:V16QI 1 "spu_reg_operand" "r")
                    (match_operand:V16QI 2 "spu_reg_operand" "r")))]
  ""
  "{
    rtx res_short = simplify_gen_subreg (V8HImode, operands[0], V16QImode, 0);
    rtx lhs_short = simplify_gen_subreg (V8HImode, operands[1], V16QImode, 0);
    rtx rhs_short = simplify_gen_subreg (V8HImode, operands[2], V16QImode, 0);
    rtx rhs_and = gen_reg_rtx (V8HImode);
    rtx hi_char = gen_reg_rtx (V8HImode);
    rtx lo_char = gen_reg_rtx (V8HImode);
    rtx mask = gen_reg_rtx (V8HImode);

    emit_move_insn (mask, spu_const (V8HImode, 0x00ff));
    emit_insn (gen_andv8hi3 (rhs_and, rhs_short, spu_const (V8HImode, 0xff00)));
    emit_insn (gen_addv8hi3 (hi_char, lhs_short, rhs_and));
    emit_insn (gen_addv8hi3 (lo_char, lhs_short, rhs_short));
    emit_insn (gen_selb (res_short, hi_char, lo_char, mask));
    DONE;
  }")
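
;; Note (added for illustration, not from the original sources): SPU has
;; no byte-wide add, so the expander above synthesizes one from halfword
;; adds.  The "lo_char" add produces correct low bytes of each halfword;
;; the "hi_char" add, with the low bytes of operand 2 masked off, gives
;; high bytes that cannot receive a carry from the low half.  selb with
;; the 0x00ff mask then merges the two partial results byte-wise.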

(define_insn "add<mode>3"
  [(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
        (plus:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
                   (match_operand:VHSI 2 "spu_arith_operand" "r,B")))]
  ""
  "@
  a<bh>\t%0,%1,%2
  a<bh>i\t%0,%1,%2")

(define_expand "add<mode>3"
  [(set (match_dup:VDI 3)
        (unspec:VDI [(match_operand:VDI 1 "spu_reg_operand" "")
                     (match_operand:VDI 2 "spu_reg_operand" "")] UNSPEC_CG))
   (set (match_dup:VDI 5)
        (unspec:VDI [(match_dup 3)
                     (match_dup 3)
                     (match_dup:TI 4)] UNSPEC_SHUFB))
   (set (match_operand:VDI 0 "spu_reg_operand" "")
        (unspec:VDI [(match_dup 1)
                     (match_dup 2)
                     (match_dup 5)] UNSPEC_ADDX))]
  ""
  {
    unsigned char pat[16] = {
      0x04, 0x05, 0x06, 0x07,
      0x80, 0x80, 0x80, 0x80,
      0x0c, 0x0d, 0x0e, 0x0f,
      0x80, 0x80, 0x80, 0x80
    };
    operands[3] = gen_reg_rtx (<MODE>mode);
    operands[4] = gen_reg_rtx (TImode);
    operands[5] = gen_reg_rtx (<MODE>mode);
    emit_move_insn (operands[4], array_to_constant (TImode, pat));
  })

(define_insn "cg_<mode>"
  [(set (match_operand:CBOP 0 "spu_reg_operand" "=r")
        (unspec:CBOP [(match_operand 1 "spu_reg_operand" "r")
                      (match_operand 2 "spu_reg_operand" "r")] UNSPEC_CG))]
  "operands"
  "cg\t%0,%1,%2")

(define_insn "cgx_<mode>"
  [(set (match_operand:CBOP 0 "spu_reg_operand" "=r")
        (unspec:CBOP [(match_operand 1 "spu_reg_operand" "r")
                      (match_operand 2 "spu_reg_operand" "r")
                      (match_operand 3 "spu_reg_operand" "0")] UNSPEC_CGX))]
  "operands"
  "cgx\t%0,%1,%2")

(define_insn "addx_<mode>"
  [(set (match_operand:CBOP 0 "spu_reg_operand" "=r")
        (unspec:CBOP [(match_operand 1 "spu_reg_operand" "r")
                      (match_operand 2 "spu_reg_operand" "r")
                      (match_operand 3 "spu_reg_operand" "0")] UNSPEC_ADDX))]
  "operands"
  "addx\t%0,%1,%2")


;; This is not the most efficient implementation of addti3.
;; We include this here because 1) the compiler needs it to be
;; defined as the word size is 128-bit and 2) sometimes gcc
;; substitutes an add for a constant left-shift. 2) is unlikely
;; because we also give addti3 a high cost. In case gcc does
;; generate TImode add, here is the code to do it.
;; operand 2 is a nonmemory because the compiler requires it.
(define_insn "addti3"
  [(set (match_operand:TI 0 "spu_reg_operand" "=&r")
        (plus:TI (match_operand:TI 1 "spu_reg_operand" "r")
                 (match_operand:TI 2 "spu_nonmem_operand" "r")))
   (clobber (match_scratch:TI 3 "=&r"))]
  ""
  "cg\t%3,%1,%2\n\\
   shlqbyi\t%3,%3,4\n\\
   cgx\t%3,%1,%2\n\\
   shlqbyi\t%3,%3,4\n\\
   cgx\t%3,%1,%2\n\\
   shlqbyi\t%0,%3,4\n\\
   addx\t%0,%1,%2"
  [(set_attr "type" "multi0")
   (set_attr "length" "28")])
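
;; Added commentary (not from the original sources): the sequence above
;; ripples a 128-bit add through the four 32-bit word slots.  cg forms
;; the per-word carries, shlqbyi shifts the quadword left one word so
;; each carry lines up with the next more-significant word, cgx then
;; recomputes the carries including that carry-in, and the final addx
;; adds the two operands with the accumulated per-word carry-in.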

(define_insn "add<mode>3"
  [(set (match_operand:VSF 0 "spu_reg_operand" "=r")
        (plus:VSF (match_operand:VSF 1 "spu_reg_operand" "r")
                  (match_operand:VSF 2 "spu_reg_operand" "r")))]
  ""
  "fa\t%0,%1,%2"
  [(set_attr "type" "fp6")])

(define_insn "add<mode>3"
  [(set (match_operand:VDF 0 "spu_reg_operand" "=r")
        (plus:VDF (match_operand:VDF 1 "spu_reg_operand" "r")
                  (match_operand:VDF 2 "spu_reg_operand" "r")))]
  ""
  "dfa\t%0,%1,%2"
  [(set_attr "type" "fpd")])


;; sub

(define_expand "subv16qi3"
  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
        (minus:V16QI (match_operand:V16QI 1 "spu_reg_operand" "r")
                     (match_operand:V16QI 2 "spu_reg_operand" "r")))]
  ""
  "{
    rtx res_short = simplify_gen_subreg (V8HImode, operands[0], V16QImode, 0);
    rtx lhs_short = simplify_gen_subreg (V8HImode, operands[1], V16QImode, 0);
    rtx rhs_short = simplify_gen_subreg (V8HImode, operands[2], V16QImode, 0);
    rtx rhs_and = gen_reg_rtx (V8HImode);
    rtx hi_char = gen_reg_rtx (V8HImode);
    rtx lo_char = gen_reg_rtx (V8HImode);
    rtx mask = gen_reg_rtx (V8HImode);

    emit_move_insn (mask, spu_const (V8HImode, 0x00ff));
    emit_insn (gen_andv8hi3 (rhs_and, rhs_short, spu_const (V8HImode, 0xff00)));
    emit_insn (gen_subv8hi3 (hi_char, lhs_short, rhs_and));
    emit_insn (gen_subv8hi3 (lo_char, lhs_short, rhs_short));
    emit_insn (gen_selb (res_short, hi_char, lo_char, mask));
    DONE;
  }")

(define_insn "sub<mode>3"
  [(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
        (minus:VHSI (match_operand:VHSI 1 "spu_arith_operand" "r,B")
                    (match_operand:VHSI 2 "spu_reg_operand" "r,r")))]
  ""
  "@
  sf<bh>\t%0,%2,%1
  sf<bh>i\t%0,%2,%1")

(define_expand "sub<mode>3"
  [(set (match_dup:VDI 3)
        (unspec:VDI [(match_operand:VDI 1 "spu_reg_operand" "")
                     (match_operand:VDI 2 "spu_reg_operand" "")] UNSPEC_BG))
   (set (match_dup:VDI 5)
        (unspec:VDI [(match_dup 3)
                     (match_dup 3)
                     (match_dup:TI 4)] UNSPEC_SHUFB))
   (set (match_operand:VDI 0 "spu_reg_operand" "")
        (unspec:VDI [(match_dup 1)
                     (match_dup 2)
                     (match_dup 5)] UNSPEC_SFX))]
  ""
  {
    unsigned char pat[16] = {
      0x04, 0x05, 0x06, 0x07,
      0xc0, 0xc0, 0xc0, 0xc0,
      0x0c, 0x0d, 0x0e, 0x0f,
      0xc0, 0xc0, 0xc0, 0xc0
    };
    operands[3] = gen_reg_rtx (<MODE>mode);
    operands[4] = gen_reg_rtx (TImode);
    operands[5] = gen_reg_rtx (<MODE>mode);
    emit_move_insn (operands[4], array_to_constant (TImode, pat));
  })

(define_insn "bg_<mode>"
  [(set (match_operand:CBOP 0 "spu_reg_operand" "=r")
        (unspec:CBOP [(match_operand 1 "spu_reg_operand" "r")
                      (match_operand 2 "spu_reg_operand" "r")] UNSPEC_BG))]
  "operands"
  "bg\t%0,%2,%1")

(define_insn "bgx_<mode>"
  [(set (match_operand:CBOP 0 "spu_reg_operand" "=r")
        (unspec:CBOP [(match_operand 1 "spu_reg_operand" "r")
                      (match_operand 2 "spu_reg_operand" "r")
                      (match_operand 3 "spu_reg_operand" "0")] UNSPEC_BGX))]
  "operands"
  "bgx\t%0,%2,%1")

(define_insn "sfx_<mode>"
  [(set (match_operand:CBOP 0 "spu_reg_operand" "=r")
        (unspec:CBOP [(match_operand 1 "spu_reg_operand" "r")
                      (match_operand 2 "spu_reg_operand" "r")
                      (match_operand 3 "spu_reg_operand" "0")] UNSPEC_SFX))]
  "operands"
  "sfx\t%0,%2,%1")

(define_insn "subti3"
  [(set (match_operand:TI 0 "spu_reg_operand" "=r")
        (minus:TI (match_operand:TI 1 "spu_reg_operand" "r")
                  (match_operand:TI 2 "spu_reg_operand" "r")))
   (clobber (match_scratch:TI 3 "=&r"))
   (clobber (match_scratch:TI 4 "=&r"))
   (clobber (match_scratch:TI 5 "=&r"))
   (clobber (match_scratch:TI 6 "=&r"))]
  ""
  "il\t%6,1\n\\
   bg\t%3,%2,%1\n\\
   xor\t%3,%3,%6\n\\
   sf\t%4,%2,%1\n\\
   shlqbyi\t%5,%3,4\n\\
   bg\t%3,%5,%4\n\\
   xor\t%3,%3,%6\n\\
   sf\t%4,%5,%4\n\\
   shlqbyi\t%5,%3,4\n\\
   bg\t%3,%5,%4\n\\
   xor\t%3,%3,%6\n\\
   sf\t%4,%5,%4\n\\
   shlqbyi\t%5,%3,4\n\\
   sf\t%0,%5,%4"
  [(set_attr "type" "multi0")
   (set_attr "length" "56")])

(define_insn "sub<mode>3"
  [(set (match_operand:VSF 0 "spu_reg_operand" "=r")
        (minus:VSF (match_operand:VSF 1 "spu_reg_operand" "r")
                   (match_operand:VSF 2 "spu_reg_operand" "r")))]
  ""
  "fs\t%0,%1,%2"
  [(set_attr "type" "fp6")])

(define_insn "sub<mode>3"
  [(set (match_operand:VDF 0 "spu_reg_operand" "=r")
        (minus:VDF (match_operand:VDF 1 "spu_reg_operand" "r")
                   (match_operand:VDF 2 "spu_reg_operand" "r")))]
  ""
  "dfs\t%0,%1,%2"
  [(set_attr "type" "fpd")])


;; neg

(define_expand "negv16qi2"
  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
        (neg:V16QI (match_operand:V16QI 1 "spu_reg_operand" "r")))]
  ""
  "{
    rtx zero = gen_reg_rtx (V16QImode);
    emit_move_insn (zero, CONST0_RTX (V16QImode));
    emit_insn (gen_subv16qi3 (operands[0], zero, operands[1]));
    DONE;
  }")

(define_insn "neg<mode>2"
  [(set (match_operand:VHSI 0 "spu_reg_operand" "=r")
        (neg:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r")))]
  ""
  "sf<bh>i\t%0,%1,0")

(define_expand "negdi2"
  [(set (match_operand:DI 0 "spu_reg_operand" "")
        (neg:DI (match_operand:DI 1 "spu_reg_operand" "")))]
  ""
  {
    rtx zero = gen_reg_rtx(DImode);
    emit_move_insn(zero, GEN_INT(0));
    emit_insn (gen_subdi3(operands[0], zero, operands[1]));
    DONE;
  })

(define_expand "negti2"
  [(set (match_operand:TI 0 "spu_reg_operand" "")
        (neg:TI (match_operand:TI 1 "spu_reg_operand" "")))]
  ""
  {
    rtx zero = gen_reg_rtx(TImode);
    emit_move_insn(zero, GEN_INT(0));
    emit_insn (gen_subti3(operands[0], zero, operands[1]));
    DONE;
  })

(define_expand "neg<mode>2"
  [(parallel
    [(set (match_operand:VSF 0 "spu_reg_operand" "")
          (neg:VSF (match_operand:VSF 1 "spu_reg_operand" "")))
     (use (match_dup 2))])]
  ""
  "operands[2] = gen_reg_rtx (<F2I>mode);
   emit_move_insn (operands[2], spu_const (<F2I>mode, -0x80000000ull));")

(define_expand "neg<mode>2"
  [(parallel
    [(set (match_operand:VDF 0 "spu_reg_operand" "")
          (neg:VDF (match_operand:VDF 1 "spu_reg_operand" "")))
     (use (match_dup 2))])]
  ""
  "operands[2] = gen_reg_rtx (<F2I>mode);
   emit_move_insn (operands[2], spu_const (<F2I>mode, -0x8000000000000000ull));")

(define_insn_and_split "_neg<mode>2"
  [(set (match_operand:VSDF 0 "spu_reg_operand" "=r")
        (neg:VSDF (match_operand:VSDF 1 "spu_reg_operand" "r")))
   (use (match_operand:<F2I> 2 "spu_reg_operand" "r"))]
  ""
  "#"
  ""
  [(set (match_dup:<F2I> 3)
        (xor:<F2I> (match_dup:<F2I> 4)
                   (match_dup:<F2I> 2)))]
  {
    operands[3] = spu_gen_subreg (<F2I>mode, operands[0]);
    operands[4] = spu_gen_subreg (<F2I>mode, operands[1]);
  })


;; abs

(define_expand "abs<mode>2"
  [(parallel
    [(set (match_operand:VSF 0 "spu_reg_operand" "")
          (abs:VSF (match_operand:VSF 1 "spu_reg_operand" "")))
     (use (match_dup 2))])]
  ""
  "operands[2] = gen_reg_rtx (<F2I>mode);
   emit_move_insn (operands[2], spu_const (<F2I>mode, 0x7fffffffull));")

(define_expand "abs<mode>2"
  [(parallel
    [(set (match_operand:VDF 0 "spu_reg_operand" "")
          (abs:VDF (match_operand:VDF 1 "spu_reg_operand" "")))
     (use (match_dup 2))])]
  ""
  "operands[2] = gen_reg_rtx (<F2I>mode);
   emit_move_insn (operands[2], spu_const (<F2I>mode, 0x7fffffffffffffffull));")

(define_insn_and_split "_abs<mode>2"
  [(set (match_operand:VSDF 0 "spu_reg_operand" "=r")
        (abs:VSDF (match_operand:VSDF 1 "spu_reg_operand" "r")))
   (use (match_operand:<F2I> 2 "spu_reg_operand" "r"))]
  ""
  "#"
  ""
  [(set (match_dup:<F2I> 3)
        (and:<F2I> (match_dup:<F2I> 4)
                   (match_dup:<F2I> 2)))]
  {
    operands[3] = spu_gen_subreg (<F2I>mode, operands[0]);
    operands[4] = spu_gen_subreg (<F2I>mode, operands[1]);
  })


;; mul

(define_insn "mulhi3"
  [(set (match_operand:HI 0 "spu_reg_operand" "=r,r")
        (mult:HI (match_operand:HI 1 "spu_reg_operand" "r,r")
                 (match_operand:HI 2 "spu_arith_operand" "r,B")))]
  ""
  "@
  mpy\t%0,%1,%2
  mpyi\t%0,%1,%2"
  [(set_attr "type" "fp7")])

(define_expand "mulv8hi3"
  [(set (match_operand:V8HI 0 "spu_reg_operand" "")
        (mult:V8HI (match_operand:V8HI 1 "spu_reg_operand" "")
                   (match_operand:V8HI 2 "spu_reg_operand" "")))]
  ""
  "{
    rtx result = simplify_gen_subreg (V4SImode, operands[0], V8HImode, 0);
    rtx low = gen_reg_rtx (V4SImode);
    rtx high = gen_reg_rtx (V4SImode);
    rtx shift = gen_reg_rtx (V4SImode);
    rtx mask = gen_reg_rtx (V4SImode);

    emit_move_insn (mask, spu_const (V4SImode, 0x0000ffff));
    emit_insn (gen_spu_mpyhh (high, operands[1], operands[2]));
    emit_insn (gen_spu_mpy (low, operands[1], operands[2]));
    emit_insn (gen_vashlv4si3 (shift, high, spu_const(V4SImode, 16)));
    emit_insn (gen_selb (result, shift, low, mask));
    DONE;
  }")

(define_expand "mul<mode>3"
  [(parallel
    [(set (match_operand:VSI 0 "spu_reg_operand" "")
          (mult:VSI (match_operand:VSI 1 "spu_reg_operand" "")
                    (match_operand:VSI 2 "spu_reg_operand" "")))
     (clobber (match_dup:VSI 3))
     (clobber (match_dup:VSI 4))
     (clobber (match_dup:VSI 5))
     (clobber (match_dup:VSI 6))])]
  ""
  {
    operands[3] = gen_reg_rtx(<MODE>mode);
    operands[4] = gen_reg_rtx(<MODE>mode);
    operands[5] = gen_reg_rtx(<MODE>mode);
    operands[6] = gen_reg_rtx(<MODE>mode);
  })

(define_insn_and_split "_mulsi3"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (mult:SI (match_operand:SI 1 "spu_reg_operand" "r")
                 (match_operand:SI 2 "spu_arith_operand" "rK")))
   (clobber (match_operand:SI 3 "spu_reg_operand" "=&r"))
   (clobber (match_operand:SI 4 "spu_reg_operand" "=&r"))
   (clobber (match_operand:SI 5 "spu_reg_operand" "=&r"))
   (clobber (match_operand:SI 6 "spu_reg_operand" "=&r"))]
  ""
  "#"
  ""
  [(set (match_dup:SI 0)
        (mult:SI (match_dup:SI 1)
                 (match_dup:SI 2)))]
  {
    HOST_WIDE_INT val = 0;
    rtx a = operands[3];
    rtx b = operands[4];
    rtx c = operands[5];
    rtx d = operands[6];
    if (GET_CODE(operands[2]) == CONST_INT)
      {
        val = INTVAL(operands[2]);
        emit_move_insn(d, operands[2]);
        operands[2] = d;
      }
    if (val && (val & 0xffff) == 0)
      {
        emit_insn (gen_mpyh_si(operands[0], operands[2], operands[1]));
      }
    else if (val > 0 && val < 0x10000)
      {
        rtx cst = satisfies_constraint_K (GEN_INT (val)) ? GEN_INT(val) : d;
        emit_insn (gen_mpyh_si(a, operands[1], operands[2]));
        emit_insn (gen_mpyu_si(c, operands[1], cst));
        emit_insn (gen_addsi3(operands[0], a, c));
      }
    else
      {
        emit_insn (gen_mpyh_si(a, operands[1], operands[2]));
        emit_insn (gen_mpyh_si(b, operands[2], operands[1]));
        emit_insn (gen_mpyu_si(c, operands[1], operands[2]));
        emit_insn (gen_addsi3(d, a, b));
        emit_insn (gen_addsi3(operands[0], d, c));
      }
    DONE;
  })
1461 |
|
|
|
1462 |
|
|
(define_insn_and_split "_mulv4si3"
|
1463 |
|
|
[(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
|
1464 |
|
|
(mult:V4SI (match_operand:V4SI 1 "spu_reg_operand" "r")
|
1465 |
|
|
(match_operand:V4SI 2 "spu_reg_operand" "r")))
|
1466 |
|
|
(clobber (match_operand:V4SI 3 "spu_reg_operand" "=&r"))
|
1467 |
|
|
(clobber (match_operand:V4SI 4 "spu_reg_operand" "=&r"))
|
1468 |
|
|
(clobber (match_operand:V4SI 5 "spu_reg_operand" "=&r"))
|
1469 |
|
|
(clobber (match_operand:V4SI 6 "spu_reg_operand" "=&r"))]
|
1470 |
|
|
""
|
1471 |
|
|
"#"
|
1472 |
|
|
""
|
1473 |
|
|
[(set (match_dup:V4SI 0)
|
1474 |
|
|
(mult:V4SI (match_dup:V4SI 1)
|
1475 |
|
|
(match_dup:V4SI 2)))]
|
1476 |
|
|
{
|
1477 |
|
|
rtx a = operands[3];
|
1478 |
|
|
rtx b = operands[4];
|
1479 |
|
|
rtx c = operands[5];
|
1480 |
|
|
rtx d = operands[6];
|
1481 |
|
|
rtx op1 = simplify_gen_subreg (V8HImode, operands[1], V4SImode, 0);
|
1482 |
|
|
rtx op2 = simplify_gen_subreg (V8HImode, operands[2], V4SImode, 0);
|
1483 |
|
|
emit_insn (gen_spu_mpyh(a, op1, op2));
|
1484 |
|
|
emit_insn (gen_spu_mpyh(b, op2, op1));
|
1485 |
|
|
emit_insn (gen_spu_mpyu(c, op1, op2));
|
1486 |
|
|
emit_insn (gen_addv4si3(d, a, b));
|
1487 |
|
|
emit_insn (gen_addv4si3(operands[0], d, c));
|
1488 |
|
|
DONE;
|
1489 |
|
|
})
|
1490 |
|
|
|
1491 |
|
|
(define_insn "mulhisi3"
|
1492 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1493 |
|
|
(mult:SI (sign_extend:SI (match_operand:HI 1 "spu_reg_operand" "r"))
|
1494 |
|
|
(sign_extend:SI (match_operand:HI 2 "spu_reg_operand" "r"))))]
|
1495 |
|
|
""
|
1496 |
|
|
"mpy\t%0,%1,%2"
|
1497 |
|
|
[(set_attr "type" "fp7")])
|
1498 |
|
|
|
1499 |
|
|
(define_insn "mulhisi3_imm"
|
1500 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1501 |
|
|
(mult:SI (sign_extend:SI (match_operand:HI 1 "spu_reg_operand" "r"))
|
1502 |
|
|
(match_operand:SI 2 "imm_K_operand" "K")))]
|
1503 |
|
|
""
|
1504 |
|
|
"mpyi\t%0,%1,%2"
|
1505 |
|
|
[(set_attr "type" "fp7")])
|
1506 |
|
|
|
1507 |
|
|
(define_insn "umulhisi3"
|
1508 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1509 |
|
|
(mult:SI (zero_extend:SI (match_operand:HI 1 "spu_reg_operand" "r"))
|
1510 |
|
|
(zero_extend:SI (match_operand:HI 2 "spu_reg_operand" "r"))))]
|
1511 |
|
|
""
|
1512 |
|
|
"mpyu\t%0,%1,%2"
|
1513 |
|
|
[(set_attr "type" "fp7")])
|
1514 |
|
|
|
1515 |
|
|
(define_insn "umulhisi3_imm"
|
1516 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1517 |
|
|
(mult:SI (zero_extend:SI (match_operand:HI 1 "spu_reg_operand" "r"))
|
1518 |
|
|
(and:SI (match_operand:SI 2 "imm_K_operand" "K") (const_int 65535))))]
|
1519 |
|
|
""
|
1520 |
|
|
"mpyui\t%0,%1,%2"
|
1521 |
|
|
[(set_attr "type" "fp7")])
|
1522 |
|
|
|
1523 |
|
|
(define_insn "mpyu_si"
|
1524 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r,r")
|
1525 |
|
|
(mult:SI (and:SI (match_operand:SI 1 "spu_reg_operand" "r,r")
|
1526 |
|
|
(const_int 65535))
|
1527 |
|
|
(and:SI (match_operand:SI 2 "spu_arith_operand" "r,K")
|
1528 |
|
|
(const_int 65535))))]
|
1529 |
|
|
""
|
1530 |
|
|
"@
|
1531 |
|
|
mpyu\t%0,%1,%2
|
1532 |
|
|
mpyui\t%0,%1,%2"
|
1533 |
|
|
[(set_attr "type" "fp7")])
|
1534 |
|
|
|
1535 |
|
|
;; This isn't always profitable to use. Consider r = a * b + c * d.
|
1536 |
|
|
;; It's faster to do the multiplies in parallel then add them. If we
|
1537 |
|
|
;; merge a multiply and add it prevents the multiplies from happening in
|
1538 |
|
|
;; parallel.
|
1539 |
|
|
(define_insn "mpya_si"
|
1540 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1541 |
|
|
(plus:SI (mult:SI (sign_extend:SI (match_operand:HI 1 "spu_reg_operand" "r"))
|
1542 |
|
|
(sign_extend:SI (match_operand:HI 2 "spu_reg_operand" "r")))
|
1543 |
|
|
(match_operand:SI 3 "spu_reg_operand" "r")))]
|
1544 |
|
|
"0"
|
1545 |
|
|
"mpya\t%0,%1,%2,%3"
|
1546 |
|
|
[(set_attr "type" "fp7")])
|
1547 |
|
|
|
1548 |
|
|
(define_insn "mpyh_si"
|
1549 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1550 |
|
|
(mult:SI (and:SI (match_operand:SI 1 "spu_reg_operand" "r")
|
1551 |
|
|
(const_int -65536))
|
1552 |
|
|
(and:SI (match_operand:SI 2 "spu_reg_operand" "r")
|
1553 |
|
|
(const_int 65535))))]
|
1554 |
|
|
""
|
1555 |
|
|
"mpyh\t%0,%1,%2"
|
1556 |
|
|
[(set_attr "type" "fp7")])
|
1557 |
|
|
|
1558 |
|
|
(define_insn "mpys_si"
|
1559 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1560 |
|
|
(ashiftrt:SI
|
1561 |
|
|
(mult:SI (sign_extend:SI (match_operand:HI 1 "spu_reg_operand" "r"))
|
1562 |
|
|
(sign_extend:SI (match_operand:HI 2 "spu_reg_operand" "r")))
|
1563 |
|
|
(const_int 16)))]
|
1564 |
|
|
""
|
1565 |
|
|
"mpys\t%0,%1,%2"
|
1566 |
|
|
[(set_attr "type" "fp7")])
|
1567 |
|
|
|
1568 |
|
|
(define_insn "mpyhh_si"
|
1569 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1570 |
|
|
(mult:SI (ashiftrt:SI (match_operand:SI 1 "spu_reg_operand" "r")
|
1571 |
|
|
(const_int 16))
|
1572 |
|
|
(ashiftrt:SI (match_operand:SI 2 "spu_reg_operand" "r")
|
1573 |
|
|
(const_int 16))))]
|
1574 |
|
|
""
|
1575 |
|
|
"mpyhh\t%0,%1,%2"
|
1576 |
|
|
[(set_attr "type" "fp7")])
|
1577 |
|
|
|
1578 |
|
|
(define_insn "mpyhhu_si"
|
1579 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1580 |
|
|
(mult:SI (lshiftrt:SI (match_operand:SI 1 "spu_reg_operand" "r")
|
1581 |
|
|
(const_int 16))
|
1582 |
|
|
(lshiftrt:SI (match_operand:SI 2 "spu_reg_operand" "r")
|
1583 |
|
|
(const_int 16))))]
|
1584 |
|
|
""
|
1585 |
|
|
"mpyhhu\t%0,%1,%2"
|
1586 |
|
|
[(set_attr "type" "fp7")])
|
1587 |
|
|
|
1588 |
|
|
(define_insn "mpyhha_si"
|
1589 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
1590 |
|
|
(plus:SI (mult:SI (ashiftrt:SI (match_operand:SI 1 "spu_reg_operand" "r")
|
1591 |
|
|
(const_int 16))
|
1592 |
|
|
(ashiftrt:SI (match_operand:SI 2 "spu_reg_operand" "r")
|
1593 |
|
|
(const_int 16)))
|
1594 |
|
|
(match_operand:SI 3 "spu_reg_operand" "0")))]
|
1595 |
|
|
"0"
|
1596 |
|
|
"mpyhha\t%0,%1,%2"
|
1597 |
|
|
[(set_attr "type" "fp7")])
|
1598 |
|
|
|
1599 |
|
|
(define_insn "mul3"
|
1600 |
|
|
[(set (match_operand:VSDF 0 "spu_reg_operand" "=r")
|
1601 |
|
|
(mult:VSDF (match_operand:VSDF 1 "spu_reg_operand" "r")
|
1602 |
|
|
(match_operand:VSDF 2 "spu_reg_operand" "r")))]
|
1603 |
|
|
""
|
1604 |
|
|
"fm\t%0,%1,%2"
|
1605 |
|
|
[(set_attr "type" "fp")])
|
1606 |
|
|
|
1607 |
|
|
(define_insn "fma4"
|
1608 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
1609 |
|
|
(fma:VSF (match_operand:VSF 1 "spu_reg_operand" "r")
|
1610 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")
|
1611 |
|
|
(match_operand:VSF 3 "spu_reg_operand" "r")))]
|
1612 |
|
|
""
|
1613 |
|
|
"fma\t%0,%1,%2,%3"
|
1614 |
|
|
[(set_attr "type" "fp6")])
|
1615 |
|
|
|
1616 |
|
|
;; ??? The official description is (c - a*b), which is exactly (-a*b + c).
|
1617 |
|
|
;; Note that this doesn't match the dfnms description. Incorrect?
|
1618 |
|
|
(define_insn "fnma4"
|
1619 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
1620 |
|
|
(fma:VSF
|
1621 |
|
|
(neg:VSF (match_operand:VSF 1 "spu_reg_operand" "r"))
|
1622 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")
|
1623 |
|
|
(match_operand:VSF 3 "spu_reg_operand" "r")))]
|
1624 |
|
|
""
|
1625 |
|
|
"fnms\t%0,%1,%2,%3"
|
1626 |
|
|
[(set_attr "type" "fp6")])
|
1627 |
|
|
|
1628 |
|
|
(define_insn "fms4"
|
1629 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
1630 |
|
|
(fma:VSF
|
1631 |
|
|
(match_operand:VSF 1 "spu_reg_operand" "r")
|
1632 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")
|
1633 |
|
|
(neg:VSF (match_operand:VSF 3 "spu_reg_operand" "r"))))]
|
1634 |
|
|
""
|
1635 |
|
|
"fms\t%0,%1,%2,%3"
|
1636 |
|
|
[(set_attr "type" "fp6")])
|
1637 |
|
|
|
1638 |
|
|
(define_insn "fma4"
|
1639 |
|
|
[(set (match_operand:VDF 0 "spu_reg_operand" "=r")
|
1640 |
|
|
(fma:VDF (match_operand:VDF 1 "spu_reg_operand" "r")
|
1641 |
|
|
(match_operand:VDF 2 "spu_reg_operand" "r")
|
1642 |
|
|
(match_operand:VDF 3 "spu_reg_operand" "0")))]
|
1643 |
|
|
""
|
1644 |
|
|
"dfma\t%0,%1,%2"
|
1645 |
|
|
[(set_attr "type" "fpd")])
|
1646 |
|
|
|
1647 |
|
|
(define_insn "fms4"
|
1648 |
|
|
[(set (match_operand:VDF 0 "spu_reg_operand" "=r")
|
1649 |
|
|
(fma:VDF
|
1650 |
|
|
(match_operand:VDF 1 "spu_reg_operand" "r")
|
1651 |
|
|
(match_operand:VDF 2 "spu_reg_operand" "r")
|
1652 |
|
|
(neg:VDF (match_operand:VDF 3 "spu_reg_operand" "0"))))]
|
1653 |
|
|
""
|
1654 |
|
|
"dfms\t%0,%1,%2"
|
1655 |
|
|
[(set_attr "type" "fpd")])
|
1656 |
|
|
|
1657 |
|
|
(define_insn "nfma4"
|
1658 |
|
|
[(set (match_operand:VDF 0 "spu_reg_operand" "=r")
|
1659 |
|
|
(neg:VDF
|
1660 |
|
|
(fma:VDF (match_operand:VDF 1 "spu_reg_operand" "r")
|
1661 |
|
|
(match_operand:VDF 2 "spu_reg_operand" "r")
|
1662 |
|
|
(match_operand:VDF 3 "spu_reg_operand" "0"))))]
|
1663 |
|
|
""
|
1664 |
|
|
"dfnma\t%0,%1,%2"
|
1665 |
|
|
[(set_attr "type" "fpd")])
|
1666 |
|
|
|
1667 |
|
|
(define_insn "nfms4"
|
1668 |
|
|
[(set (match_operand:VDF 0 "spu_reg_operand" "=r")
|
1669 |
|
|
(neg:VDF
|
1670 |
|
|
(fma:VDF
|
1671 |
|
|
(match_operand:VDF 1 "spu_reg_operand" "r")
|
1672 |
|
|
(match_operand:VDF 2 "spu_reg_operand" "r")
|
1673 |
|
|
(neg:VDF (match_operand:VDF 3 "spu_reg_operand" "0")))))]
|
1674 |
|
|
""
|
1675 |
|
|
"dfnms\t%0,%1,%2"
|
1676 |
|
|
[(set_attr "type" "fpd")])
|
1677 |
|
|
|
1678 |
|
|
;; If signed zeros are ignored, -(a * b - c) = -a * b + c.
|
1679 |
|
|
(define_expand "fnma4"
|
1680 |
|
|
[(set (match_operand:VDF 0 "spu_reg_operand" "")
|
1681 |
|
|
(neg:VDF
|
1682 |
|
|
(fma:VDF
|
1683 |
|
|
(match_operand:VDF 1 "spu_reg_operand" "")
|
1684 |
|
|
(match_operand:VDF 2 "spu_reg_operand" "")
|
1685 |
|
|
(neg:VDF (match_operand:VDF 3 "spu_reg_operand" "")))))]
|
1686 |
|
|
"!HONOR_SIGNED_ZEROS (mode)"
|
1687 |
|
|
"")
|
1688 |
|
|
|
1689 |
|
|
;; If signed zeros are ignored, -(a * b + c) = -a * b - c.
|
1690 |
|
|
(define_expand "fnms4"
|
1691 |
|
|
[(set (match_operand:VDF 0 "register_operand" "")
|
1692 |
|
|
(neg:VDF
|
1693 |
|
|
(fma:VDF
|
1694 |
|
|
(match_operand:VDF 1 "register_operand" "")
|
1695 |
|
|
(match_operand:VDF 2 "register_operand" "")
|
1696 |
|
|
(match_operand:VDF 3 "register_operand" ""))))]
|
1697 |
|
|
"!HONOR_SIGNED_ZEROS (mode)"
|
1698 |
|
|
"")
|
1699 |
|
|
|
1700 |
|
|
;; mul highpart, used for divide by constant optimizations.
|
1701 |
|
|
|
1702 |
|
|
(define_expand "smulsi3_highpart"
|
1703 |
|
|
[(set (match_operand:SI 0 "register_operand" "")
|
1704 |
|
|
(truncate:SI
|
1705 |
|
|
(ashiftrt:DI
|
1706 |
|
|
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
|
1707 |
|
|
(sign_extend:DI (match_operand:SI 2 "register_operand" "")))
|
1708 |
|
|
(const_int 32))))]
|
1709 |
|
|
""
|
1710 |
|
|
{
|
1711 |
|
|
rtx t0 = gen_reg_rtx (SImode);
|
1712 |
|
|
rtx t1 = gen_reg_rtx (SImode);
|
1713 |
|
|
rtx t2 = gen_reg_rtx (SImode);
|
1714 |
|
|
rtx t3 = gen_reg_rtx (SImode);
|
1715 |
|
|
rtx t4 = gen_reg_rtx (SImode);
|
1716 |
|
|
rtx t5 = gen_reg_rtx (SImode);
|
1717 |
|
|
rtx t6 = gen_reg_rtx (SImode);
|
1718 |
|
|
rtx t7 = gen_reg_rtx (SImode);
|
1719 |
|
|
rtx t8 = gen_reg_rtx (SImode);
|
1720 |
|
|
rtx t9 = gen_reg_rtx (SImode);
|
1721 |
|
|
rtx t11 = gen_reg_rtx (SImode);
|
1722 |
|
|
rtx t12 = gen_reg_rtx (SImode);
|
1723 |
|
|
rtx t14 = gen_reg_rtx (SImode);
|
1724 |
|
|
rtx t15 = gen_reg_rtx (HImode);
|
1725 |
|
|
rtx t16 = gen_reg_rtx (HImode);
|
1726 |
|
|
rtx t17 = gen_reg_rtx (HImode);
|
1727 |
|
|
rtx t18 = gen_reg_rtx (HImode);
|
1728 |
|
|
rtx t19 = gen_reg_rtx (SImode);
|
1729 |
|
|
rtx t20 = gen_reg_rtx (SImode);
|
1730 |
|
|
rtx t21 = gen_reg_rtx (SImode);
|
1731 |
|
|
rtx op1_hi = gen_rtx_SUBREG (HImode, operands[1], 2);
|
1732 |
|
|
rtx op2_hi = gen_rtx_SUBREG (HImode, operands[2], 2);
|
1733 |
|
|
rtx t0_hi = gen_rtx_SUBREG (HImode, t0, 2);
|
1734 |
|
|
rtx t1_hi = gen_rtx_SUBREG (HImode, t1, 2);
|
1735 |
|
|
|
1736 |
|
|
rtx insn = emit_insn (gen_lshrsi3 (t0, operands[1], GEN_INT (16)));
|
1737 |
|
|
emit_insn (gen_lshrsi3 (t1, operands[2], GEN_INT (16)));
|
1738 |
|
|
emit_insn (gen_umulhisi3 (t2, op1_hi, op2_hi));
|
1739 |
|
|
emit_insn (gen_mpyh_si (t3, operands[1], operands[2]));
|
1740 |
|
|
emit_insn (gen_mpyh_si (t4, operands[2], operands[1]));
|
1741 |
|
|
emit_insn (gen_mpyhh_si (t5, operands[1], operands[2]));
|
1742 |
|
|
emit_insn (gen_mpys_si (t6, t0_hi, op2_hi));
|
1743 |
|
|
emit_insn (gen_mpys_si (t7, t1_hi, op1_hi));
|
1744 |
|
|
|
1745 |
|
|
/* Gen carry bits (in t9 and t11). */
|
1746 |
|
|
emit_insn (gen_addsi3 (t8, t2, t3));
|
1747 |
|
|
emit_insn (gen_cg_si (t9, t2, t3));
|
1748 |
|
|
emit_insn (gen_cg_si (t11, t8, t4));
|
1749 |
|
|
|
1750 |
|
|
/* Gen high 32 bits in operand[0]. Correct for mpys. */
|
1751 |
|
|
emit_insn (gen_addx_si (t12, t5, t6, t9));
|
1752 |
|
|
emit_insn (gen_addx_si (t14, t12, t7, t11));
|
1753 |
|
|
|
1754 |
|
|
/* mpys treats both operands as signed when we really want it to treat
|
1755 |
|
|
the first operand as signed and the second operand as unsigned.
|
1756 |
|
|
The code below corrects for that difference. */
|
1757 |
|
|
emit_insn (gen_cgt_hi (t15, op1_hi, GEN_INT (-1)));
|
1758 |
|
|
emit_insn (gen_cgt_hi (t16, op2_hi, GEN_INT (-1)));
|
1759 |
|
|
emit_insn (gen_andc_hi (t17, t1_hi, t15));
|
1760 |
|
|
emit_insn (gen_andc_hi (t18, t0_hi, t16));
|
1761 |
|
|
emit_insn (gen_extendhisi2 (t19, t17));
|
1762 |
|
|
emit_insn (gen_extendhisi2 (t20, t18));
|
1763 |
|
|
emit_insn (gen_addsi3 (t21, t19, t20));
|
1764 |
|
|
emit_insn (gen_addsi3 (operands[0], t14, t21));
|
1765 |
|
|
unshare_all_rtl_in_chain (insn);
|
1766 |
|
|
DONE;
|
1767 |
|
|
})
|
1768 |
|
|
|
1769 |
|
|
(define_expand "umulsi3_highpart"
|
1770 |
|
|
[(set (match_operand:SI 0 "register_operand" "")
|
1771 |
|
|
(truncate:SI
|
1772 |
|
|
(ashiftrt:DI
|
1773 |
|
|
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
|
1774 |
|
|
(zero_extend:DI (match_operand:SI 2 "register_operand" "")))
|
1775 |
|
|
(const_int 32))))]
|
1776 |
|
|
""
|
1777 |
|
|
|
1778 |
|
|
{
|
1779 |
|
|
rtx t0 = gen_reg_rtx (SImode);
|
1780 |
|
|
rtx t1 = gen_reg_rtx (SImode);
|
1781 |
|
|
rtx t2 = gen_reg_rtx (SImode);
|
1782 |
|
|
rtx t3 = gen_reg_rtx (SImode);
|
1783 |
|
|
rtx t4 = gen_reg_rtx (SImode);
|
1784 |
|
|
rtx t5 = gen_reg_rtx (SImode);
|
1785 |
|
|
rtx t6 = gen_reg_rtx (SImode);
|
1786 |
|
|
rtx t7 = gen_reg_rtx (SImode);
|
1787 |
|
|
rtx t8 = gen_reg_rtx (SImode);
|
1788 |
|
|
rtx t9 = gen_reg_rtx (SImode);
|
1789 |
|
|
rtx t10 = gen_reg_rtx (SImode);
|
1790 |
|
|
rtx t12 = gen_reg_rtx (SImode);
|
1791 |
|
|
rtx t13 = gen_reg_rtx (SImode);
|
1792 |
|
|
rtx t14 = gen_reg_rtx (SImode);
|
1793 |
|
|
rtx op1_hi = gen_rtx_SUBREG (HImode, operands[1], 2);
|
1794 |
|
|
rtx op2_hi = gen_rtx_SUBREG (HImode, operands[2], 2);
|
1795 |
|
|
rtx t0_hi = gen_rtx_SUBREG (HImode, t0, 2);
|
1796 |
|
|
|
1797 |
|
|
rtx insn = emit_insn (gen_rotlsi3 (t0, operands[2], GEN_INT (16)));
|
1798 |
|
|
emit_insn (gen_umulhisi3 (t1, op1_hi, op2_hi));
|
1799 |
|
|
emit_insn (gen_umulhisi3 (t2, op1_hi, t0_hi));
|
1800 |
|
|
emit_insn (gen_mpyhhu_si (t3, operands[1], t0));
|
1801 |
|
|
emit_insn (gen_mpyhhu_si (t4, operands[1], operands[2]));
|
1802 |
|
|
emit_insn (gen_ashlsi3 (t5, t2, GEN_INT (16)));
|
1803 |
|
|
emit_insn (gen_ashlsi3 (t6, t3, GEN_INT (16)));
|
1804 |
|
|
emit_insn (gen_lshrsi3 (t7, t2, GEN_INT (16)));
|
1805 |
|
|
emit_insn (gen_lshrsi3 (t8, t3, GEN_INT (16)));
|
1806 |
|
|
|
1807 |
|
|
/* Gen carry bits (in t10 and t12). */
|
1808 |
|
|
emit_insn (gen_addsi3 (t9, t1, t5));
|
1809 |
|
|
emit_insn (gen_cg_si (t10, t1, t5));
|
1810 |
|
|
emit_insn (gen_cg_si (t12, t9, t6));
|
1811 |
|
|
|
1812 |
|
|
/* Gen high 32 bits in operand[0]. */
|
1813 |
|
|
emit_insn (gen_addx_si (t13, t4, t7, t10));
|
1814 |
|
|
emit_insn (gen_addx_si (t14, t13, t8, t12));
|
1815 |
|
|
emit_insn (gen_movsi (operands[0], t14));
|
1816 |
|
|
unshare_all_rtl_in_chain (insn);
|
1817 |
|
|
|
1818 |
|
|
DONE;
|
1819 |
|
|
})
|
1820 |
|
|
|
1821 |
|
|
;; div
|
1822 |
|
|
|
1823 |
|
|
;; Not necessarily the best implementation of divide but faster then
|
1824 |
|
|
;; the default that gcc provides because this is inlined and it uses
|
1825 |
|
|
;; clz.
|
1826 |
|
|
(define_insn "divmodsi4"
|
1827 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=&r")
|
1828 |
|
|
(div:SI (match_operand:SI 1 "spu_reg_operand" "r")
|
1829 |
|
|
(match_operand:SI 2 "spu_reg_operand" "r")))
|
1830 |
|
|
(set (match_operand:SI 3 "spu_reg_operand" "=&r")
|
1831 |
|
|
(mod:SI (match_dup 1)
|
1832 |
|
|
(match_dup 2)))
|
1833 |
|
|
(clobber (match_scratch:SI 4 "=&r"))
|
1834 |
|
|
(clobber (match_scratch:SI 5 "=&r"))
|
1835 |
|
|
(clobber (match_scratch:SI 6 "=&r"))
|
1836 |
|
|
(clobber (match_scratch:SI 7 "=&r"))
|
1837 |
|
|
(clobber (match_scratch:SI 8 "=&r"))
|
1838 |
|
|
(clobber (match_scratch:SI 9 "=&r"))
|
1839 |
|
|
(clobber (match_scratch:SI 10 "=&r"))
|
1840 |
|
|
(clobber (match_scratch:SI 11 "=&r"))
|
1841 |
|
|
(clobber (match_scratch:SI 12 "=&r"))
|
1842 |
|
|
(clobber (reg:SI 130))]
|
1843 |
|
|
""
|
1844 |
|
|
"heqi %2,0\\n\\
|
1845 |
|
|
hbrr 3f,1f\\n\\
|
1846 |
|
|
sfi %8,%1,0\\n\\
|
1847 |
|
|
sfi %9,%2,0\\n\\
|
1848 |
|
|
cgti %10,%1,-1\\n\\
|
1849 |
|
|
cgti %11,%2,-1\\n\\
|
1850 |
|
|
selb %8,%8,%1,%10\\n\\
|
1851 |
|
|
selb %9,%9,%2,%11\\n\\
|
1852 |
|
|
clz %4,%8\\n\\
|
1853 |
|
|
clz %7,%9\\n\\
|
1854 |
|
|
il %5,1\\n\\
|
1855 |
|
|
fsmbi %0,0\\n\\
|
1856 |
|
|
sf %7,%4,%7\\n\\
|
1857 |
|
|
shlqbyi %3,%8,0\\n\\
|
1858 |
|
|
xor %11,%10,%11\\n\\
|
1859 |
|
|
shl %5,%5,%7\\n\\
|
1860 |
|
|
shl %4,%9,%7\\n\\
|
1861 |
|
|
lnop \\n\\
|
1862 |
|
|
1: or %12,%0,%5\\n\\
|
1863 |
|
|
rotqmbii %5,%5,-1\\n\\
|
1864 |
|
|
clgt %6,%4,%3\\n\\
|
1865 |
|
|
lnop \\n\\
|
1866 |
|
|
sf %7,%4,%3\\n\\
|
1867 |
|
|
rotqmbii %4,%4,-1\\n\\
|
1868 |
|
|
selb %0,%12,%0,%6\\n\\
|
1869 |
|
|
lnop \\n\\
|
1870 |
|
|
selb %3,%7,%3,%6\\n\\
|
1871 |
|
|
3: brnz %5,1b\\n\\
|
1872 |
|
|
2: sfi %8,%3,0\\n\\
|
1873 |
|
|
sfi %9,%0,0\\n\\
|
1874 |
|
|
selb %3,%8,%3,%10\\n\\
|
1875 |
|
|
selb %0,%0,%9,%11"
|
1876 |
|
|
[(set_attr "type" "multi0")
|
1877 |
|
|
(set_attr "length" "128")])
|
1878 |
|
|
|
1879 |
|
|
(define_insn "udivmodsi4"
|
1880 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=&r")
|
1881 |
|
|
(udiv:SI (match_operand:SI 1 "spu_reg_operand" "r")
|
1882 |
|
|
(match_operand:SI 2 "spu_reg_operand" "r")))
|
1883 |
|
|
(set (match_operand:SI 3 "spu_reg_operand" "=&r")
|
1884 |
|
|
(umod:SI (match_dup 1)
|
1885 |
|
|
(match_dup 2)))
|
1886 |
|
|
(clobber (match_scratch:SI 4 "=&r"))
|
1887 |
|
|
(clobber (match_scratch:SI 5 "=&r"))
|
1888 |
|
|
(clobber (match_scratch:SI 6 "=&r"))
|
1889 |
|
|
(clobber (match_scratch:SI 7 "=&r"))
|
1890 |
|
|
(clobber (match_scratch:SI 8 "=&r"))
|
1891 |
|
|
(clobber (reg:SI 130))]
|
1892 |
|
|
""
|
1893 |
|
|
"heqi %2,0\\n\\
|
1894 |
|
|
hbrr 3f,1f\\n\\
|
1895 |
|
|
clz %7,%2\\n\\
|
1896 |
|
|
clz %4,%1\\n\\
|
1897 |
|
|
il %5,1\\n\\
|
1898 |
|
|
fsmbi %0,0\\n\\
|
1899 |
|
|
sf %7,%4,%7\\n\\
|
1900 |
|
|
ori %3,%1,0\\n\\
|
1901 |
|
|
shl %5,%5,%7\\n\\
|
1902 |
|
|
shl %4,%2,%7\\n\\
|
1903 |
|
|
1: or %8,%0,%5\\n\\
|
1904 |
|
|
rotqmbii %5,%5,-1\\n\\
|
1905 |
|
|
clgt %6,%4,%3\\n\\
|
1906 |
|
|
lnop \\n\\
|
1907 |
|
|
sf %7,%4,%3\\n\\
|
1908 |
|
|
rotqmbii %4,%4,-1\\n\\
|
1909 |
|
|
selb %0,%8,%0,%6\\n\\
|
1910 |
|
|
lnop \\n\\
|
1911 |
|
|
selb %3,%7,%3,%6\\n\\
|
1912 |
|
|
3: brnz %5,1b\\n\\
|
1913 |
|
|
2:"
|
1914 |
|
|
[(set_attr "type" "multi0")
|
1915 |
|
|
(set_attr "length" "80")])
|
1916 |
|
|
|
1917 |
|
|
(define_expand "div3"
|
1918 |
|
|
[(parallel
|
1919 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "")
|
1920 |
|
|
(div:VSF (match_operand:VSF 1 "spu_reg_operand" "")
|
1921 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "")))
|
1922 |
|
|
(clobber (match_scratch:VSF 3 ""))
|
1923 |
|
|
(clobber (match_scratch:VSF 4 ""))
|
1924 |
|
|
(clobber (match_scratch:VSF 5 ""))])]
|
1925 |
|
|
""
|
1926 |
|
|
"")
|
1927 |
|
|
|
1928 |
|
|
(define_insn_and_split "*div3_fast"
|
1929 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
1930 |
|
|
(div:VSF (match_operand:VSF 1 "spu_reg_operand" "r")
|
1931 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")))
|
1932 |
|
|
(clobber (match_scratch:VSF 3 "=&r"))
|
1933 |
|
|
(clobber (match_scratch:VSF 4 "=&r"))
|
1934 |
|
|
(clobber (scratch:VSF))]
|
1935 |
|
|
"flag_unsafe_math_optimizations"
|
1936 |
|
|
"#"
|
1937 |
|
|
"reload_completed"
|
1938 |
|
|
[(set (match_dup:VSF 0)
|
1939 |
|
|
(div:VSF (match_dup:VSF 1)
|
1940 |
|
|
(match_dup:VSF 2)))
|
1941 |
|
|
(clobber (match_dup:VSF 3))
|
1942 |
|
|
(clobber (match_dup:VSF 4))
|
1943 |
|
|
(clobber (scratch:VSF))]
|
1944 |
|
|
{
|
1945 |
|
|
emit_insn (gen_frest_(operands[3], operands[2]));
|
1946 |
|
|
emit_insn (gen_fi_(operands[3], operands[2], operands[3]));
|
1947 |
|
|
emit_insn (gen_mul3(operands[4], operands[1], operands[3]));
|
1948 |
|
|
emit_insn (gen_fnma4(operands[0], operands[4], operands[2], operands[1]));
|
1949 |
|
|
emit_insn (gen_fma4(operands[0], operands[0], operands[3], operands[4]));
|
1950 |
|
|
DONE;
|
1951 |
|
|
})
|
1952 |
|
|
|
1953 |
|
|
(define_insn_and_split "*div3_adjusted"
|
1954 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
1955 |
|
|
(div:VSF (match_operand:VSF 1 "spu_reg_operand" "r")
|
1956 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")))
|
1957 |
|
|
(clobber (match_scratch:VSF 3 "=&r"))
|
1958 |
|
|
(clobber (match_scratch:VSF 4 "=&r"))
|
1959 |
|
|
(clobber (match_scratch:VSF 5 "=&r"))]
|
1960 |
|
|
"!flag_unsafe_math_optimizations"
|
1961 |
|
|
"#"
|
1962 |
|
|
"reload_completed"
|
1963 |
|
|
[(set (match_dup:VSF 0)
|
1964 |
|
|
(div:VSF (match_dup:VSF 1)
|
1965 |
|
|
(match_dup:VSF 2)))
|
1966 |
|
|
(clobber (match_dup:VSF 3))
|
1967 |
|
|
(clobber (match_dup:VSF 4))
|
1968 |
|
|
(clobber (match_dup:VSF 5))]
|
1969 |
|
|
{
|
1970 |
|
|
emit_insn (gen_frest_ (operands[3], operands[2]));
|
1971 |
|
|
emit_insn (gen_fi_ (operands[3], operands[2], operands[3]));
|
1972 |
|
|
emit_insn (gen_mul3 (operands[4], operands[1], operands[3]));
|
1973 |
|
|
emit_insn (gen_fnma4 (operands[5], operands[4], operands[2], operands[1]));
|
1974 |
|
|
emit_insn (gen_fma4 (operands[3], operands[5], operands[3], operands[4]));
|
1975 |
|
|
|
1976 |
|
|
/* Due to truncation error, the quotient result may be low by 1 ulp.
|
1977 |
|
|
Conditionally add one if the estimate is too small in magnitude. */
|
1978 |
|
|
|
1979 |
|
|
emit_move_insn (gen_lowpart (mode, operands[4]),
|
1980 |
|
|
spu_const (mode, 0x80000000ULL));
|
1981 |
|
|
emit_move_insn (gen_lowpart (mode, operands[5]),
|
1982 |
|
|
spu_const (mode, 0x3f800000ULL));
|
1983 |
|
|
emit_insn (gen_selb (operands[5], operands[5], operands[1], operands[4]));
|
1984 |
|
|
|
1985 |
|
|
emit_insn (gen_add3 (gen_lowpart (mode, operands[4]),
|
1986 |
|
|
gen_lowpart (mode, operands[3]),
|
1987 |
|
|
spu_const (mode, 1)));
|
1988 |
|
|
emit_insn (gen_fnma4 (operands[0], operands[2], operands[4], operands[1]));
|
1989 |
|
|
emit_insn (gen_mul3 (operands[0], operands[0], operands[5]));
|
1990 |
|
|
emit_insn (gen_cgt_ (gen_lowpart (mode, operands[0]),
|
1991 |
|
|
gen_lowpart (mode, operands[0]),
|
1992 |
|
|
spu_const (mode, -1)));
|
1993 |
|
|
emit_insn (gen_selb (operands[0], operands[3], operands[4], operands[0]));
|
1994 |
|
|
DONE;
|
1995 |
|
|
})
|
1996 |
|
|
|
1997 |
|
|
|
1998 |
|
|
;; sqrt
|
1999 |
|
|
|
2000 |
|
|
(define_insn_and_split "sqrtsf2"
|
2001 |
|
|
[(set (match_operand:SF 0 "spu_reg_operand" "=r")
|
2002 |
|
|
(sqrt:SF (match_operand:SF 1 "spu_reg_operand" "r")))
|
2003 |
|
|
(clobber (match_scratch:SF 2 "=&r"))
|
2004 |
|
|
(clobber (match_scratch:SF 3 "=&r"))
|
2005 |
|
|
(clobber (match_scratch:SF 4 "=&r"))
|
2006 |
|
|
(clobber (match_scratch:SF 5 "=&r"))]
|
2007 |
|
|
""
|
2008 |
|
|
"#"
|
2009 |
|
|
"reload_completed"
|
2010 |
|
|
[(set (match_dup:SF 0)
|
2011 |
|
|
(sqrt:SF (match_dup:SF 1)))
|
2012 |
|
|
(clobber (match_dup:SF 2))
|
2013 |
|
|
(clobber (match_dup:SF 3))
|
2014 |
|
|
(clobber (match_dup:SF 4))
|
2015 |
|
|
(clobber (match_dup:SF 5))]
|
2016 |
|
|
{
|
2017 |
|
|
emit_move_insn (operands[3],spu_float_const(\"0.5\",SFmode));
|
2018 |
|
|
emit_move_insn (operands[4],spu_float_const(\"1.00000011920928955078125\",SFmode));
|
2019 |
|
|
emit_insn (gen_frsqest_sf(operands[2],operands[1]));
|
2020 |
|
|
emit_insn (gen_fi_sf(operands[2],operands[1],operands[2]));
|
2021 |
|
|
emit_insn (gen_mulsf3(operands[5],operands[2],operands[1]));
|
2022 |
|
|
emit_insn (gen_mulsf3(operands[3],operands[5],operands[3]));
|
2023 |
|
|
emit_insn (gen_fnmasf4(operands[4],operands[2],operands[5],operands[4]));
|
2024 |
|
|
emit_insn (gen_fmasf4(operands[0],operands[4],operands[3],operands[5]));
|
2025 |
|
|
DONE;
|
2026 |
|
|
})
|
2027 |
|
|
|
2028 |
|
|
(define_insn "frest_"
|
2029 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
2030 |
|
|
(unspec:VSF [(match_operand:VSF 1 "spu_reg_operand" "r")] UNSPEC_FREST))]
|
2031 |
|
|
""
|
2032 |
|
|
"frest\t%0,%1"
|
2033 |
|
|
[(set_attr "type" "shuf")])
|
2034 |
|
|
|
2035 |
|
|
(define_insn "frsqest_"
|
2036 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
2037 |
|
|
(unspec:VSF [(match_operand:VSF 1 "spu_reg_operand" "r")] UNSPEC_FRSQEST))]
|
2038 |
|
|
""
|
2039 |
|
|
"frsqest\t%0,%1"
|
2040 |
|
|
[(set_attr "type" "shuf")])
|
2041 |
|
|
|
2042 |
|
|
(define_insn "fi_"
|
2043 |
|
|
[(set (match_operand:VSF 0 "spu_reg_operand" "=r")
|
2044 |
|
|
(unspec:VSF [(match_operand:VSF 1 "spu_reg_operand" "r")
|
2045 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")] UNSPEC_FI))]
|
2046 |
|
|
""
|
2047 |
|
|
"fi\t%0,%1,%2"
|
2048 |
|
|
[(set_attr "type" "fp7")])
|
2049 |
|
|
|
2050 |
|
|
|
2051 |
|
|
;; and
|
2052 |
|
|
|
2053 |
|
|
(define_insn "and3"
|
2054 |
|
|
[(set (match_operand:MOV 0 "spu_reg_operand" "=r,r")
|
2055 |
|
|
(and:MOV (match_operand:MOV 1 "spu_reg_operand" "r,r")
|
2056 |
|
|
(match_operand:MOV 2 "spu_logical_operand" "r,C")))]
|
2057 |
|
|
""
|
2058 |
|
|
"@
|
2059 |
|
|
and\t%0,%1,%2
|
2060 |
|
|
and%j2i\t%0,%1,%J2")
|
2061 |
|
|
|
2062 |
|
|
(define_insn "anddi3"
|
2063 |
|
|
[(set (match_operand:DI 0 "spu_reg_operand" "=r,r")
|
2064 |
|
|
(and:DI (match_operand:DI 1 "spu_reg_operand" "r,r")
|
2065 |
|
|
(match_operand:DI 2 "spu_logical_operand" "r,c")))]
|
2066 |
|
|
""
|
2067 |
|
|
"@
|
2068 |
|
|
and\t%0,%1,%2
|
2069 |
|
|
and%k2i\t%0,%1,%K2")
|
2070 |
|
|
|
2071 |
|
|
(define_insn "andti3"
|
2072 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2073 |
|
|
(and:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2074 |
|
|
(match_operand:TI 2 "spu_logical_operand" "r,Y")))]
|
2075 |
|
|
""
|
2076 |
|
|
"@
|
2077 |
|
|
and\t%0,%1,%2
|
2078 |
|
|
and%m2i\t%0,%1,%L2")
|
2079 |
|
|
|
2080 |
|
|
(define_insn "andc_"
|
2081 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "=r")
|
2082 |
|
|
(and:ALL (not:ALL (match_operand:ALL 2 "spu_reg_operand" "r"))
|
2083 |
|
|
(match_operand:ALL 1 "spu_reg_operand" "r")))]
|
2084 |
|
|
""
|
2085 |
|
|
"andc\t%0,%1,%2")
|
2086 |
|
|
|
2087 |
|
|
(define_insn "nand_"
|
2088 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "=r")
|
2089 |
|
|
(not:ALL (and:ALL (match_operand:ALL 2 "spu_reg_operand" "r")
|
2090 |
|
|
(match_operand:ALL 1 "spu_reg_operand" "r"))))]
|
2091 |
|
|
""
|
2092 |
|
|
"nand\t%0,%1,%2")
|
2093 |
|
|
|
2094 |
|
|
|
2095 |
|
|
;; ior
|
2096 |
|
|
|
2097 |
|
|
(define_insn "ior3"
|
2098 |
|
|
[(set (match_operand:MOV 0 "spu_reg_operand" "=r,r,r")
|
2099 |
|
|
(ior:MOV (match_operand:MOV 1 "spu_reg_operand" "r,r,0")
|
2100 |
|
|
(match_operand:MOV 2 "spu_ior_operand" "r,C,D")))]
|
2101 |
|
|
""
|
2102 |
|
|
"@
|
2103 |
|
|
or\t%0,%1,%2
|
2104 |
|
|
or%j2i\t%0,%1,%J2
|
2105 |
|
|
iohl\t%0,%J2")
|
2106 |
|
|
|
2107 |
|
|
(define_insn "iordi3"
|
2108 |
|
|
[(set (match_operand:DI 0 "spu_reg_operand" "=r,r,r")
|
2109 |
|
|
(ior:DI (match_operand:DI 1 "spu_reg_operand" "r,r,0")
|
2110 |
|
|
(match_operand:DI 2 "spu_ior_operand" "r,c,d")))]
|
2111 |
|
|
""
|
2112 |
|
|
"@
|
2113 |
|
|
or\t%0,%1,%2
|
2114 |
|
|
or%k2i\t%0,%1,%K2
|
2115 |
|
|
iohl\t%0,%K2")
|
2116 |
|
|
|
2117 |
|
|
(define_insn "iorti3"
|
2118 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r,r")
|
2119 |
|
|
(ior:TI (match_operand:TI 1 "spu_reg_operand" "r,r,0")
|
2120 |
|
|
(match_operand:TI 2 "spu_ior_operand" "r,Y,Z")))]
|
2121 |
|
|
""
|
2122 |
|
|
"@
|
2123 |
|
|
or\t%0,%1,%2
|
2124 |
|
|
or%m2i\t%0,%1,%L2
|
2125 |
|
|
iohl\t%0,%L2")
|
2126 |
|
|
|
2127 |
|
|
(define_insn "orc_"
|
2128 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "=r")
|
2129 |
|
|
(ior:ALL (not:ALL (match_operand:ALL 2 "spu_reg_operand" "r"))
|
2130 |
|
|
(match_operand:ALL 1 "spu_reg_operand" "r")))]
|
2131 |
|
|
""
|
2132 |
|
|
"orc\t%0,%1,%2")
|
2133 |
|
|
|
2134 |
|
|
(define_insn "nor_"
|
2135 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "=r")
|
2136 |
|
|
(not:ALL (ior:ALL (match_operand:ALL 1 "spu_reg_operand" "r")
|
2137 |
|
|
(match_operand:ALL 2 "spu_reg_operand" "r"))))]
|
2138 |
|
|
""
|
2139 |
|
|
"nor\t%0,%1,%2")
|
2140 |
|
|
|
2141 |
|
|
;; xor
|
2142 |
|
|
|
2143 |
|
|
(define_insn "xor3"
|
2144 |
|
|
[(set (match_operand:MOV 0 "spu_reg_operand" "=r,r")
|
2145 |
|
|
(xor:MOV (match_operand:MOV 1 "spu_reg_operand" "r,r")
|
2146 |
|
|
(match_operand:MOV 2 "spu_logical_operand" "r,B")))]
|
2147 |
|
|
""
|
2148 |
|
|
"@
|
2149 |
|
|
xor\t%0,%1,%2
|
2150 |
|
|
xor%j2i\t%0,%1,%J2")
|
2151 |
|
|
|
2152 |
|
|
(define_insn "xordi3"
|
2153 |
|
|
[(set (match_operand:DI 0 "spu_reg_operand" "=r,r")
|
2154 |
|
|
(xor:DI (match_operand:DI 1 "spu_reg_operand" "r,r")
|
2155 |
|
|
(match_operand:DI 2 "spu_logical_operand" "r,c")))]
|
2156 |
|
|
""
|
2157 |
|
|
"@
|
2158 |
|
|
xor\t%0,%1,%2
|
2159 |
|
|
xor%k2i\t%0,%1,%K2")
|
2160 |
|
|
|
2161 |
|
|
(define_insn "xorti3"
|
2162 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2163 |
|
|
(xor:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2164 |
|
|
(match_operand:TI 2 "spu_logical_operand" "r,Y")))]
|
2165 |
|
|
""
|
2166 |
|
|
"@
|
2167 |
|
|
xor\t%0,%1,%2
|
2168 |
|
|
xor%m2i\t%0,%1,%L2")
|
2169 |
|
|
|
2170 |
|
|
(define_insn "eqv_"
|
2171 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "=r")
|
2172 |
|
|
(not:ALL (xor:ALL (match_operand:ALL 1 "spu_reg_operand" "r")
|
2173 |
|
|
(match_operand:ALL 2 "spu_reg_operand" "r"))))]
|
2174 |
|
|
""
|
2175 |
|
|
"eqv\t%0,%1,%2")
|
2176 |
|
|
|
2177 |
|
|
;; one_cmpl
|
2178 |
|
|
|
2179 |
|
|
(define_insn "one_cmpl2"
|
2180 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "=r")
|
2181 |
|
|
(not:ALL (match_operand:ALL 1 "spu_reg_operand" "r")))]
|
2182 |
|
|
""
|
2183 |
|
|
"nor\t%0,%1,%1")
|
2184 |
|
|
|
2185 |
|
|
|
2186 |
|
|
;; selb
|
2187 |
|
|
|
2188 |
|
|
(define_expand "selb"
|
2189 |
|
|
[(set (match_operand 0 "spu_reg_operand" "")
|
2190 |
|
|
(unspec [(match_operand 1 "spu_reg_operand" "")
|
2191 |
|
|
(match_operand 2 "spu_reg_operand" "")
|
2192 |
|
|
(match_operand 3 "spu_reg_operand" "")] UNSPEC_SELB))]
|
2193 |
|
|
""
|
2194 |
|
|
{
|
2195 |
|
|
rtx s = gen__selb (operands[0], operands[1], operands[2], operands[3]);
|
2196 |
|
|
PUT_MODE (SET_SRC (s), GET_MODE (operands[0]));
|
2197 |
|
|
emit_insn (s);
|
2198 |
|
|
DONE;
|
2199 |
|
|
})
|
2200 |
|
|
|
2201 |
|
|
;; This could be defined as a combination of logical operations, but at
|
2202 |
|
|
;; one time it caused a crash due to recursive expansion of rtl during CSE.
|
2203 |
|
|
(define_insn "_selb"
|
2204 |
|
|
[(set (match_operand 0 "spu_reg_operand" "=r")
|
2205 |
|
|
(unspec [(match_operand 1 "spu_reg_operand" "r")
|
2206 |
|
|
(match_operand 2 "spu_reg_operand" "r")
|
2207 |
|
|
(match_operand 3 "spu_reg_operand" "r")] UNSPEC_SELB))]
|
2208 |
|
|
"GET_MODE(operands[0]) == GET_MODE(operands[1])
|
2209 |
|
|
&& GET_MODE(operands[1]) == GET_MODE(operands[2])"
|
2210 |
|
|
"selb\t%0,%1,%2,%3")
|
2211 |
|
|
|
2212 |
|
|
|
2213 |
|
|
;; Misc. byte/bit operations
|
2214 |
|
|
;; clz/ctz/ffs/popcount/parity
|
2215 |
|
|
;; cntb/sumb
|
2216 |
|
|
|
2217 |
|
|
(define_insn "clz2"
|
2218 |
|
|
[(set (match_operand:VSI 0 "spu_reg_operand" "=r")
|
2219 |
|
|
(clz:VSI (match_operand:VSI 1 "spu_reg_operand" "r")))]
|
2220 |
|
|
""
|
2221 |
|
|
"clz\t%0,%1")
|
2222 |
|
|
|
2223 |
|
|
(define_expand "ctz2"
|
2224 |
|
|
[(set (match_dup 2)
|
2225 |
|
|
(neg:VSI (match_operand:VSI 1 "spu_reg_operand" "")))
|
2226 |
|
|
(set (match_dup 3) (and:VSI (match_dup 1)
|
2227 |
|
|
(match_dup 2)))
|
2228 |
|
|
(set (match_dup 4) (clz:VSI (match_dup 3)))
|
2229 |
|
|
(set (match_operand:VSI 0 "spu_reg_operand" "")
|
2230 |
|
|
(minus:VSI (match_dup 5) (match_dup 4)))]
|
2231 |
|
|
""
|
2232 |
|
|
{
|
2233 |
|
|
operands[2] = gen_reg_rtx (mode);
|
2234 |
|
|
operands[3] = gen_reg_rtx (mode);
|
2235 |
|
|
operands[4] = gen_reg_rtx (mode);
|
2236 |
|
|
operands[5] = spu_const(mode, 31);
|
2237 |
|
|
})
|
2238 |
|
|
|
2239 |
|
|
(define_expand "clrsb2"
|
2240 |
|
|
[(set (match_dup 2)
|
2241 |
|
|
(gt:VSI (match_operand:VSI 1 "spu_reg_operand" "") (match_dup 5)))
|
2242 |
|
|
(set (match_dup 3) (not:VSI (xor:VSI (match_dup 1) (match_dup 2))))
|
2243 |
|
|
(set (match_dup 4) (clz:VSI (match_dup 3)))
|
2244 |
|
|
(set (match_operand:VSI 0 "spu_reg_operand")
|
2245 |
|
|
(plus:VSI (match_dup 4) (match_dup 5)))]
|
2246 |
|
|
""
|
2247 |
|
|
{
|
2248 |
|
|
operands[2] = gen_reg_rtx (mode);
|
2249 |
|
|
operands[3] = gen_reg_rtx (mode);
|
2250 |
|
|
operands[4] = gen_reg_rtx (mode);
|
2251 |
|
|
operands[5] = spu_const(mode, -1);
|
2252 |
|
|
})
|
2253 |
|
|
|
2254 |
|
|
(define_expand "ffs2"
|
2255 |
|
|
[(set (match_dup 2)
|
2256 |
|
|
(neg:VSI (match_operand:VSI 1 "spu_reg_operand" "")))
|
2257 |
|
|
(set (match_dup 3) (and:VSI (match_dup 1)
|
2258 |
|
|
(match_dup 2)))
|
2259 |
|
|
(set (match_dup 4) (clz:VSI (match_dup 3)))
|
2260 |
|
|
(set (match_operand:VSI 0 "spu_reg_operand" "")
|
2261 |
|
|
(minus:VSI (match_dup 5) (match_dup 4)))]
|
2262 |
|
|
""
|
2263 |
|
|
{
|
2264 |
|
|
operands[2] = gen_reg_rtx (mode);
|
2265 |
|
|
operands[3] = gen_reg_rtx (mode);
|
2266 |
|
|
operands[4] = gen_reg_rtx (mode);
|
2267 |
|
|
operands[5] = spu_const(mode, 32);
|
2268 |
|
|
})
|
2269 |
|
|
|
2270 |
|
|
(define_expand "popcountsi2"
|
2271 |
|
|
[(set (match_dup 2)
|
2272 |
|
|
(unspec:SI [(match_operand:SI 1 "spu_reg_operand" "")]
|
2273 |
|
|
UNSPEC_CNTB))
|
2274 |
|
|
(set (match_dup 3)
|
2275 |
|
|
(unspec:HI [(match_dup 2)] UNSPEC_SUMB))
|
2276 |
|
|
(set (match_operand:SI 0 "spu_reg_operand" "")
|
2277 |
|
|
(sign_extend:SI (match_dup 3)))]
|
2278 |
|
|
""
|
2279 |
|
|
{
|
2280 |
|
|
operands[2] = gen_reg_rtx (SImode);
|
2281 |
|
|
operands[3] = gen_reg_rtx (HImode);
|
2282 |
|
|
})
|
2283 |
|
|
|
2284 |
|
|
(define_expand "paritysi2"
|
2285 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "")
|
2286 |
|
|
(parity:SI (match_operand:SI 1 "spu_reg_operand" "")))]
|
2287 |
|
|
""
|
2288 |
|
|
{
|
2289 |
|
|
operands[2] = gen_reg_rtx (SImode);
|
2290 |
|
|
emit_insn (gen_popcountsi2(operands[2], operands[1]));
|
2291 |
|
|
emit_insn (gen_andsi3(operands[0], operands[2], GEN_INT (1)));
|
2292 |
|
|
DONE;
|
2293 |
|
|
})
|
2294 |
|
|
|
2295 |
|
|
(define_insn "cntb_si"
|
2296 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
2297 |
|
|
(unspec:SI [(match_operand:SI 1 "spu_reg_operand" "r")]
|
2298 |
|
|
UNSPEC_CNTB))]
|
2299 |
|
|
""
|
2300 |
|
|
"cntb\t%0,%1"
|
2301 |
|
|
[(set_attr "type" "fxb")])
|
2302 |
|
|
|
2303 |
|
|
(define_insn "cntb_v16qi"
|
2304 |
|
|
[(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
|
2305 |
|
|
(unspec:V16QI [(match_operand:V16QI 1 "spu_reg_operand" "r")]
|
2306 |
|
|
UNSPEC_CNTB))]
|
2307 |
|
|
""
|
2308 |
|
|
"cntb\t%0,%1"
|
2309 |
|
|
[(set_attr "type" "fxb")])
|
2310 |
|
|
|
2311 |
|
|
(define_insn "sumb_si"
|
2312 |
|
|
[(set (match_operand:HI 0 "spu_reg_operand" "=r")
|
2313 |
|
|
(unspec:HI [(match_operand:SI 1 "spu_reg_operand" "r")] UNSPEC_SUMB))]
|
2314 |
|
|
""
|
2315 |
|
|
"sumb\t%0,%1,%1"
|
2316 |
|
|
[(set_attr "type" "fxb")])
|
2317 |
|
|
|
2318 |
|
|
|
2319 |
|
|
;; ashl, vashl
|
2320 |
|
|
|
2321 |
|
|
(define_insn "ashl3"
|
2322 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
|
2323 |
|
|
(ashift:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
|
2324 |
|
|
(match_operand:VHSI 2 "spu_nonmem_operand" "r,W")))]
|
2325 |
|
|
""
|
2326 |
|
|
"@
|
2327 |
|
|
shl\t%0,%1,%2
|
2328 |
|
|
shli\t%0,%1,%2"
|
2329 |
|
|
[(set_attr "type" "fx3")])
|
2330 |
|
|
|
2331 |
|
|
(define_insn_and_split "ashldi3"
|
2332 |
|
|
[(set (match_operand:DI 0 "spu_reg_operand" "=r,r")
|
2333 |
|
|
(ashift:DI (match_operand:DI 1 "spu_reg_operand" "r,r")
|
2334 |
|
|
(match_operand:SI 2 "spu_nonmem_operand" "r,I")))
|
2335 |
|
|
(clobber (match_scratch:SI 3 "=&r,X"))]
|
2336 |
|
|
""
|
2337 |
|
|
"#"
|
2338 |
|
|
"reload_completed"
|
2339 |
|
|
[(set (match_dup:DI 0)
|
2340 |
|
|
(ashift:DI (match_dup:DI 1)
|
2341 |
|
|
(match_dup:SI 2)))]
|
2342 |
|
|
{
|
2343 |
|
|
rtx op0 = gen_rtx_REG (TImode, REGNO (operands[0]));
|
2344 |
|
|
rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
|
2345 |
|
|
rtx op2 = operands[2];
|
2346 |
|
|
rtx op3 = operands[3];
|
2347 |
|
|
|
2348 |
|
|
if (GET_CODE (operands[2]) == REG)
|
2349 |
|
|
{
|
2350 |
|
|
emit_insn (gen_addsi3 (op3, op2, GEN_INT (64)));
|
2351 |
|
|
emit_insn (gen_rotlti3 (op0, op1, GEN_INT (64)));
|
2352 |
|
|
emit_insn (gen_shlqbybi_ti (op0, op0, op3));
|
2353 |
|
|
emit_insn (gen_shlqbi_ti (op0, op0, op3));
|
2354 |
|
|
}
|
2355 |
|
|
else
|
2356 |
|
|
{
|
2357 |
|
|
HOST_WIDE_INT val = INTVAL (operands[2]);
|
2358 |
|
|
emit_insn (gen_rotlti3 (op0, op1, GEN_INT (64)));
|
2359 |
|
|
emit_insn (gen_shlqby_ti (op0, op0, GEN_INT (val / 8 + 8)));
|
2360 |
|
|
if (val % 8)
|
2361 |
|
|
emit_insn (gen_shlqbi_ti (op0, op0, GEN_INT (val % 8)));
|
2362 |
|
|
}
|
2363 |
|
|
DONE;
|
2364 |
|
|
})
|
2365 |
|
|
|
2366 |
|
|
(define_expand "ashlti3"
|
2367 |
|
|
[(parallel [(set (match_operand:TI 0 "spu_reg_operand" "")
|
2368 |
|
|
(ashift:TI (match_operand:TI 1 "spu_reg_operand" "")
|
2369 |
|
|
(match_operand:SI 2 "spu_nonmem_operand" "")))
|
2370 |
|
|
(clobber (match_dup:TI 3))])]
|
2371 |
|
|
""
|
2372 |
|
|
"if (GET_CODE (operands[2]) == CONST_INT)
|
2373 |
|
|
{
|
2374 |
|
|
emit_insn (gen_ashlti3_imm(operands[0], operands[1], operands[2]));
|
2375 |
|
|
DONE;
|
2376 |
|
|
}
|
2377 |
|
|
operands[3] = gen_reg_rtx (TImode);")
|
2378 |
|
|
|
2379 |
|
|
(define_insn_and_split "ashlti3_imm"
|
2380 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2381 |
|
|
(ashift:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2382 |
|
|
(match_operand:SI 2 "immediate_operand" "O,P")))]
|
2383 |
|
|
""
|
2384 |
|
|
"@
|
2385 |
|
|
shlqbyi\t%0,%1,%h2
|
2386 |
|
|
shlqbii\t%0,%1,%e2"
|
2387 |
|
|
"!satisfies_constraint_O (operands[2]) && !satisfies_constraint_P (operands[2])"
|
2388 |
|
|
[(set (match_dup:TI 0)
|
2389 |
|
|
(ashift:TI (match_dup:TI 1)
|
2390 |
|
|
(match_dup:SI 3)))
|
2391 |
|
|
(set (match_dup:TI 0)
|
2392 |
|
|
(ashift:TI (match_dup:TI 0)
|
2393 |
|
|
(match_dup:SI 4)))]
|
2394 |
|
|
{
|
2395 |
|
|
HOST_WIDE_INT val = INTVAL(operands[2]);
|
2396 |
|
|
operands[3] = GEN_INT (val&7);
|
2397 |
|
|
operands[4] = GEN_INT (val&-8);
|
2398 |
|
|
}
|
2399 |
|
|
[(set_attr "type" "shuf,shuf")])
|
2400 |
|
|
|
2401 |
|
|
(define_insn_and_split "ashlti3_reg"
|
2402 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r")
|
2403 |
|
|
(ashift:TI (match_operand:TI 1 "spu_reg_operand" "r")
|
2404 |
|
|
(match_operand:SI 2 "spu_reg_operand" "r")))
|
2405 |
|
|
(clobber (match_operand:TI 3 "spu_reg_operand" "=&r"))]
|
2406 |
|
|
""
|
2407 |
|
|
"#"
|
2408 |
|
|
""
|
2409 |
|
|
[(set (match_dup:TI 3)
|
2410 |
|
|
(ashift:TI (match_dup:TI 1)
|
2411 |
|
|
(and:SI (match_dup:SI 2)
|
2412 |
|
|
(const_int 7))))
|
2413 |
|
|
(set (match_dup:TI 0)
|
2414 |
|
|
(ashift:TI (match_dup:TI 3)
|
2415 |
|
|
(and:SI (match_dup:SI 2)
|
2416 |
|
|
(const_int -8))))]
|
2417 |
|
|
"")
|
2418 |
|
|
|
2419 |
|
|
(define_insn "shlqbybi_ti"
|
2420 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2421 |
|
|
(ashift:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2422 |
|
|
(and:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2423 |
|
|
(const_int -8))))]
|
2424 |
|
|
""
|
2425 |
|
|
"@
|
2426 |
|
|
shlqbybi\t%0,%1,%2
|
2427 |
|
|
shlqbyi\t%0,%1,%h2"
|
2428 |
|
|
[(set_attr "type" "shuf,shuf")])
|
2429 |
|
|
|
2430 |
|
|
(define_insn "shlqbi_ti"
|
2431 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2432 |
|
|
(ashift:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2433 |
|
|
(and:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2434 |
|
|
(const_int 7))))]
|
2435 |
|
|
""
|
2436 |
|
|
"@
|
2437 |
|
|
shlqbi\t%0,%1,%2
|
2438 |
|
|
shlqbii\t%0,%1,%e2"
|
2439 |
|
|
[(set_attr "type" "shuf,shuf")])
|
2440 |
|
|
|
2441 |
|
|
(define_insn "shlqby_ti"
|
2442 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2443 |
|
|
(ashift:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2444 |
|
|
(mult:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2445 |
|
|
(const_int 8))))]
|
2446 |
|
|
""
|
2447 |
|
|
"@
|
2448 |
|
|
shlqby\t%0,%1,%2
|
2449 |
|
|
shlqbyi\t%0,%1,%f2"
|
2450 |
|
|
[(set_attr "type" "shuf,shuf")])
|
2451 |
|
|
|
2452 |
|
|
|
2453 |
|
|
;; lshr, vlshr
|
2454 |
|
|
|
2455 |
|
|
(define_insn_and_split "lshr3"
|
2456 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
|
2457 |
|
|
(lshiftrt:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
|
2458 |
|
|
(match_operand:VHSI 2 "spu_nonmem_operand" "r,W")))
|
2459 |
|
|
(clobber (match_scratch:VHSI 3 "=&r,X"))]
|
2460 |
|
|
""
|
2461 |
|
|
"@
|
2462 |
|
|
#
|
2463 |
|
|
rotmi\t%0,%1,-%2"
|
2464 |
|
|
"reload_completed && GET_CODE (operands[2]) == REG"
|
2465 |
|
|
[(set (match_dup:VHSI 3)
|
2466 |
|
|
(neg:VHSI (match_dup:VHSI 2)))
|
2467 |
|
|
(set (match_dup:VHSI 0)
|
2468 |
|
|
(lshiftrt:VHSI (match_dup:VHSI 1)
|
2469 |
|
|
(neg:VHSI (match_dup:VHSI 3))))]
|
2470 |
|
|
""
|
2471 |
|
|
[(set_attr "type" "*,fx3")])
|
2472 |
|
|
|
2473 |
|
|
(define_insn "lshr3_imm"
|
2474 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r")
|
2475 |
|
|
(lshiftrt:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r")
|
2476 |
|
|
(match_operand:VHSI 2 "immediate_operand" "W")))]
|
2477 |
|
|
""
|
2478 |
|
|
"rotmi\t%0,%1,-%2"
|
2479 |
|
|
[(set_attr "type" "fx3")])
|
2480 |
|
|
|
2481 |
|
|
(define_insn "rotm_"
|
2482 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
|
2483 |
|
|
(lshiftrt:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
|
2484 |
|
|
(neg:VHSI (match_operand:VHSI 2 "spu_nonmem_operand" "r,W"))))]
|
2485 |
|
|
""
|
2486 |
|
|
"@
|
2487 |
|
|
rotm\t%0,%1,%2
|
2488 |
|
|
rotmi\t%0,%1,-%2"
|
2489 |
|
|
[(set_attr "type" "fx3")])
|
2490 |
|
|
|
2491 |
|
|
(define_insn_and_split "lshr3"
|
2492 |
|
|
[(set (match_operand:DTI 0 "spu_reg_operand" "=r,r,r")
|
2493 |
|
|
(lshiftrt:DTI (match_operand:DTI 1 "spu_reg_operand" "r,r,r")
|
2494 |
|
|
(match_operand:SI 2 "spu_nonmem_operand" "r,O,P")))]
|
2495 |
|
|
""
|
2496 |
|
|
"@
|
2497 |
|
|
#
|
2498 |
|
|
rotqmbyi\t%0,%1,-%h2
|
2499 |
|
|
rotqmbii\t%0,%1,-%e2"
|
2500 |
|
|
"REG_P (operands[2]) || (!satisfies_constraint_O (operands[2]) && !satisfies_constraint_P (operands[2]))"
|
2501 |
|
|
[(set (match_dup:DTI 3)
|
2502 |
|
|
(lshiftrt:DTI (match_dup:DTI 1)
|
2503 |
|
|
(match_dup:SI 4)))
|
2504 |
|
|
(set (match_dup:DTI 0)
|
2505 |
|
|
(lshiftrt:DTI (match_dup:DTI 3)
|
2506 |
|
|
(match_dup:SI 5)))]
|
2507 |
|
|
{
|
2508 |
|
|
operands[3] = gen_reg_rtx (mode);
|
2509 |
|
|
if (GET_CODE (operands[2]) == CONST_INT)
|
2510 |
|
|
{
|
2511 |
|
|
HOST_WIDE_INT val = INTVAL(operands[2]);
|
2512 |
|
|
operands[4] = GEN_INT (val & 7);
|
2513 |
|
|
operands[5] = GEN_INT (val & -8);
|
2514 |
|
|
}
|
2515 |
|
|
else
|
2516 |
|
|
{
|
2517 |
|
|
rtx t0 = gen_reg_rtx (SImode);
|
2518 |
|
|
rtx t1 = gen_reg_rtx (SImode);
|
2519 |
|
|
emit_insn (gen_subsi3(t0, GEN_INT(0), operands[2]));
|
2520 |
|
|
emit_insn (gen_subsi3(t1, GEN_INT(7), operands[2]));
|
2521 |
|
|
operands[4] = gen_rtx_AND (SImode, gen_rtx_NEG (SImode, t0), GEN_INT (7));
|
2522 |
|
|
operands[5] = gen_rtx_AND (SImode, gen_rtx_NEG (SImode, gen_rtx_AND (SImode, t1, GEN_INT (-8))), GEN_INT (-8));
|
2523 |
|
|
}
|
2524 |
|
|
}
|
2525 |
|
|
[(set_attr "type" "*,shuf,shuf")])
|
2526 |
|
|
|
2527 |
|
|
(define_expand "shrqbybi_"
|
2528 |
|
|
[(set (match_operand:DTI 0 "spu_reg_operand" "=r,r")
|
2529 |
|
|
(lshiftrt:DTI (match_operand:DTI 1 "spu_reg_operand" "r,r")
|
2530 |
|
|
(and:SI (neg:SI (and:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2531 |
|
|
(const_int -8)))
|
2532 |
|
|
(const_int -8))))]
|
2533 |
|
|
""
|
2534 |
|
|
{
|
2535 |
|
|
if (GET_CODE (operands[2]) == CONST_INT)
|
2536 |
|
|
operands[2] = GEN_INT (7 - INTVAL (operands[2]));
|
2537 |
|
|
else
|
2538 |
|
|
{
|
2539 |
|
|
rtx t0 = gen_reg_rtx (SImode);
|
2540 |
|
|
emit_insn (gen_subsi3 (t0, GEN_INT (7), operands[2]));
|
2541 |
|
|
operands[2] = t0;
|
2542 |
|
|
}
|
2543 |
|
|
})
|
2544 |
|
|
|
2545 |
|
|
(define_insn "rotqmbybi_"
|
2546 |
|
|
[(set (match_operand:DTI 0 "spu_reg_operand" "=r,r")
|
2547 |
|
|
(lshiftrt:DTI (match_operand:DTI 1 "spu_reg_operand" "r,r")
|
2548 |
|
|
(and:SI (neg:SI (and:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2549 |
|
|
(const_int -8)))
|
2550 |
|
|
(const_int -8))))]
|
2551 |
|
|
""
|
2552 |
|
|
"@
|
2553 |
|
|
rotqmbybi\t%0,%1,%2
|
2554 |
|
|
rotqmbyi\t%0,%1,-%H2"
|
2555 |
|
|
[(set_attr "type" "shuf")])
|
2556 |
|
|
|
2557 |
|
|
(define_insn_and_split "shrqbi_"
|
2558 |
|
|
[(set (match_operand:DTI 0 "spu_reg_operand" "=r,r")
|
2559 |
|
|
(lshiftrt:DTI (match_operand:DTI 1 "spu_reg_operand" "r,r")
|
2560 |
|
|
(and:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2561 |
|
|
(const_int 7))))
|
2562 |
|
|
(clobber (match_scratch:SI 3 "=&r,X"))]
|
2563 |
|
|
""
|
2564 |
|
|
"#"
|
2565 |
|
|
"reload_completed"
|
2566 |
|
|
[(set (match_dup:DTI 0)
|
2567 |
|
|
(lshiftrt:DTI (match_dup:DTI 1)
|
2568 |
|
|
(and:SI (neg:SI (match_dup:SI 3)) (const_int 7))))]
|
2569 |
|
|
{
|
2570 |
|
|
if (GET_CODE (operands[2]) == CONST_INT)
|
2571 |
|
|
operands[3] = GEN_INT (-INTVAL (operands[2]));
|
2572 |
|
|
else
|
2573 |
|
|
emit_insn (gen_subsi3 (operands[3], GEN_INT (0), operands[2]));
|
2574 |
|
|
}
|
2575 |
|
|
[(set_attr "type" "shuf")])
|
2576 |
|
|
|
2577 |
|
|
(define_insn "rotqmbi_"
|
2578 |
|
|
[(set (match_operand:DTI 0 "spu_reg_operand" "=r,r")
|
2579 |
|
|
(lshiftrt:DTI (match_operand:DTI 1 "spu_reg_operand" "r,r")
|
2580 |
|
|
(and:SI (neg:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I"))
|
2581 |
|
|
(const_int 7))))]
|
2582 |
|
|
""
|
2583 |
|
|
"@
|
2584 |
|
|
rotqmbi\t%0,%1,%2
|
2585 |
|
|
rotqmbii\t%0,%1,-%E2"
|
2586 |
|
|
[(set_attr "type" "shuf")])
|
2587 |
|
|
|
2588 |
|
|
(define_expand "shrqby_"
|
2589 |
|
|
[(set (match_operand:DTI 0 "spu_reg_operand" "=r,r")
|
2590 |
|
|
(lshiftrt:DTI (match_operand:DTI 1 "spu_reg_operand" "r,r")
|
2591 |
|
|
(mult:SI (neg:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I"))
|
2592 |
|
|
(const_int 8))))]
|
2593 |
|
|
""
|
2594 |
|
|
{
|
2595 |
|
|
if (GET_CODE (operands[2]) == CONST_INT)
|
2596 |
|
|
operands[2] = GEN_INT (-INTVAL (operands[2]));
|
2597 |
|
|
else
|
2598 |
|
|
{
|
2599 |
|
|
rtx t0 = gen_reg_rtx (SImode);
|
2600 |
|
|
emit_insn (gen_subsi3 (t0, GEN_INT (0), operands[2]));
|
2601 |
|
|
operands[2] = t0;
|
2602 |
|
|
}
|
2603 |
|
|
})
|
2604 |
|
|
|
2605 |
|
|
(define_insn "rotqmby_"
|
2606 |
|
|
[(set (match_operand:DTI 0 "spu_reg_operand" "=r,r")
|
2607 |
|
|
(lshiftrt:DTI (match_operand:DTI 1 "spu_reg_operand" "r,r")
|
2608 |
|
|
(mult:SI (neg:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I"))
|
2609 |
|
|
(const_int 8))))]
|
2610 |
|
|
""
|
2611 |
|
|
"@
|
2612 |
|
|
rotqmby\t%0,%1,%2
|
2613 |
|
|
rotqmbyi\t%0,%1,-%F2"
|
2614 |
|
|
[(set_attr "type" "shuf")])
|
2615 |
|
|
|
2616 |
|
|
|
2617 |
|
|
;; ashr, vashr
|
2618 |
|
|
|
2619 |
|
|
(define_insn_and_split "ashr3"
|
2620 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
|
2621 |
|
|
(ashiftrt:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
|
2622 |
|
|
(match_operand:VHSI 2 "spu_nonmem_operand" "r,W")))
|
2623 |
|
|
(clobber (match_scratch:VHSI 3 "=&r,X"))]
|
2624 |
|
|
""
|
2625 |
|
|
"@
|
2626 |
|
|
#
|
2627 |
|
|
rotmai\t%0,%1,-%2"
|
2628 |
|
|
"reload_completed && GET_CODE (operands[2]) == REG"
|
2629 |
|
|
[(set (match_dup:VHSI 3)
|
2630 |
|
|
(neg:VHSI (match_dup:VHSI 2)))
|
2631 |
|
|
(set (match_dup:VHSI 0)
|
2632 |
|
|
(ashiftrt:VHSI (match_dup:VHSI 1)
|
2633 |
|
|
(neg:VHSI (match_dup:VHSI 3))))]
|
2634 |
|
|
""
|
2635 |
|
|
[(set_attr "type" "*,fx3")])
|
2636 |
|
|
|
2637 |
|
|
(define_insn "ashr3_imm"
|
2638 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r")
|
2639 |
|
|
(ashiftrt:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r")
|
2640 |
|
|
(match_operand:VHSI 2 "immediate_operand" "W")))]
|
2641 |
|
|
""
|
2642 |
|
|
"rotmai\t%0,%1,-%2"
|
2643 |
|
|
[(set_attr "type" "fx3")])
|
2644 |
|
|
|
2645 |
|
|
|
2646 |
|
|
(define_insn "rotma_"
|
2647 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
|
2648 |
|
|
(ashiftrt:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
|
2649 |
|
|
(neg:VHSI (match_operand:VHSI 2 "spu_nonmem_operand" "r,W"))))]
|
2650 |
|
|
""
|
2651 |
|
|
"@
|
2652 |
|
|
rotma\t%0,%1,%2
|
2653 |
|
|
rotmai\t%0,%1,-%2"
|
2654 |
|
|
[(set_attr "type" "fx3")])
|
2655 |
|
|
|
2656 |
|
|
(define_insn_and_split "ashrdi3"
|
2657 |
|
|
[(set (match_operand:DI 0 "spu_reg_operand" "=r,r")
|
2658 |
|
|
(ashiftrt:DI (match_operand:DI 1 "spu_reg_operand" "r,r")
|
2659 |
|
|
(match_operand:SI 2 "spu_nonmem_operand" "r,I")))
|
2660 |
|
|
(clobber (match_scratch:TI 3 "=&r,&r"))
|
2661 |
|
|
(clobber (match_scratch:TI 4 "=&r,&r"))
|
2662 |
|
|
(clobber (match_scratch:SI 5 "=&r,&r"))]
|
2663 |
|
|
""
|
2664 |
|
|
"#"
|
2665 |
|
|
"reload_completed"
|
2666 |
|
|
[(set (match_dup:DI 0)
|
2667 |
|
|
(ashiftrt:DI (match_dup:DI 1)
|
2668 |
|
|
(match_dup:SI 2)))]
|
2669 |
|
|
{
|
2670 |
|
|
rtx op0 = gen_rtx_REG (TImode, REGNO (operands[0]));
|
2671 |
|
|
rtx op0v = gen_rtx_REG (V4SImode, REGNO (op0));
|
2672 |
|
|
rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
|
2673 |
|
|
rtx op1s = gen_rtx_REG (SImode, REGNO (op1));
|
2674 |
|
|
rtx op2 = operands[2];
|
2675 |
|
|
rtx op3 = operands[3];
|
2676 |
|
|
rtx op4 = operands[4];
|
2677 |
|
|
rtx op5 = operands[5];
|
2678 |
|
|
|
2679 |
|
|
if (GET_CODE (op2) == CONST_INT && INTVAL (op2) >= 63)
|
2680 |
|
|
{
|
2681 |
|
|
rtx op0s = gen_rtx_REG (SImode, REGNO (op0));
|
2682 |
|
|
emit_insn (gen_ashrsi3 (op0s, op1s, GEN_INT (32)));
|
2683 |
|
|
emit_insn (gen_spu_fsm (op0v, op0s));
|
2684 |
|
|
}
|
2685 |
|
|
else if (GET_CODE (op2) == CONST_INT && INTVAL (op2) >= 32)
|
2686 |
|
|
{
|
2687 |
|
|
rtx op0d = gen_rtx_REG (V2DImode, REGNO (op0));
|
2688 |
|
|
HOST_WIDE_INT val = INTVAL (op2);
|
2689 |
|
|
emit_insn (gen_lshrti3 (op0, op1, GEN_INT (32)));
|
2690 |
|
|
emit_insn (gen_spu_xswd (op0d, op0v));
|
2691 |
|
|
if (val > 32)
|
2692 |
|
|
emit_insn (gen_vashrv4si3 (op0v, op0v, spu_const (V4SImode, val - 32)));
|
2693 |
|
|
}
|
2694 |
|
|
else
|
2695 |
|
|
{
|
2696 |
|
|
rtx op3v = gen_rtx_REG (V4SImode, REGNO (op3));
|
2697 |
|
|
unsigned char arr[16] = {
|
2698 |
|
|
0xff, 0xff, 0xff, 0xff,
|
2699 |
|
|
0xff, 0xff, 0xff, 0xff,
|
2700 |
|
|
0x00, 0x00, 0x00, 0x00,
|
2701 |
|
|
0x00, 0x00, 0x00, 0x00
|
2702 |
|
|
};
|
2703 |
|
|
|
2704 |
|
|
emit_insn (gen_ashrsi3 (op5, op1s, GEN_INT (31)));
|
2705 |
|
|
emit_move_insn (op4, array_to_constant (TImode, arr));
|
2706 |
|
|
emit_insn (gen_spu_fsm (op3v, op5));
|
2707 |
|
|
|
2708 |
|
|
if (GET_CODE (operands[2]) == REG)
|
2709 |
|
|
{
|
2710 |
|
|
emit_insn (gen_selb (op4, op3, op1, op4));
|
2711 |
|
|
emit_insn (gen_negsi2 (op5, op2));
|
2712 |
|
|
emit_insn (gen_rotqbybi_ti (op0, op4, op5));
|
2713 |
|
|
emit_insn (gen_rotqbi_ti (op0, op0, op5));
|
2714 |
|
|
}
|
2715 |
|
|
else
|
2716 |
|
|
{
|
2717 |
|
|
HOST_WIDE_INT val = -INTVAL (op2);
|
2718 |
|
|
emit_insn (gen_selb (op0, op3, op1, op4));
|
2719 |
|
|
if ((val - 7) / 8)
|
2720 |
|
|
emit_insn (gen_rotqby_ti (op0, op0, GEN_INT ((val - 7) / 8)));
|
2721 |
|
|
if (val % 8)
|
2722 |
|
|
emit_insn (gen_rotqbi_ti (op0, op0, GEN_INT (val % 8)));
|
2723 |
|
|
}
|
2724 |
|
|
}
|
2725 |
|
|
DONE;
|
2726 |
|
|
})
|
2727 |
|
|
|
2728 |
|
|
|
2729 |
|
|
(define_insn_and_split "ashrti3"
|
2730 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2731 |
|
|
(ashiftrt:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2732 |
|
|
(match_operand:SI 2 "spu_nonmem_operand" "r,i")))]
|
2733 |
|
|
""
|
2734 |
|
|
"#"
|
2735 |
|
|
""
|
2736 |
|
|
[(set (match_dup:TI 0)
|
2737 |
|
|
(ashiftrt:TI (match_dup:TI 1)
|
2738 |
|
|
(match_dup:SI 2)))]
|
2739 |
|
|
{
|
2740 |
|
|
rtx sign_shift = gen_reg_rtx (SImode);
|
2741 |
|
|
rtx sign_mask = gen_reg_rtx (TImode);
|
2742 |
|
|
rtx sign_mask_v4si = gen_rtx_SUBREG (V4SImode, sign_mask, 0);
|
2743 |
|
|
rtx op1_v4si = spu_gen_subreg (V4SImode, operands[1]);
|
2744 |
|
|
rtx t = gen_reg_rtx (TImode);
|
2745 |
|
|
emit_insn (gen_subsi3 (sign_shift, GEN_INT (128), force_reg (SImode, operands[2])));
|
2746 |
|
|
emit_insn (gen_vashrv4si3 (sign_mask_v4si, op1_v4si, spu_const (V4SImode, 31)));
|
2747 |
|
|
emit_insn (gen_fsm_ti (sign_mask, sign_mask));
|
2748 |
|
|
emit_insn (gen_ashlti3 (sign_mask, sign_mask, sign_shift));
|
2749 |
|
|
emit_insn (gen_lshrti3 (t, operands[1], operands[2]));
|
2750 |
|
|
emit_insn (gen_iorti3 (operands[0], t, sign_mask));
|
2751 |
|
|
DONE;
|
2752 |
|
|
})
|
2753 |
|
|
|
2754 |
|
|
;; fsm is used after rotam to replicate the sign across the whole register.
|
2755 |
|
|
(define_insn "fsm_ti"
|
2756 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r")
|
2757 |
|
|
(unspec:TI [(match_operand:TI 1 "spu_reg_operand" "r")] UNSPEC_FSM))]
|
2758 |
|
|
""
|
2759 |
|
|
"fsm\t%0,%1"
|
2760 |
|
|
[(set_attr "type" "shuf")])
|
2761 |
|
|
|
2762 |
|
|
|
2763 |
|
|
;; vrotl, rotl
|
2764 |
|
|
|
2765 |
|
|
(define_insn "rotl3"
|
2766 |
|
|
[(set (match_operand:VHSI 0 "spu_reg_operand" "=r,r")
|
2767 |
|
|
(rotate:VHSI (match_operand:VHSI 1 "spu_reg_operand" "r,r")
|
2768 |
|
|
(match_operand:VHSI 2 "spu_nonmem_operand" "r,W")))]
|
2769 |
|
|
""
|
2770 |
|
|
"@
|
2771 |
|
|
rot\t%0,%1,%2
|
2772 |
|
|
roti\t%0,%1,%2"
|
2773 |
|
|
[(set_attr "type" "fx3")])
|
2774 |
|
|
|
2775 |
|
|
(define_insn "rotlti3"
|
2776 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=&r,r,r,r")
|
2777 |
|
|
(rotate:TI (match_operand:TI 1 "spu_reg_operand" "r,r,r,r")
|
2778 |
|
|
(match_operand:SI 2 "spu_nonmem_operand" "r,O,P,I")))]
|
2779 |
|
|
""
|
2780 |
|
|
"@
|
2781 |
|
|
rotqbybi\t%0,%1,%2\;rotqbi\t%0,%0,%2
|
2782 |
|
|
rotqbyi\t%0,%1,%h2
|
2783 |
|
|
rotqbii\t%0,%1,%e2
|
2784 |
|
|
rotqbyi\t%0,%1,%h2\;rotqbii\t%0,%0,%e2"
|
2785 |
|
|
[(set_attr "length" "8,4,4,8")
|
2786 |
|
|
(set_attr "type" "multi1,shuf,shuf,multi1")])
|
2787 |
|
|
|
2788 |
|
|
(define_insn "rotqbybi_ti"
|
2789 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2790 |
|
|
(rotate:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2791 |
|
|
(and:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2792 |
|
|
(const_int -8))))]
|
2793 |
|
|
""
|
2794 |
|
|
"@
|
2795 |
|
|
rotqbybi\t%0,%1,%2
|
2796 |
|
|
rotqbyi\t%0,%1,%h2"
|
2797 |
|
|
[(set_attr "type" "shuf,shuf")])
|
2798 |
|
|
|
2799 |
|
|
(define_insn "rotqby_ti"
|
2800 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2801 |
|
|
(rotate:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2802 |
|
|
(mult:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2803 |
|
|
(const_int 8))))]
|
2804 |
|
|
""
|
2805 |
|
|
"@
|
2806 |
|
|
rotqby\t%0,%1,%2
|
2807 |
|
|
rotqbyi\t%0,%1,%f2"
|
2808 |
|
|
[(set_attr "type" "shuf,shuf")])
|
2809 |
|
|
|
2810 |
|
|
(define_insn "rotqbi_ti"
|
2811 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r,r")
|
2812 |
|
|
(rotate:TI (match_operand:TI 1 "spu_reg_operand" "r,r")
|
2813 |
|
|
(and:SI (match_operand:SI 2 "spu_nonmem_operand" "r,I")
|
2814 |
|
|
(const_int 7))))]
|
2815 |
|
|
""
|
2816 |
|
|
"@
|
2817 |
|
|
rotqbi\t%0,%1,%2
|
2818 |
|
|
rotqbii\t%0,%1,%e2"
|
2819 |
|
|
[(set_attr "type" "shuf,shuf")])
|
2820 |
|
|
|
2821 |
|
|
|
2822 |
|
|
;; struct extract/insert
|
2823 |
|
|
;; We handle mem's because GCC will generate invalid SUBREG's
|
2824 |
|
|
;; and inefficient code.
|
2825 |
|
|
|
2826 |
|
|
(define_expand "extv"
|
2827 |
|
|
[(set (match_operand:TI 0 "register_operand" "")
|
2828 |
|
|
(sign_extract:TI (match_operand 1 "nonimmediate_operand" "")
|
2829 |
|
|
(match_operand:SI 2 "const_int_operand" "")
|
2830 |
|
|
(match_operand:SI 3 "const_int_operand" "")))]
|
2831 |
|
|
""
|
2832 |
|
|
{
|
2833 |
|
|
spu_expand_extv (operands, 0);
|
2834 |
|
|
DONE;
|
2835 |
|
|
})
|
2836 |
|
|
|
2837 |
|
|
(define_expand "extzv"
|
2838 |
|
|
[(set (match_operand:TI 0 "register_operand" "")
|
2839 |
|
|
(zero_extract:TI (match_operand 1 "nonimmediate_operand" "")
|
2840 |
|
|
(match_operand:SI 2 "const_int_operand" "")
|
2841 |
|
|
(match_operand:SI 3 "const_int_operand" "")))]
|
2842 |
|
|
""
|
2843 |
|
|
{
|
2844 |
|
|
spu_expand_extv (operands, 1);
|
2845 |
|
|
DONE;
|
2846 |
|
|
})
|
2847 |
|
|
|
2848 |
|
|
(define_expand "insv"
|
2849 |
|
|
[(set (zero_extract (match_operand 0 "nonimmediate_operand" "")
|
2850 |
|
|
(match_operand:SI 1 "const_int_operand" "")
|
2851 |
|
|
(match_operand:SI 2 "const_int_operand" ""))
|
2852 |
|
|
(match_operand 3 "nonmemory_operand" ""))]
|
2853 |
|
|
""
|
2854 |
|
|
{ spu_expand_insv(operands); DONE; })
|
2855 |
|
|
|
2856 |
|
|
;; Simplify a number of patterns that get generated by extv, extzv,
|
2857 |
|
|
;; insv, and loads.
|
2858 |
|
|
(define_insn_and_split "trunc_shr_ti"
|
2859 |
|
|
[(set (match_operand:QHSI 0 "spu_reg_operand" "=r")
|
2860 |
|
|
(truncate:QHSI (match_operator:TI 2 "shiftrt_operator" [(match_operand:TI 1 "spu_reg_operand" "0")
|
2861 |
|
|
(const_int 96)])))]
|
2862 |
|
|
""
|
2863 |
|
|
"#"
|
2864 |
|
|
"reload_completed"
|
2865 |
|
|
[(const_int 0)]
|
2866 |
|
|
{
|
2867 |
|
|
spu_split_convert (operands);
|
2868 |
|
|
DONE;
|
2869 |
|
|
}
|
2870 |
|
|
[(set_attr "type" "convert")
|
2871 |
|
|
(set_attr "length" "0")])
|
2872 |
|
|
|
2873 |
|
|
(define_insn_and_split "trunc_shr_tidi"
|
2874 |
|
|
[(set (match_operand:DI 0 "spu_reg_operand" "=r")
|
2875 |
|
|
(truncate:DI (match_operator:TI 2 "shiftrt_operator" [(match_operand:TI 1 "spu_reg_operand" "0")
|
2876 |
|
|
(const_int 64)])))]
|
2877 |
|
|
""
|
2878 |
|
|
"#"
|
2879 |
|
|
"reload_completed"
|
2880 |
|
|
[(const_int 0)]
|
2881 |
|
|
{
|
2882 |
|
|
spu_split_convert (operands);
|
2883 |
|
|
DONE;
|
2884 |
|
|
}
|
2885 |
|
|
[(set_attr "type" "convert")
|
2886 |
|
|
(set_attr "length" "0")])
|
2887 |
|
|
|
2888 |
|
|
(define_insn_and_split "shl_ext_ti"
|
2889 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r")
|
2890 |
|
|
(ashift:TI (match_operator:TI 2 "extend_operator" [(match_operand:QHSI 1 "spu_reg_operand" "0")])
|
2891 |
|
|
(const_int 96)))]
|
2892 |
|
|
""
|
2893 |
|
|
"#"
|
2894 |
|
|
"reload_completed"
|
2895 |
|
|
[(const_int 0)]
|
2896 |
|
|
{
|
2897 |
|
|
spu_split_convert (operands);
|
2898 |
|
|
DONE;
|
2899 |
|
|
}
|
2900 |
|
|
[(set_attr "type" "convert")
|
2901 |
|
|
(set_attr "length" "0")])
|
2902 |
|
|
|
2903 |
|
|
(define_insn_and_split "shl_ext_diti"
|
2904 |
|
|
[(set (match_operand:TI 0 "spu_reg_operand" "=r")
|
2905 |
|
|
(ashift:TI (match_operator:TI 2 "extend_operator" [(match_operand:DI 1 "spu_reg_operand" "0")])
|
2906 |
|
|
(const_int 64)))]
|
2907 |
|
|
""
|
2908 |
|
|
"#"
|
2909 |
|
|
"reload_completed"
|
2910 |
|
|
[(const_int 0)]
|
2911 |
|
|
{
|
2912 |
|
|
spu_split_convert (operands);
|
2913 |
|
|
DONE;
|
2914 |
|
|
}
|
2915 |
|
|
[(set_attr "type" "convert")
|
2916 |
|
|
(set_attr "length" "0")])
|
2917 |
|
|
|
2918 |
|
|
(define_insn "sext_trunc_lshr_tiqisi"
|
2919 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
2920 |
|
|
(sign_extend:SI (truncate:QI (match_operator:TI 2 "shiftrt_operator" [(match_operand:TI 1 "spu_reg_operand" "r")
|
2921 |
|
|
(const_int 120)]))))]
|
2922 |
|
|
""
|
2923 |
|
|
"rotmai\t%0,%1,-24"
|
2924 |
|
|
[(set_attr "type" "fx3")])
|
2925 |
|
|
|
2926 |
|
|
(define_insn "zext_trunc_lshr_tiqisi"
|
2927 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
2928 |
|
|
(zero_extend:SI (truncate:QI (match_operator:TI 2 "shiftrt_operator" [(match_operand:TI 1 "spu_reg_operand" "r")
|
2929 |
|
|
(const_int 120)]))))]
|
2930 |
|
|
""
|
2931 |
|
|
"rotmi\t%0,%1,-24"
|
2932 |
|
|
[(set_attr "type" "fx3")])
|
2933 |
|
|
|
2934 |
|
|
(define_insn "sext_trunc_lshr_tihisi"
|
2935 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
2936 |
|
|
(sign_extend:SI (truncate:HI (match_operator:TI 2 "shiftrt_operator" [(match_operand:TI 1 "spu_reg_operand" "r")
|
2937 |
|
|
(const_int 112)]))))]
|
2938 |
|
|
""
|
2939 |
|
|
"rotmai\t%0,%1,-16"
|
2940 |
|
|
[(set_attr "type" "fx3")])
|
2941 |
|
|
|
2942 |
|
|
(define_insn "zext_trunc_lshr_tihisi"
|
2943 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
2944 |
|
|
(zero_extend:SI (truncate:HI (match_operator:TI 2 "shiftrt_operator" [(match_operand:TI 1 "spu_reg_operand" "r")
|
2945 |
|
|
(const_int 112)]))))]
|
2946 |
|
|
""
|
2947 |
|
|
"rotmi\t%0,%1,-16"
|
2948 |
|
|
[(set_attr "type" "fx3")])
|
2949 |
|
|
|
2950 |
|
|
|
2951 |
|
|
;; String/block move insn.
|
2952 |
|
|
;; Argument 0 is the destination
|
2953 |
|
|
;; Argument 1 is the source
|
2954 |
|
|
;; Argument 2 is the length
|
2955 |
|
|
;; Argument 3 is the alignment
|
2956 |
|
|
|
2957 |
|
|
(define_expand "movstrsi"
|
2958 |
|
|
[(parallel [(set (match_operand:BLK 0 "" "")
|
2959 |
|
|
(match_operand:BLK 1 "" ""))
|
2960 |
|
|
(use (match_operand:SI 2 "" ""))
|
2961 |
|
|
(use (match_operand:SI 3 "" ""))])]
|
2962 |
|
|
""
|
2963 |
|
|
"
|
2964 |
|
|
{
|
2965 |
|
|
if (spu_expand_block_move (operands))
|
2966 |
|
|
DONE;
|
2967 |
|
|
else
|
2968 |
|
|
FAIL;
|
2969 |
|
|
}")
|
2970 |
|
|
|
2971 |
|
|
|
2972 |
|
|
;; jump
|
2973 |
|
|
|
2974 |
|
|
(define_insn "indirect_jump"
|
2975 |
|
|
[(set (pc) (match_operand:SI 0 "spu_reg_operand" "r"))]
|
2976 |
|
|
""
|
2977 |
|
|
"bi\t%0"
|
2978 |
|
|
[(set_attr "type" "br")])
|
2979 |
|
|
|
2980 |
|
|
(define_insn "jump"
|
2981 |
|
|
[(set (pc)
|
2982 |
|
|
(label_ref (match_operand 0 "" "")))]
|
2983 |
|
|
""
|
2984 |
|
|
"br\t%0"
|
2985 |
|
|
[(set_attr "type" "br")])
|
2986 |
|
|
|
2987 |
|
|
|
2988 |
|
|
;; return
|
2989 |
|
|
|
2990 |
|
|
;; This will be used for leaf functions, that don't save any regs and
|
2991 |
|
|
;; don't have locals on stack, maybe... that is for functions that
|
2992 |
|
|
;; don't change $sp and don't need to save $lr.
|
2993 |
|
|
(define_expand "return"
|
2994 |
|
|
[(return)]
|
2995 |
|
|
"direct_return()"
|
2996 |
|
|
"")
|
2997 |
|
|
|
2998 |
|
|
;; used in spu_expand_epilogue to generate return from a function and
|
2999 |
|
|
;; explicitly set use of $lr.
|
3000 |
|
|
|
3001 |
|
|
(define_insn "_return"
|
3002 |
|
|
[(return)]
|
3003 |
|
|
""
|
3004 |
|
|
"bi\t$lr"
|
3005 |
|
|
[(set_attr "type" "br")])
|
3006 |
|
|
|
3007 |
|
|
|
3008 |
|
|
|
3009 |
|
|
;; ceq
|
3010 |
|
|
|
3011 |
|
|
(define_insn "ceq_"
|
3012 |
|
|
[(set (match_operand:VQHSI 0 "spu_reg_operand" "=r,r")
|
3013 |
|
|
(eq:VQHSI (match_operand:VQHSI 1 "spu_reg_operand" "r,r")
|
3014 |
|
|
(match_operand:VQHSI 2 "spu_arith_operand" "r,B")))]
|
3015 |
|
|
""
|
3016 |
|
|
"@
|
3017 |
|
|
ceq\t%0,%1,%2
|
3018 |
|
|
ceqi\t%0,%1,%2")
|
3019 |
|
|
|
3020 |
|
|
(define_insn_and_split "ceq_di"
|
3021 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3022 |
|
|
(eq:SI (match_operand:DI 1 "spu_reg_operand" "r")
|
3023 |
|
|
(match_operand:DI 2 "spu_reg_operand" "r")))]
|
3024 |
|
|
""
|
3025 |
|
|
"#"
|
3026 |
|
|
"reload_completed"
|
3027 |
|
|
[(set (match_dup:SI 0)
|
3028 |
|
|
(eq:SI (match_dup:DI 1)
|
3029 |
|
|
(match_dup:DI 2)))]
|
3030 |
|
|
{
|
3031 |
|
|
rtx op0 = gen_rtx_REG (V4SImode, REGNO (operands[0]));
|
3032 |
|
|
rtx op1 = gen_rtx_REG (V4SImode, REGNO (operands[1]));
|
3033 |
|
|
rtx op2 = gen_rtx_REG (V4SImode, REGNO (operands[2]));
|
3034 |
|
|
emit_insn (gen_ceq_v4si (op0, op1, op2));
|
3035 |
|
|
emit_insn (gen_spu_gb (op0, op0));
|
3036 |
|
|
emit_insn (gen_cgt_si (operands[0], operands[0], GEN_INT (11)));
|
3037 |
|
|
DONE;
|
3038 |
|
|
})
|
3039 |
|
|
|
3040 |
|
|
|
3041 |
|
|
;; We provide the TI compares for completeness and because some parts of
|
3042 |
|
|
;; gcc/libgcc use them, even though user code might never see it.
|
3043 |
|
|
(define_insn "ceq_ti"
|
3044 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3045 |
|
|
(eq:SI (match_operand:TI 1 "spu_reg_operand" "r")
|
3046 |
|
|
(match_operand:TI 2 "spu_reg_operand" "r")))]
|
3047 |
|
|
""
|
3048 |
|
|
"ceq\t%0,%1,%2\;gb\t%0,%0\;ceqi\t%0,%0,15"
|
3049 |
|
|
[(set_attr "type" "multi0")
|
3050 |
|
|
(set_attr "length" "12")])
|
3051 |
|
|
|
3052 |
|
|
(define_insn "ceq_"
|
3053 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3054 |
|
|
(eq: (match_operand:VSF 1 "spu_reg_operand" "r")
|
3055 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")))]
|
3056 |
|
|
""
|
3057 |
|
|
"fceq\t%0,%1,%2")
|
3058 |
|
|
|
3059 |
|
|
(define_insn "cmeq_"
|
3060 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3061 |
|
|
(eq: (abs:VSF (match_operand:VSF 1 "spu_reg_operand" "r"))
|
3062 |
|
|
(abs:VSF (match_operand:VSF 2 "spu_reg_operand" "r"))))]
|
3063 |
|
|
""
|
3064 |
|
|
"fcmeq\t%0,%1,%2")
|
3065 |
|
|
|
3066 |
|
|
;; These implementations will ignore checking of NaN or INF if
|
3067 |
|
|
;; compiled with option -ffinite-math-only.
|
3068 |
|
|
(define_expand "ceq_df"
|
3069 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3070 |
|
|
(eq:SI (match_operand:DF 1 "spu_reg_operand" "r")
|
3071 |
|
|
(match_operand:DF 2 "const_zero_operand" "i")))]
|
3072 |
|
|
""
|
3073 |
|
|
{
|
3074 |
|
|
if (spu_arch == PROCESSOR_CELL)
|
3075 |
|
|
{
|
3076 |
|
|
rtx ra = gen_reg_rtx (V4SImode);
|
3077 |
|
|
rtx rb = gen_reg_rtx (V4SImode);
|
3078 |
|
|
rtx temp = gen_reg_rtx (TImode);
|
3079 |
|
|
rtx temp_v4si = spu_gen_subreg (V4SImode, temp);
|
3080 |
|
|
rtx temp2 = gen_reg_rtx (V4SImode);
|
3081 |
|
|
rtx biteq = gen_reg_rtx (V4SImode);
|
3082 |
|
|
rtx ahi_inf = gen_reg_rtx (V4SImode);
|
3083 |
|
|
rtx a_nan = gen_reg_rtx (V4SImode);
|
3084 |
|
|
rtx a_abs = gen_reg_rtx (V4SImode);
|
3085 |
|
|
rtx b_abs = gen_reg_rtx (V4SImode);
|
3086 |
|
|
rtx iszero = gen_reg_rtx (V4SImode);
|
3087 |
|
|
rtx sign_mask = gen_reg_rtx (V4SImode);
|
3088 |
|
|
rtx nan_mask = gen_reg_rtx (V4SImode);
|
3089 |
|
|
rtx hihi_promote = gen_reg_rtx (TImode);
|
3090 |
|
|
rtx pat = spu_const_from_ints (V4SImode, 0x7FFFFFFF, 0xFFFFFFFF,
|
3091 |
|
|
0x7FFFFFFF, 0xFFFFFFFF);
|
3092 |
|
|
|
3093 |
|
|
emit_move_insn (sign_mask, pat);
|
3094 |
|
|
pat = spu_const_from_ints (V4SImode, 0x7FF00000, 0x0,
|
3095 |
|
|
0x7FF00000, 0x0);
|
3096 |
|
|
emit_move_insn (nan_mask, pat);
|
3097 |
|
|
pat = spu_const_from_ints (TImode, 0x00010203, 0x10111213,
|
3098 |
|
|
0x08090A0B, 0x18191A1B);
|
3099 |
|
|
emit_move_insn (hihi_promote, pat);
|
3100 |
|
|
|
3101 |
|
|
emit_insn (gen_spu_convert (ra, operands[1]));
|
3102 |
|
|
emit_insn (gen_spu_convert (rb, operands[2]));
|
3103 |
|
|
emit_insn (gen_ceq_v4si (biteq, ra, rb));
|
3104 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, biteq),
|
3105 |
|
|
GEN_INT (4 * 8)));
|
3106 |
|
|
emit_insn (gen_andv4si3 (biteq, biteq, temp_v4si));
|
3107 |
|
|
|
3108 |
|
|
emit_insn (gen_andv4si3 (a_abs, ra, sign_mask));
|
3109 |
|
|
emit_insn (gen_andv4si3 (b_abs, rb, sign_mask));
|
3110 |
|
|
if (!flag_finite_math_only)
|
3111 |
|
|
{
|
3112 |
|
|
emit_insn (gen_clgt_v4si (a_nan, a_abs, nan_mask));
|
3113 |
|
|
emit_insn (gen_ceq_v4si (ahi_inf, a_abs, nan_mask));
|
3114 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, a_nan),
|
3115 |
|
|
GEN_INT (4 * 8)));
|
3116 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, ahi_inf));
|
3117 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, temp2));
|
3118 |
|
|
}
|
3119 |
|
|
emit_insn (gen_iorv4si3 (temp2, a_abs, b_abs));
|
3120 |
|
|
emit_insn (gen_ceq_v4si (iszero, temp2, CONST0_RTX (V4SImode)));
|
3121 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, iszero),
|
3122 |
|
|
GEN_INT (4 * 8)));
|
3123 |
|
|
emit_insn (gen_andv4si3 (iszero, iszero, temp_v4si));
|
3124 |
|
|
emit_insn (gen_iorv4si3 (temp2, biteq, iszero));
|
3125 |
|
|
if (!flag_finite_math_only)
|
3126 |
|
|
{
|
3127 |
|
|
emit_insn (gen_andc_v4si (temp2, temp2, a_nan));
|
3128 |
|
|
}
|
3129 |
|
|
emit_insn (gen_shufb (operands[0], temp2, temp2, hihi_promote));
|
3130 |
|
|
DONE;
|
3131 |
|
|
}
|
3132 |
|
|
})
|
3133 |
|
|
|
3134 |
|
|
(define_insn "ceq__celledp"
|
3135 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3136 |
|
|
(eq: (match_operand:VDF 1 "spu_reg_operand" "r")
|
3137 |
|
|
(match_operand:VDF 2 "spu_reg_operand" "r")))]
|
3138 |
|
|
"spu_arch == PROCESSOR_CELLEDP"
|
3139 |
|
|
"dfceq\t%0,%1,%2"
|
3140 |
|
|
[(set_attr "type" "fpd")])
|
3141 |
|
|
|
3142 |
|
|
(define_insn "cmeq__celledp"
|
3143 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3144 |
|
|
(eq: (abs:VDF (match_operand:VDF 1 "spu_reg_operand" "r"))
|
3145 |
|
|
(abs:VDF (match_operand:VDF 2 "spu_reg_operand" "r"))))]
|
3146 |
|
|
"spu_arch == PROCESSOR_CELLEDP"
|
3147 |
|
|
"dfcmeq\t%0,%1,%2"
|
3148 |
|
|
[(set_attr "type" "fpd")])
|
3149 |
|
|
|
3150 |
|
|
(define_expand "ceq_v2df"
|
3151 |
|
|
[(set (match_operand:V2DI 0 "spu_reg_operand" "=r")
|
3152 |
|
|
(eq:V2DI (match_operand:V2DF 1 "spu_reg_operand" "r")
|
3153 |
|
|
(match_operand:V2DF 2 "spu_reg_operand" "r")))]
|
3154 |
|
|
""
|
3155 |
|
|
{
|
3156 |
|
|
if (spu_arch == PROCESSOR_CELL)
|
3157 |
|
|
{
|
3158 |
|
|
rtx ra = spu_gen_subreg (V4SImode, operands[1]);
|
3159 |
|
|
rtx rb = spu_gen_subreg (V4SImode, operands[2]);
|
3160 |
|
|
rtx temp = gen_reg_rtx (TImode);
|
3161 |
|
|
rtx temp_v4si = spu_gen_subreg (V4SImode, temp);
|
3162 |
|
|
rtx temp2 = gen_reg_rtx (V4SImode);
|
3163 |
|
|
rtx biteq = gen_reg_rtx (V4SImode);
|
3164 |
|
|
rtx ahi_inf = gen_reg_rtx (V4SImode);
|
3165 |
|
|
rtx a_nan = gen_reg_rtx (V4SImode);
|
3166 |
|
|
rtx a_abs = gen_reg_rtx (V4SImode);
|
3167 |
|
|
rtx b_abs = gen_reg_rtx (V4SImode);
|
3168 |
|
|
rtx iszero = gen_reg_rtx (V4SImode);
|
3169 |
|
|
rtx pat = spu_const_from_ints (V4SImode, 0x7FFFFFFF, 0xFFFFFFFF,
|
3170 |
|
|
0x7FFFFFFF, 0xFFFFFFFF);
|
3171 |
|
|
rtx sign_mask = gen_reg_rtx (V4SImode);
|
3172 |
|
|
rtx nan_mask = gen_reg_rtx (V4SImode);
|
3173 |
|
|
rtx hihi_promote = gen_reg_rtx (TImode);
|
3174 |
|
|
|
3175 |
|
|
emit_move_insn (sign_mask, pat);
|
3176 |
|
|
pat = spu_const_from_ints (V4SImode, 0x7FF00000, 0x0,
|
3177 |
|
|
0x7FF00000, 0x0);
|
3178 |
|
|
emit_move_insn (nan_mask, pat);
|
3179 |
|
|
pat = spu_const_from_ints (TImode, 0x00010203, 0x10111213,
|
3180 |
|
|
0x08090A0B, 0x18191A1B);
|
3181 |
|
|
emit_move_insn (hihi_promote, pat);
|
3182 |
|
|
|
3183 |
|
|
emit_insn (gen_ceq_v4si (biteq, ra, rb));
|
3184 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, biteq),
|
3185 |
|
|
GEN_INT (4 * 8)));
|
3186 |
|
|
emit_insn (gen_andv4si3 (biteq, biteq, temp_v4si));
|
3187 |
|
|
emit_insn (gen_andv4si3 (a_abs, ra, sign_mask));
|
3188 |
|
|
emit_insn (gen_andv4si3 (b_abs, rb, sign_mask));
|
3189 |
|
|
emit_insn (gen_clgt_v4si (a_nan, a_abs, nan_mask));
|
3190 |
|
|
emit_insn (gen_ceq_v4si (ahi_inf, a_abs, nan_mask));
|
3191 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, a_nan),
|
3192 |
|
|
GEN_INT (4 * 8)));
|
3193 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, ahi_inf));
|
3194 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, temp2));
|
3195 |
|
|
emit_insn (gen_iorv4si3 (temp2, a_abs, b_abs));
|
3196 |
|
|
emit_insn (gen_ceq_v4si (iszero, temp2, CONST0_RTX (V4SImode)));
|
3197 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, iszero),
|
3198 |
|
|
GEN_INT (4 * 8)));
|
3199 |
|
|
emit_insn (gen_andv4si3 (iszero, iszero, temp_v4si));
|
3200 |
|
|
emit_insn (gen_iorv4si3 (temp2, biteq, iszero));
|
3201 |
|
|
emit_insn (gen_andc_v4si (temp2, temp2, a_nan));
|
3202 |
|
|
emit_insn (gen_shufb (operands[0], temp2, temp2, hihi_promote));
|
3203 |
|
|
DONE;
|
3204 |
|
|
}
|
3205 |
|
|
})
|
3206 |
|
|
|
3207 |
|
|
(define_expand "cmeq_v2df"
|
3208 |
|
|
[(set (match_operand:V2DI 0 "spu_reg_operand" "=r")
|
3209 |
|
|
(eq:V2DI (abs:V2DF (match_operand:V2DF 1 "spu_reg_operand" "r"))
|
3210 |
|
|
(abs:V2DF (match_operand:V2DF 2 "spu_reg_operand" "r"))))]
|
3211 |
|
|
""
|
3212 |
|
|
{
|
3213 |
|
|
if (spu_arch == PROCESSOR_CELL)
|
3214 |
|
|
{
|
3215 |
|
|
rtx ra = spu_gen_subreg (V4SImode, operands[1]);
|
3216 |
|
|
rtx rb = spu_gen_subreg (V4SImode, operands[2]);
|
3217 |
|
|
rtx temp = gen_reg_rtx (TImode);
|
3218 |
|
|
rtx temp_v4si = spu_gen_subreg (V4SImode, temp);
|
3219 |
|
|
rtx temp2 = gen_reg_rtx (V4SImode);
|
3220 |
|
|
rtx biteq = gen_reg_rtx (V4SImode);
|
3221 |
|
|
rtx ahi_inf = gen_reg_rtx (V4SImode);
|
3222 |
|
|
rtx a_nan = gen_reg_rtx (V4SImode);
|
3223 |
|
|
rtx a_abs = gen_reg_rtx (V4SImode);
|
3224 |
|
|
rtx b_abs = gen_reg_rtx (V4SImode);
|
3225 |
|
|
|
3226 |
|
|
rtx pat = spu_const_from_ints (V4SImode, 0x7FFFFFFF, 0xFFFFFFFF,
|
3227 |
|
|
0x7FFFFFFF, 0xFFFFFFFF);
|
3228 |
|
|
rtx sign_mask = gen_reg_rtx (V4SImode);
|
3229 |
|
|
rtx nan_mask = gen_reg_rtx (V4SImode);
|
3230 |
|
|
rtx hihi_promote = gen_reg_rtx (TImode);
|
3231 |
|
|
|
3232 |
|
|
emit_move_insn (sign_mask, pat);
|
3233 |
|
|
|
3234 |
|
|
pat = spu_const_from_ints (V4SImode, 0x7FF00000, 0x0,
|
3235 |
|
|
0x7FF00000, 0x0);
|
3236 |
|
|
emit_move_insn (nan_mask, pat);
|
3237 |
|
|
pat = spu_const_from_ints (TImode, 0x00010203, 0x10111213,
|
3238 |
|
|
0x08090A0B, 0x18191A1B);
|
3239 |
|
|
emit_move_insn (hihi_promote, pat);
|
3240 |
|
|
|
3241 |
|
|
emit_insn (gen_andv4si3 (a_abs, ra, sign_mask));
|
3242 |
|
|
emit_insn (gen_andv4si3 (b_abs, rb, sign_mask));
|
3243 |
|
|
emit_insn (gen_ceq_v4si (biteq, a_abs, b_abs));
|
3244 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, biteq),
|
3245 |
|
|
GEN_INT (4 * 8)));
|
3246 |
|
|
emit_insn (gen_andv4si3 (biteq, biteq, temp_v4si));
|
3247 |
|
|
emit_insn (gen_clgt_v4si (a_nan, a_abs, nan_mask));
|
3248 |
|
|
emit_insn (gen_ceq_v4si (ahi_inf, a_abs, nan_mask));
|
3249 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, a_nan),
|
3250 |
|
|
GEN_INT (4 * 8)));
|
3251 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, ahi_inf));
|
3252 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, temp2));
|
3253 |
|
|
emit_insn (gen_andc_v4si (temp2, biteq, a_nan));
|
3254 |
|
|
emit_insn (gen_shufb (operands[0], temp2, temp2, hihi_promote));
|
3255 |
|
|
DONE;
|
3256 |
|
|
}
|
3257 |
|
|
})
|
3258 |
|
|
|
3259 |
|
|
|
3260 |
|
|
;; cgt
|
3261 |
|
|
|
3262 |
|
|
(define_insn "cgt_"
|
3263 |
|
|
[(set (match_operand:VQHSI 0 "spu_reg_operand" "=r,r")
|
3264 |
|
|
(gt:VQHSI (match_operand:VQHSI 1 "spu_reg_operand" "r,r")
|
3265 |
|
|
(match_operand:VQHSI 2 "spu_arith_operand" "r,B")))]
|
3266 |
|
|
""
|
3267 |
|
|
"@
|
3268 |
|
|
cgt\t%0,%1,%2
|
3269 |
|
|
cgti\t%0,%1,%2")
|
3270 |
|
|
|
3271 |
|
|
(define_insn "cgt_di_m1"
|
3272 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3273 |
|
|
(gt:SI (match_operand:DI 1 "spu_reg_operand" "r")
|
3274 |
|
|
(const_int -1)))]
|
3275 |
|
|
""
|
3276 |
|
|
"cgti\t%0,%1,-1")
|
3277 |
|
|
|
3278 |
|
|
(define_insn_and_split "cgt_di"
|
3279 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3280 |
|
|
(gt:SI (match_operand:DI 1 "spu_reg_operand" "r")
|
3281 |
|
|
(match_operand:DI 2 "spu_reg_operand" "r")))
|
3282 |
|
|
(clobber (match_scratch:V4SI 3 "=&r"))
|
3283 |
|
|
(clobber (match_scratch:V4SI 4 "=&r"))
|
3284 |
|
|
(clobber (match_scratch:V4SI 5 "=&r"))]
|
3285 |
|
|
""
|
3286 |
|
|
"#"
|
3287 |
|
|
"reload_completed"
|
3288 |
|
|
[(set (match_dup:SI 0)
|
3289 |
|
|
(gt:SI (match_dup:DI 1)
|
3290 |
|
|
(match_dup:DI 2)))]
|
3291 |
|
|
{
|
3292 |
|
|
rtx op0 = gen_rtx_REG (V4SImode, REGNO (operands[0]));
|
3293 |
|
|
rtx op1 = gen_rtx_REG (V4SImode, REGNO (operands[1]));
|
3294 |
|
|
rtx op2 = gen_rtx_REG (V4SImode, REGNO (operands[2]));
|
3295 |
|
|
rtx op3 = operands[3];
|
3296 |
|
|
rtx op4 = operands[4];
|
3297 |
|
|
rtx op5 = operands[5];
|
3298 |
|
|
rtx op3d = gen_rtx_REG (V2DImode, REGNO (operands[3]));
|
3299 |
|
|
emit_insn (gen_clgt_v4si (op3, op1, op2));
|
3300 |
|
|
emit_insn (gen_ceq_v4si (op4, op1, op2));
|
3301 |
|
|
emit_insn (gen_cgt_v4si (op5, op1, op2));
|
3302 |
|
|
emit_insn (gen_spu_xswd (op3d, op3));
|
3303 |
|
|
emit_insn (gen_selb (op0, op5, op3, op4));
|
3304 |
|
|
DONE;
|
3305 |
|
|
})
|
3306 |
|
|
|
3307 |
|
|
(define_insn "cgt_ti_m1"
|
3308 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3309 |
|
|
(gt:SI (match_operand:TI 1 "spu_reg_operand" "r")
|
3310 |
|
|
(const_int -1)))]
|
3311 |
|
|
""
|
3312 |
|
|
"cgti\t%0,%1,-1")
|
3313 |
|
|
|
3314 |
|
|
(define_insn "cgt_ti"
|
3315 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3316 |
|
|
(gt:SI (match_operand:TI 1 "spu_reg_operand" "r")
|
3317 |
|
|
(match_operand:TI 2 "spu_reg_operand" "r")))
|
3318 |
|
|
(clobber (match_scratch:V4SI 3 "=&r"))
|
3319 |
|
|
(clobber (match_scratch:V4SI 4 "=&r"))
|
3320 |
|
|
(clobber (match_scratch:V4SI 5 "=&r"))]
|
3321 |
|
|
""
|
3322 |
|
|
"clgt\t%4,%1,%2\;\
|
3323 |
|
|
ceq\t%3,%1,%2\;\
|
3324 |
|
|
cgt\t%5,%1,%2\;\
|
3325 |
|
|
shlqbyi\t%0,%4,4\;\
|
3326 |
|
|
selb\t%0,%4,%0,%3\;\
|
3327 |
|
|
shlqbyi\t%0,%0,4\;\
|
3328 |
|
|
selb\t%0,%4,%0,%3\;\
|
3329 |
|
|
shlqbyi\t%0,%0,4\;\
|
3330 |
|
|
selb\t%0,%5,%0,%3"
|
3331 |
|
|
[(set_attr "type" "multi0")
|
3332 |
|
|
(set_attr "length" "36")])
|
3333 |
|
|
|
3334 |
|
|
(define_insn "cgt_"
|
3335 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3336 |
|
|
(gt: (match_operand:VSF 1 "spu_reg_operand" "r")
|
3337 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "r")))]
|
3338 |
|
|
""
|
3339 |
|
|
"fcgt\t%0,%1,%2")
|
3340 |
|
|
|
3341 |
|
|
(define_insn "cmgt_"
|
3342 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3343 |
|
|
(gt: (abs:VSF (match_operand:VSF 1 "spu_reg_operand" "r"))
|
3344 |
|
|
(abs:VSF (match_operand:VSF 2 "spu_reg_operand" "r"))))]
|
3345 |
|
|
""
|
3346 |
|
|
"fcmgt\t%0,%1,%2")
|
3347 |
|
|
|
3348 |
|
|
(define_expand "cgt_df"
|
3349 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3350 |
|
|
(gt:SI (match_operand:DF 1 "spu_reg_operand" "r")
|
3351 |
|
|
(match_operand:DF 2 "const_zero_operand" "i")))]
|
3352 |
|
|
""
|
3353 |
|
|
{
|
3354 |
|
|
if (spu_arch == PROCESSOR_CELL)
|
3355 |
|
|
{
|
3356 |
|
|
rtx ra = gen_reg_rtx (V4SImode);
|
3357 |
|
|
rtx rb = gen_reg_rtx (V4SImode);
|
3358 |
|
|
rtx zero = gen_reg_rtx (V4SImode);
|
3359 |
|
|
rtx temp = gen_reg_rtx (TImode);
|
3360 |
|
|
rtx temp_v4si = spu_gen_subreg (V4SImode, temp);
|
3361 |
|
|
rtx temp2 = gen_reg_rtx (V4SImode);
|
3362 |
|
|
rtx hi_inf = gen_reg_rtx (V4SImode);
|
3363 |
|
|
rtx a_nan = gen_reg_rtx (V4SImode);
|
3364 |
|
|
rtx b_nan = gen_reg_rtx (V4SImode);
|
3365 |
|
|
rtx a_abs = gen_reg_rtx (V4SImode);
|
3366 |
|
|
rtx b_abs = gen_reg_rtx (V4SImode);
|
3367 |
|
|
rtx asel = gen_reg_rtx (V4SImode);
|
3368 |
|
|
rtx bsel = gen_reg_rtx (V4SImode);
|
3369 |
|
|
rtx abor = gen_reg_rtx (V4SImode);
|
3370 |
|
|
rtx bbor = gen_reg_rtx (V4SImode);
|
3371 |
|
|
rtx gt_hi = gen_reg_rtx (V4SImode);
|
3372 |
|
|
rtx gt_lo = gen_reg_rtx (V4SImode);
|
3373 |
|
|
rtx sign_mask = gen_reg_rtx (V4SImode);
|
3374 |
|
|
rtx nan_mask = gen_reg_rtx (V4SImode);
|
3375 |
|
|
rtx hi_promote = gen_reg_rtx (TImode);
|
3376 |
|
|
rtx borrow_shuffle = gen_reg_rtx (TImode);
|
3377 |
|
|
|
3378 |
|
|
rtx pat = spu_const_from_ints (V4SImode, 0x7FFFFFFF, 0xFFFFFFFF,
|
3379 |
|
|
0x7FFFFFFF, 0xFFFFFFFF);
|
3380 |
|
|
emit_move_insn (sign_mask, pat);
|
3381 |
|
|
pat = spu_const_from_ints (V4SImode, 0x7FF00000, 0x0,
|
3382 |
|
|
0x7FF00000, 0x0);
|
3383 |
|
|
emit_move_insn (nan_mask, pat);
|
3384 |
|
|
pat = spu_const_from_ints (TImode, 0x00010203, 0x00010203,
|
3385 |
|
|
0x08090A0B, 0x08090A0B);
|
3386 |
|
|
emit_move_insn (hi_promote, pat);
|
3387 |
|
|
pat = spu_const_from_ints (TImode, 0x04050607, 0xC0C0C0C0,
|
3388 |
|
|
0x0C0D0E0F, 0xC0C0C0C0);
|
3389 |
|
|
emit_move_insn (borrow_shuffle, pat);
|
3390 |
|
|
|
3391 |
|
|
emit_insn (gen_spu_convert (ra, operands[1]));
|
3392 |
|
|
emit_insn (gen_spu_convert (rb, operands[2]));
|
3393 |
|
|
emit_insn (gen_andv4si3 (a_abs, ra, sign_mask));
|
3394 |
|
|
emit_insn (gen_andv4si3 (b_abs, rb, sign_mask));
|
3395 |
|
|
|
3396 |
|
|
if (!flag_finite_math_only)
|
3397 |
|
|
{
|
3398 |
|
|
/* check if ra is NaN */
|
3399 |
|
|
emit_insn (gen_ceq_v4si (hi_inf, a_abs, nan_mask));
|
3400 |
|
|
emit_insn (gen_clgt_v4si (a_nan, a_abs, nan_mask));
|
3401 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, a_nan),
|
3402 |
|
|
GEN_INT (4 * 8)));
|
3403 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, hi_inf));
|
3404 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, temp2));
|
3405 |
|
|
emit_insn (gen_shufb (a_nan, a_nan, a_nan, hi_promote));
|
3406 |
|
|
|
3407 |
|
|
/* check if rb is NaN */
|
3408 |
|
|
emit_insn (gen_ceq_v4si (hi_inf, b_abs, nan_mask));
|
3409 |
|
|
emit_insn (gen_clgt_v4si (b_nan, b_abs, nan_mask));
|
3410 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, b_nan),
|
3411 |
|
|
GEN_INT (4 * 8)));
|
3412 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, hi_inf));
|
3413 |
|
|
emit_insn (gen_iorv4si3 (b_nan, b_nan, temp2));
|
3414 |
|
|
emit_insn (gen_shufb (b_nan, b_nan, b_nan, hi_promote));
|
3415 |
|
|
|
3416 |
|
|
/* check if ra or rb is NaN */
|
3417 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, b_nan));
|
3418 |
|
|
}
|
3419 |
|
|
emit_move_insn (zero, CONST0_RTX (V4SImode));
|
3420 |
|
|
emit_insn (gen_vashrv4si3 (asel, ra, spu_const (V4SImode, 31)));
|
3421 |
|
|
emit_insn (gen_shufb (asel, asel, asel, hi_promote));
|
3422 |
|
|
emit_insn (gen_bg_v4si (abor, zero, a_abs));
|
3423 |
|
|
emit_insn (gen_shufb (abor, abor, abor, borrow_shuffle));
|
3424 |
|
|
emit_insn (gen_sfx_v4si (abor, zero, a_abs, abor));
|
3425 |
|
|
emit_insn (gen_selb (abor, a_abs, abor, asel));
|
3426 |
|
|
|
3427 |
|
|
emit_insn (gen_vashrv4si3 (bsel, rb, spu_const (V4SImode, 31)));
|
3428 |
|
|
emit_insn (gen_shufb (bsel, bsel, bsel, hi_promote));
|
3429 |
|
|
emit_insn (gen_bg_v4si (bbor, zero, b_abs));
|
3430 |
|
|
emit_insn (gen_shufb (bbor, bbor, bbor, borrow_shuffle));
|
3431 |
|
|
emit_insn (gen_sfx_v4si (bbor, zero, b_abs, bbor));
|
3432 |
|
|
emit_insn (gen_selb (bbor, b_abs, bbor, bsel));
|
3433 |
|
|
|
3434 |
|
|
emit_insn (gen_cgt_v4si (gt_hi, abor, bbor));
|
3435 |
|
|
emit_insn (gen_clgt_v4si (gt_lo, abor, bbor));
|
3436 |
|
|
emit_insn (gen_ceq_v4si (temp2, abor, bbor));
|
3437 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, gt_lo),
|
3438 |
|
|
GEN_INT (4 * 8)));
|
3439 |
|
|
emit_insn (gen_andv4si3 (temp2, temp2, temp_v4si));
|
3440 |
|
|
emit_insn (gen_iorv4si3 (temp2, gt_hi, temp2));
|
3441 |
|
|
emit_insn (gen_shufb (temp2, temp2, temp2, hi_promote));
|
3442 |
|
|
if (!flag_finite_math_only)
|
3443 |
|
|
{
|
3444 |
|
|
/* correct for NaNs */
|
3445 |
|
|
emit_insn (gen_andc_v4si (temp2, temp2, a_nan));
|
3446 |
|
|
}
|
3447 |
|
|
emit_insn (gen_spu_convert (operands[0], temp2));
|
3448 |
|
|
DONE;
|
3449 |
|
|
}
|
3450 |
|
|
})
|
3451 |
|
|
|
3452 |
|
|
(define_insn "cgt__celledp"
|
3453 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3454 |
|
|
(gt: (match_operand:VDF 1 "spu_reg_operand" "r")
|
3455 |
|
|
(match_operand:VDF 2 "spu_reg_operand" "r")))]
|
3456 |
|
|
"spu_arch == PROCESSOR_CELLEDP"
|
3457 |
|
|
"dfcgt\t%0,%1,%2"
|
3458 |
|
|
[(set_attr "type" "fpd")])
|
3459 |
|
|
|
3460 |
|
|
(define_insn "cmgt__celledp"
|
3461 |
|
|
[(set (match_operand: 0 "spu_reg_operand" "=r")
|
3462 |
|
|
(gt: (abs:VDF (match_operand:VDF 1 "spu_reg_operand" "r"))
|
3463 |
|
|
(abs:VDF (match_operand:VDF 2 "spu_reg_operand" "r"))))]
|
3464 |
|
|
"spu_arch == PROCESSOR_CELLEDP"
|
3465 |
|
|
"dfcmgt\t%0,%1,%2"
|
3466 |
|
|
[(set_attr "type" "fpd")])
|
3467 |
|
|
|
3468 |
|
|
(define_expand "cgt_v2df"
|
3469 |
|
|
[(set (match_operand:V2DI 0 "spu_reg_operand" "=r")
|
3470 |
|
|
(gt:V2DI (match_operand:V2DF 1 "spu_reg_operand" "r")
|
3471 |
|
|
(match_operand:V2DF 2 "spu_reg_operand" "r")))]
|
3472 |
|
|
""
|
3473 |
|
|
{
|
3474 |
|
|
if (spu_arch == PROCESSOR_CELL)
|
3475 |
|
|
{
|
3476 |
|
|
rtx ra = spu_gen_subreg (V4SImode, operands[1]);
|
3477 |
|
|
rtx rb = spu_gen_subreg (V4SImode, operands[2]);
|
3478 |
|
|
rtx zero = gen_reg_rtx (V4SImode);
|
3479 |
|
|
rtx temp = gen_reg_rtx (TImode);
|
3480 |
|
|
rtx temp_v4si = spu_gen_subreg (V4SImode, temp);
|
3481 |
|
|
rtx temp2 = gen_reg_rtx (V4SImode);
|
3482 |
|
|
rtx hi_inf = gen_reg_rtx (V4SImode);
|
3483 |
|
|
rtx a_nan = gen_reg_rtx (V4SImode);
|
3484 |
|
|
rtx b_nan = gen_reg_rtx (V4SImode);
|
3485 |
|
|
rtx a_abs = gen_reg_rtx (V4SImode);
|
3486 |
|
|
rtx b_abs = gen_reg_rtx (V4SImode);
|
3487 |
|
|
rtx asel = gen_reg_rtx (V4SImode);
|
3488 |
|
|
rtx bsel = gen_reg_rtx (V4SImode);
|
3489 |
|
|
rtx abor = gen_reg_rtx (V4SImode);
|
3490 |
|
|
rtx bbor = gen_reg_rtx (V4SImode);
|
3491 |
|
|
rtx gt_hi = gen_reg_rtx (V4SImode);
|
3492 |
|
|
rtx gt_lo = gen_reg_rtx (V4SImode);
|
3493 |
|
|
rtx sign_mask = gen_reg_rtx (V4SImode);
|
3494 |
|
|
rtx nan_mask = gen_reg_rtx (V4SImode);
|
3495 |
|
|
rtx hi_promote = gen_reg_rtx (TImode);
|
3496 |
|
|
rtx borrow_shuffle = gen_reg_rtx (TImode);
|
3497 |
|
|
rtx pat = spu_const_from_ints (V4SImode, 0x7FFFFFFF, 0xFFFFFFFF,
|
3498 |
|
|
0x7FFFFFFF, 0xFFFFFFFF);
|
3499 |
|
|
emit_move_insn (sign_mask, pat);
|
3500 |
|
|
pat = spu_const_from_ints (V4SImode, 0x7FF00000, 0x0,
|
3501 |
|
|
0x7FF00000, 0x0);
|
3502 |
|
|
emit_move_insn (nan_mask, pat);
|
3503 |
|
|
pat = spu_const_from_ints (TImode, 0x00010203, 0x00010203,
|
3504 |
|
|
0x08090A0B, 0x08090A0B);
|
3505 |
|
|
emit_move_insn (hi_promote, pat);
|
3506 |
|
|
pat = spu_const_from_ints (TImode, 0x04050607, 0xC0C0C0C0,
|
3507 |
|
|
0x0C0D0E0F, 0xC0C0C0C0);
|
3508 |
|
|
emit_move_insn (borrow_shuffle, pat);
|
3509 |
|
|
|
3510 |
|
|
emit_insn (gen_andv4si3 (a_abs, ra, sign_mask));
|
3511 |
|
|
emit_insn (gen_ceq_v4si (hi_inf, a_abs, nan_mask));
|
3512 |
|
|
emit_insn (gen_clgt_v4si (a_nan, a_abs, nan_mask));
|
3513 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, a_nan),
|
3514 |
|
|
GEN_INT (4 * 8)));
|
3515 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, hi_inf));
|
3516 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, temp2));
|
3517 |
|
|
emit_insn (gen_shufb (a_nan, a_nan, a_nan, hi_promote));
|
3518 |
|
|
emit_insn (gen_andv4si3 (b_abs, rb, sign_mask));
|
3519 |
|
|
emit_insn (gen_ceq_v4si (hi_inf, b_abs, nan_mask));
|
3520 |
|
|
emit_insn (gen_clgt_v4si (b_nan, b_abs, nan_mask));
|
3521 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, b_nan),
|
3522 |
|
|
GEN_INT (4 * 8)));
|
3523 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, hi_inf));
|
3524 |
|
|
emit_insn (gen_iorv4si3 (b_nan, b_nan, temp2));
|
3525 |
|
|
emit_insn (gen_shufb (b_nan, b_nan, b_nan, hi_promote));
|
3526 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, b_nan));
|
3527 |
|
|
emit_move_insn (zero, CONST0_RTX (V4SImode));
|
3528 |
|
|
emit_insn (gen_vashrv4si3 (asel, ra, spu_const (V4SImode, 31)));
|
3529 |
|
|
emit_insn (gen_shufb (asel, asel, asel, hi_promote));
|
3530 |
|
|
emit_insn (gen_bg_v4si (abor, zero, a_abs));
|
3531 |
|
|
emit_insn (gen_shufb (abor, abor, abor, borrow_shuffle));
|
3532 |
|
|
emit_insn (gen_sfx_v4si (abor, zero, a_abs, abor));
|
3533 |
|
|
emit_insn (gen_selb (abor, a_abs, abor, asel));
|
3534 |
|
|
emit_insn (gen_vashrv4si3 (bsel, rb, spu_const (V4SImode, 31)));
|
3535 |
|
|
emit_insn (gen_shufb (bsel, bsel, bsel, hi_promote));
|
3536 |
|
|
emit_insn (gen_bg_v4si (bbor, zero, b_abs));
|
3537 |
|
|
emit_insn (gen_shufb (bbor, bbor, bbor, borrow_shuffle));
|
3538 |
|
|
emit_insn (gen_sfx_v4si (bbor, zero, b_abs, bbor));
|
3539 |
|
|
emit_insn (gen_selb (bbor, b_abs, bbor, bsel));
|
3540 |
|
|
emit_insn (gen_cgt_v4si (gt_hi, abor, bbor));
|
3541 |
|
|
emit_insn (gen_clgt_v4si (gt_lo, abor, bbor));
|
3542 |
|
|
emit_insn (gen_ceq_v4si (temp2, abor, bbor));
|
3543 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, gt_lo),
|
3544 |
|
|
GEN_INT (4 * 8)));
|
3545 |
|
|
emit_insn (gen_andv4si3 (temp2, temp2, temp_v4si));
|
3546 |
|
|
emit_insn (gen_iorv4si3 (temp2, gt_hi, temp2));
|
3547 |
|
|
|
3548 |
|
|
emit_insn (gen_shufb (temp2, temp2, temp2, hi_promote));
|
3549 |
|
|
emit_insn (gen_andc_v4si (temp2, temp2, a_nan));
|
3550 |
|
|
emit_move_insn (operands[0], spu_gen_subreg (V2DImode, temp2));
|
3551 |
|
|
DONE;
|
3552 |
|
|
}
|
3553 |
|
|
})
|
3554 |
|
|
|
3555 |
|
|
(define_expand "cmgt_v2df"
|
3556 |
|
|
[(set (match_operand:V2DI 0 "spu_reg_operand" "=r")
|
3557 |
|
|
(gt:V2DI (abs:V2DF (match_operand:V2DF 1 "spu_reg_operand" "r"))
|
3558 |
|
|
(abs:V2DF (match_operand:V2DF 2 "spu_reg_operand" "r"))))]
|
3559 |
|
|
""
|
3560 |
|
|
{
|
3561 |
|
|
if (spu_arch == PROCESSOR_CELL)
|
3562 |
|
|
{
|
3563 |
|
|
rtx ra = spu_gen_subreg (V4SImode, operands[1]);
|
3564 |
|
|
rtx rb = spu_gen_subreg (V4SImode, operands[2]);
|
3565 |
|
|
rtx temp = gen_reg_rtx (TImode);
|
3566 |
|
|
rtx temp_v4si = spu_gen_subreg (V4SImode, temp);
|
3567 |
|
|
rtx temp2 = gen_reg_rtx (V4SImode);
|
3568 |
|
|
rtx hi_inf = gen_reg_rtx (V4SImode);
|
3569 |
|
|
rtx a_nan = gen_reg_rtx (V4SImode);
|
3570 |
|
|
rtx b_nan = gen_reg_rtx (V4SImode);
|
3571 |
|
|
rtx a_abs = gen_reg_rtx (V4SImode);
|
3572 |
|
|
rtx b_abs = gen_reg_rtx (V4SImode);
|
3573 |
|
|
rtx gt_hi = gen_reg_rtx (V4SImode);
|
3574 |
|
|
rtx gt_lo = gen_reg_rtx (V4SImode);
|
3575 |
|
|
rtx sign_mask = gen_reg_rtx (V4SImode);
|
3576 |
|
|
rtx nan_mask = gen_reg_rtx (V4SImode);
|
3577 |
|
|
rtx hi_promote = gen_reg_rtx (TImode);
|
3578 |
|
|
rtx pat = spu_const_from_ints (V4SImode, 0x7FFFFFFF, 0xFFFFFFFF,
|
3579 |
|
|
0x7FFFFFFF, 0xFFFFFFFF);
|
3580 |
|
|
emit_move_insn (sign_mask, pat);
|
3581 |
|
|
pat = spu_const_from_ints (V4SImode, 0x7FF00000, 0x0,
|
3582 |
|
|
0x7FF00000, 0x0);
|
3583 |
|
|
emit_move_insn (nan_mask, pat);
|
3584 |
|
|
pat = spu_const_from_ints (TImode, 0x00010203, 0x00010203,
|
3585 |
|
|
0x08090A0B, 0x08090A0B);
|
3586 |
|
|
emit_move_insn (hi_promote, pat);
|
3587 |
|
|
|
3588 |
|
|
emit_insn (gen_andv4si3 (a_abs, ra, sign_mask));
|
3589 |
|
|
emit_insn (gen_ceq_v4si (hi_inf, a_abs, nan_mask));
|
3590 |
|
|
emit_insn (gen_clgt_v4si (a_nan, a_abs, nan_mask));
|
3591 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, a_nan),
|
3592 |
|
|
GEN_INT (4 * 8)));
|
3593 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, hi_inf));
|
3594 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, temp2));
|
3595 |
|
|
emit_insn (gen_shufb (a_nan, a_nan, a_nan, hi_promote));
|
3596 |
|
|
emit_insn (gen_andv4si3 (b_abs, rb, sign_mask));
|
3597 |
|
|
emit_insn (gen_ceq_v4si (hi_inf, b_abs, nan_mask));
|
3598 |
|
|
emit_insn (gen_clgt_v4si (b_nan, b_abs, nan_mask));
|
3599 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, b_nan),
|
3600 |
|
|
GEN_INT (4 * 8)));
|
3601 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, hi_inf));
|
3602 |
|
|
emit_insn (gen_iorv4si3 (b_nan, b_nan, temp2));
|
3603 |
|
|
emit_insn (gen_shufb (b_nan, b_nan, b_nan, hi_promote));
|
3604 |
|
|
emit_insn (gen_iorv4si3 (a_nan, a_nan, b_nan));
|
3605 |
|
|
|
3606 |
|
|
emit_insn (gen_clgt_v4si (gt_hi, a_abs, b_abs));
|
3607 |
|
|
emit_insn (gen_clgt_v4si (gt_lo, a_abs, b_abs));
|
3608 |
|
|
emit_insn (gen_ceq_v4si (temp2, a_abs, b_abs));
|
3609 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, gt_lo),
|
3610 |
|
|
GEN_INT (4 * 8)));
|
3611 |
|
|
emit_insn (gen_andv4si3 (temp2, temp2, temp_v4si));
|
3612 |
|
|
emit_insn (gen_iorv4si3 (temp2, gt_hi, temp2));
|
3613 |
|
|
emit_insn (gen_shufb (temp2, temp2, temp2, hi_promote));
|
3614 |
|
|
emit_insn (gen_andc_v4si (temp2, temp2, a_nan));
|
3615 |
|
|
emit_move_insn (operands[0], spu_gen_subreg (V2DImode, temp2));
|
3616 |
|
|
DONE;
|
3617 |
|
|
}
|
3618 |
|
|
})
|
3619 |
|
|
|
3620 |
|
|
|
3621 |
|
|
;; clgt
|
3622 |
|
|
|
3623 |
|
|
(define_insn "clgt_"
|
3624 |
|
|
[(set (match_operand:VQHSI 0 "spu_reg_operand" "=r,r")
|
3625 |
|
|
(gtu:VQHSI (match_operand:VQHSI 1 "spu_reg_operand" "r,r")
|
3626 |
|
|
(match_operand:VQHSI 2 "spu_arith_operand" "r,B")))]
|
3627 |
|
|
""
|
3628 |
|
|
"@
|
3629 |
|
|
clgt\t%0,%1,%2
|
3630 |
|
|
clgti\t%0,%1,%2")
|
3631 |
|
|
|
3632 |
|
|
(define_insn_and_split "clgt_di"
|
3633 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3634 |
|
|
(gtu:SI (match_operand:DI 1 "spu_reg_operand" "r")
|
3635 |
|
|
(match_operand:DI 2 "spu_reg_operand" "r")))
|
3636 |
|
|
(clobber (match_scratch:V4SI 3 "=&r"))
|
3637 |
|
|
(clobber (match_scratch:V4SI 4 "=&r"))
|
3638 |
|
|
(clobber (match_scratch:V4SI 5 "=&r"))]
|
3639 |
|
|
""
|
3640 |
|
|
"#"
|
3641 |
|
|
"reload_completed"
|
3642 |
|
|
[(set (match_dup:SI 0)
|
3643 |
|
|
(gtu:SI (match_dup:DI 1)
|
3644 |
|
|
(match_dup:DI 2)))]
|
3645 |
|
|
{
|
3646 |
|
|
rtx op0 = gen_rtx_REG (V4SImode, REGNO (operands[0]));
|
3647 |
|
|
rtx op1 = gen_rtx_REG (V4SImode, REGNO (operands[1]));
|
3648 |
|
|
rtx op2 = gen_rtx_REG (V4SImode, REGNO (operands[2]));
|
3649 |
|
|
rtx op3 = operands[3];
|
3650 |
|
|
rtx op4 = operands[4];
|
3651 |
|
|
rtx op5 = operands[5];
|
3652 |
|
|
rtx op5d = gen_rtx_REG (V2DImode, REGNO (operands[5]));
|
3653 |
|
|
emit_insn (gen_clgt_v4si (op3, op1, op2));
|
3654 |
|
|
emit_insn (gen_ceq_v4si (op4, op1, op2));
|
3655 |
|
|
emit_insn (gen_spu_xswd (op5d, op3));
|
3656 |
|
|
emit_insn (gen_selb (op0, op3, op5, op4));
|
3657 |
|
|
DONE;
|
3658 |
|
|
})
|
3659 |
|
|
|
3660 |
|
|
(define_insn "clgt_ti"
|
3661 |
|
|
[(set (match_operand:SI 0 "spu_reg_operand" "=r")
|
3662 |
|
|
(gtu:SI (match_operand:TI 1 "spu_reg_operand" "r")
|
3663 |
|
|
(match_operand:TI 2 "spu_reg_operand" "r")))
|
3664 |
|
|
(clobber (match_scratch:V4SI 3 "=&r"))
|
3665 |
|
|
(clobber (match_scratch:V4SI 4 "=&r"))]
|
3666 |
|
|
""
|
3667 |
|
|
"ceq\t%3,%1,%2\;\
|
3668 |
|
|
clgt\t%4,%1,%2\;\
|
3669 |
|
|
shlqbyi\t%0,%4,4\;\
|
3670 |
|
|
selb\t%0,%4,%0,%3\;\
|
3671 |
|
|
shlqbyi\t%0,%0,4\;\
|
3672 |
|
|
selb\t%0,%4,%0,%3\;\
|
3673 |
|
|
shlqbyi\t%0,%0,4\;\
|
3674 |
|
|
selb\t%0,%4,%0,%3"
|
3675 |
|
|
[(set_attr "type" "multi0")
|
3676 |
|
|
(set_attr "length" "32")])
|
3677 |
|
|
|
3678 |
|
|
|
3679 |
|
|
;; dftsv
|
3680 |
|
|
(define_insn "dftsv_celledp"
|
3681 |
|
|
[(set (match_operand:V2DI 0 "spu_reg_operand" "=r")
|
3682 |
|
|
(unspec:V2DI [(match_operand:V2DF 1 "spu_reg_operand" "r")
|
3683 |
|
|
(match_operand:SI 2 "const_int_operand" "i")]
|
3684 |
|
|
UNSPEC_DFTSV))]
|
3685 |
|
|
"spu_arch == PROCESSOR_CELLEDP"
|
3686 |
|
|
"dftsv\t%0,%1,%2"
|
3687 |
|
|
[(set_attr "type" "fpd")])
|
3688 |
|
|
|
3689 |
|
|
(define_expand "dftsv"
|
3690 |
|
|
[(set (match_operand:V2DI 0 "spu_reg_operand" "=r")
|
3691 |
|
|
(unspec:V2DI [(match_operand:V2DF 1 "spu_reg_operand" "r")
|
3692 |
|
|
(match_operand:SI 2 "const_int_operand" "i")]
|
3693 |
|
|
UNSPEC_DFTSV))]
|
3694 |
|
|
""
|
3695 |
|
|
{
|
3696 |
|
|
if (spu_arch == PROCESSOR_CELL)
|
3697 |
|
|
{
|
3698 |
|
|
rtx result = gen_reg_rtx (V4SImode);
|
3699 |
|
|
emit_move_insn (result, CONST0_RTX (V4SImode));
|
3700 |
|
|
|
3701 |
|
|
if (INTVAL (operands[2]))
|
3702 |
|
|
{
|
3703 |
|
|
rtx ra = spu_gen_subreg (V4SImode, operands[1]);
|
3704 |
|
|
rtx abs = gen_reg_rtx (V4SImode);
|
3705 |
|
|
rtx sign = gen_reg_rtx (V4SImode);
|
3706 |
|
|
rtx temp = gen_reg_rtx (TImode);
|
3707 |
|
|
rtx temp_v4si = spu_gen_subreg (V4SImode, temp);
|
3708 |
|
|
rtx temp2 = gen_reg_rtx (V4SImode);
|
3709 |
|
|
rtx pat = spu_const_from_ints (V4SImode, 0x7FFFFFFF, 0xFFFFFFFF,
|
3710 |
|
|
0x7FFFFFFF, 0xFFFFFFFF);
|
3711 |
|
|
rtx sign_mask = gen_reg_rtx (V4SImode);
|
3712 |
|
|
rtx hi_promote = gen_reg_rtx (TImode);
|
3713 |
|
|
emit_move_insn (sign_mask, pat);
|
3714 |
|
|
pat = spu_const_from_ints (TImode, 0x00010203, 0x00010203,
|
3715 |
|
|
0x08090A0B, 0x08090A0B);
|
3716 |
|
|
emit_move_insn (hi_promote, pat);
|
3717 |
|
|
|
3718 |
|
|
emit_insn (gen_vashrv4si3 (sign, ra, spu_const (V4SImode, 31)));
|
3719 |
|
|
emit_insn (gen_shufb (sign, sign, sign, hi_promote));
|
3720 |
|
|
emit_insn (gen_andv4si3 (abs, ra, sign_mask));
|
3721 |
|
|
|
3722 |
|
|
/* NaN or +inf or -inf */
|
3723 |
|
|
if (INTVAL (operands[2]) & 0x70)
|
3724 |
|
|
{
|
3725 |
|
|
rtx nan_mask = gen_reg_rtx (V4SImode);
|
3726 |
|
|
rtx isinf = gen_reg_rtx (V4SImode);
|
3727 |
|
|
pat = spu_const_from_ints (V4SImode, 0x7FF00000, 0x0,
|
3728 |
|
|
0x7FF00000, 0x0);
|
3729 |
|
|
emit_move_insn (nan_mask, pat);
|
3730 |
|
|
emit_insn (gen_ceq_v4si (isinf, abs, nan_mask));
|
3731 |
|
|
|
3732 |
|
|
/* NaN */
|
3733 |
|
|
if (INTVAL (operands[2]) & 0x40)
|
3734 |
|
|
{
|
3735 |
|
|
rtx isnan = gen_reg_rtx (V4SImode);
|
3736 |
|
|
emit_insn (gen_clgt_v4si (isnan, abs, nan_mask));
|
3737 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, isnan),
|
3738 |
|
|
GEN_INT (4 * 8)));
|
3739 |
|
|
emit_insn (gen_andv4si3 (temp2, temp_v4si, isinf));
|
3740 |
|
|
emit_insn (gen_iorv4si3 (isnan, isnan, temp2));
|
3741 |
|
|
emit_insn (gen_shufb (isnan, isnan, isnan, hi_promote));
|
3742 |
|
|
emit_insn (gen_iorv4si3 (result, result, isnan));
|
3743 |
|
|
}
|
3744 |
|
|
/* +inf or -inf */
|
3745 |
|
|
if (INTVAL (operands[2]) & 0x30)
|
3746 |
|
|
{
|
3747 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, isinf),
|
3748 |
|
|
GEN_INT (4 * 8)));
|
3749 |
|
|
emit_insn (gen_andv4si3 (isinf, isinf, temp_v4si));
|
3750 |
|
|
emit_insn (gen_shufb (isinf, isinf, isinf, hi_promote));
|
3751 |
|
|
|
3752 |
|
|
/* +inf */
|
3753 |
|
|
if (INTVAL (operands[2]) & 0x20)
|
3754 |
|
|
{
|
3755 |
|
|
emit_insn (gen_andc_v4si (temp2, isinf, sign));
|
3756 |
|
|
emit_insn (gen_iorv4si3 (result, result, temp2));
|
3757 |
|
|
}
|
3758 |
|
|
/* -inf */
|
3759 |
|
|
if (INTVAL (operands[2]) & 0x10)
|
3760 |
|
|
{
|
3761 |
|
|
emit_insn (gen_andv4si3 (temp2, isinf, sign));
|
3762 |
|
|
emit_insn (gen_iorv4si3 (result, result, temp2));
|
3763 |
|
|
}
|
3764 |
|
|
}
|
3765 |
|
|
}
|
3766 |
|
|
|
3767 |
|
|
/* 0 or denorm */
|
3768 |
|
|
if (INTVAL (operands[2]) & 0xF)
|
3769 |
|
|
{
|
3770 |
|
|
rtx iszero = gen_reg_rtx (V4SImode);
|
3771 |
|
|
emit_insn (gen_ceq_v4si (iszero, abs, CONST0_RTX (V4SImode)));
|
3772 |
|
|
emit_insn (gen_rotlti3 (temp, spu_gen_subreg (TImode, iszero),
|
3773 |
|
|
GEN_INT (4 * 8)));
|
3774 |
|
|
emit_insn (gen_andv4si3 (iszero, iszero, temp_v4si));
|
3775 |
|
|
|
3776 |
|
|
/* denorm */
|
3777 |
|
|
if (INTVAL (operands[2]) & 0x3)
|
3778 |
|
|
{
|
3779 |
|
|
rtx isdenorm = gen_reg_rtx (V4SImode);
|
3780 |
|
|
rtx denorm_mask = gen_reg_rtx (V4SImode);
|
3781 |
|
|
emit_move_insn (denorm_mask, spu_const (V4SImode, 0xFFFFF));
|
3782 |
|
|
emit_insn (gen_clgt_v4si (isdenorm, abs, denorm_mask));
|
3783 |
|
|
emit_insn (gen_nor_v4si (isdenorm, isdenorm, iszero));
|
3784 |
|
|
emit_insn (gen_shufb (isdenorm, isdenorm,
|
3785 |
|
|
isdenorm, hi_promote));
|
3786 |
|
|
/* +denorm */
|
3787 |
|
|
if (INTVAL (operands[2]) & 0x2)
|
3788 |
|
|
{
|
3789 |
|
|
emit_insn (gen_andc_v4si (temp2, isdenorm, sign));
|
3790 |
|
|
emit_insn (gen_iorv4si3 (result, result, temp2));
|
3791 |
|
|
}
|
3792 |
|
|
/* -denorm */
|
3793 |
|
|
if (INTVAL (operands[2]) & 0x1)
|
3794 |
|
|
{
|
3795 |
|
|
emit_insn (gen_andv4si3 (temp2, isdenorm, sign));
|
3796 |
|
|
emit_insn (gen_iorv4si3 (result, result, temp2));
|
3797 |
|
|
}
|
3798 |
|
|
}
|
3799 |
|
|
|
3800 |
|
|
/* 0 */
|
3801 |
|
|
if (INTVAL (operands[2]) & 0xC)
|
3802 |
|
|
{
|
3803 |
|
|
emit_insn (gen_shufb (iszero, iszero, iszero, hi_promote));
|
3804 |
|
|
/* +0 */
|
3805 |
|
|
if (INTVAL (operands[2]) & 0x8)
|
3806 |
|
|
{
|
3807 |
|
|
emit_insn (gen_andc_v4si (temp2, iszero, sign));
|
3808 |
|
|
emit_insn (gen_iorv4si3 (result, result, temp2));
|
3809 |
|
|
}
|
3810 |
|
|
/* -0 */
|
3811 |
|
|
if (INTVAL (operands[2]) & 0x4)
|
3812 |
|
|
{
|
3813 |
|
|
emit_insn (gen_andv4si3 (temp2, iszero, sign));
|
3814 |
|
|
emit_insn (gen_iorv4si3 (result, result, temp2));
|
3815 |
|
|
}
|
3816 |
|
|
}
|
3817 |
|
|
}
|
3818 |
|
|
}
|
3819 |
|
|
emit_move_insn (operands[0], spu_gen_subreg (V2DImode, result));
|
3820 |
|
|
DONE;
|
3821 |
|
|
}
|
3822 |
|
|
})
|
3823 |
|
|
|
3824 |
|
|
|
3825 |
|
|
;; branches
|
3826 |
|
|
|
3827 |
|
|
(define_insn ""
|
3828 |
|
|
[(set (pc)
|
3829 |
|
|
(if_then_else (match_operator 1 "branch_comparison_operator"
|
3830 |
|
|
[(match_operand 2
|
3831 |
|
|
"spu_reg_operand" "r")
|
3832 |
|
|
(const_int 0)])
|
3833 |
|
|
(label_ref (match_operand 0 "" ""))
|
3834 |
|
|
(pc)))]
|
3835 |
|
|
""
|
3836 |
|
|
"br%b2%b1z\t%2,%0"
|
3837 |
|
|
[(set_attr "type" "br")])
|
3838 |
|
|
|
3839 |
|
|
(define_insn ""
|
3840 |
|
|
[(set (pc)
|
3841 |
|
|
(if_then_else (match_operator 0 "branch_comparison_operator"
|
3842 |
|
|
[(match_operand 1
|
3843 |
|
|
"spu_reg_operand" "r")
|
3844 |
|
|
(const_int 0)])
|
3845 |
|
|
(return)
|
3846 |
|
|
(pc)))]
|
3847 |
|
|
"direct_return ()"
|
3848 |
|
|
"bi%b1%b0z\t%1,$lr"
|
3849 |
|
|
[(set_attr "type" "br")])
|
3850 |
|
|
|
3851 |
|
|
(define_insn ""
|
3852 |
|
|
[(set (pc)
|
3853 |
|
|
(if_then_else (match_operator 1 "branch_comparison_operator"
|
3854 |
|
|
[(match_operand 2
|
3855 |
|
|
"spu_reg_operand" "r")
|
3856 |
|
|
(const_int 0)])
|
3857 |
|
|
(pc)
|
3858 |
|
|
(label_ref (match_operand 0 "" ""))))]
|
3859 |
|
|
""
|
3860 |
|
|
"br%b2%b1z\t%2,%0"
|
3861 |
|
|
[(set_attr "type" "br")])
|
3862 |
|
|
|
3863 |
|
|
(define_insn ""
|
3864 |
|
|
[(set (pc)
|
3865 |
|
|
(if_then_else (match_operator 0 "branch_comparison_operator"
|
3866 |
|
|
[(match_operand 1
|
3867 |
|
|
"spu_reg_operand" "r")
|
3868 |
|
|
(const_int 0)])
|
3869 |
|
|
(pc)
|
3870 |
|
|
(return)))]
|
3871 |
|
|
"direct_return ()"
|
3872 |
|
|
"bi%b1%b0z\t%1,$lr"
|
3873 |
|
|
[(set_attr "type" "br")])
|
3874 |
|
|
|
3875 |
|
|
|
3876 |
|
|
;; vector conditional compare patterns
|
3877 |
|
|
(define_expand "vcond"
|
3878 |
|
|
[(set (match_operand:VCMP 0 "spu_reg_operand" "=r")
|
3879 |
|
|
(if_then_else:VCMP
|
3880 |
|
|
(match_operator 3 "comparison_operator"
|
3881 |
|
|
[(match_operand:VCMP 4 "spu_reg_operand" "r")
|
3882 |
|
|
(match_operand:VCMP 5 "spu_reg_operand" "r")])
|
3883 |
|
|
(match_operand:VCMP 1 "spu_reg_operand" "r")
|
3884 |
|
|
(match_operand:VCMP 2 "spu_reg_operand" "r")))]
|
3885 |
|
|
""
|
3886 |
|
|
{
|
3887 |
|
|
if (spu_emit_vector_cond_expr (operands[0], operands[1], operands[2],
|
3888 |
|
|
operands[3], operands[4], operands[5]))
|
3889 |
|
|
DONE;
|
3890 |
|
|
else
|
3891 |
|
|
FAIL;
|
3892 |
|
|
})
|
3893 |
|
|
|
3894 |
|
|
(define_expand "vcondu"
|
3895 |
|
|
[(set (match_operand:VCMPU 0 "spu_reg_operand" "=r")
|
3896 |
|
|
(if_then_else:VCMPU
|
3897 |
|
|
(match_operator 3 "comparison_operator"
|
3898 |
|
|
[(match_operand:VCMPU 4 "spu_reg_operand" "r")
|
3899 |
|
|
(match_operand:VCMPU 5 "spu_reg_operand" "r")])
|
3900 |
|
|
(match_operand:VCMPU 1 "spu_reg_operand" "r")
|
3901 |
|
|
(match_operand:VCMPU 2 "spu_reg_operand" "r")))]
|
3902 |
|
|
""
|
3903 |
|
|
{
|
3904 |
|
|
if (spu_emit_vector_cond_expr (operands[0], operands[1], operands[2],
|
3905 |
|
|
operands[3], operands[4], operands[5]))
|
3906 |
|
|
DONE;
|
3907 |
|
|
else
|
3908 |
|
|
FAIL;
|
3909 |
|
|
})
|
3910 |
|
|
|
3911 |
|
|
|
3912 |
|
|
;; branch on condition
|
3913 |
|
|
|
3914 |
|
|
(define_expand "cbranch4"
|
3915 |
|
|
[(use (match_operator 0 "ordered_comparison_operator"
|
3916 |
|
|
[(match_operand:VQHSI 1 "spu_reg_operand" "")
|
3917 |
|
|
(match_operand:VQHSI 2 "spu_nonmem_operand" "")]))
|
3918 |
|
|
(use (match_operand 3 ""))]
|
3919 |
|
|
""
|
3920 |
|
|
{ spu_emit_branch_or_set (0, operands[0], operands); DONE; })
|
3921 |
|
|
|
3922 |
|
|
(define_expand "cbranch4"
|
3923 |
|
|
[(use (match_operator 0 "ordered_comparison_operator"
|
3924 |
|
|
[(match_operand:DTI 1 "spu_reg_operand" "")
|
3925 |
|
|
(match_operand:DTI 2 "spu_reg_operand" "")]))
|
3926 |
|
|
(use (match_operand 3 ""))]
|
3927 |
|
|
""
|
3928 |
|
|
{ spu_emit_branch_or_set (0, operands[0], operands); DONE; })
|
3929 |
|
|
|
3930 |
|
|
(define_expand "cbranch4"
|
3931 |
|
|
[(use (match_operator 0 "ordered_comparison_operator"
|
3932 |
|
|
[(match_operand:VSF 1 "spu_reg_operand" "")
|
3933 |
|
|
(match_operand:VSF 2 "spu_reg_operand" "")]))
|
3934 |
|
|
(use (match_operand 3 ""))]
|
3935 |
|
|
""
|
3936 |
|
|
{ spu_emit_branch_or_set (0, operands[0], operands); DONE; })
|
3937 |
|
|
|
3938 |
|
|
(define_expand "cbranchdf4"
|
3939 |
|
|
[(use (match_operator 0 "ordered_comparison_operator"
|
3940 |
|
|
[(match_operand:DF 1 "spu_reg_operand" "")
|
3941 |
|
|
(match_operand:DF 2 "spu_reg_operand" "")]))
|
3942 |
|
|
(use (match_operand 3 ""))]
|
3943 |
|
|
""
|
3944 |
|
|
{ spu_emit_branch_or_set (0, operands[0], operands); DONE; })
|
3945 |
|
|
|
3946 |
|
|
|
3947 |
|
|
;; set on condition
|
3948 |
|
|
|
3949 |
|
|
(define_expand "cstore4"
|
3950 |
|
|
[(use (match_operator 1 "ordered_comparison_operator"
|
3951 |
|
|
[(match_operand:VQHSI 2 "spu_reg_operand" "")
|
3952 |
|
|
(match_operand:VQHSI 3 "spu_nonmem_operand" "")]))
|
3953 |
|
|
(clobber (match_operand:SI 0 "spu_reg_operand"))]
|
3954 |
|
|
""
|
3955 |
|
|
{ spu_emit_branch_or_set (1, operands[1], operands); DONE; })
|
3956 |
|
|
|
3957 |
|
|
(define_expand "cstore4"
|
3958 |
|
|
[(use (match_operator 1 "ordered_comparison_operator"
|
3959 |
|
|
[(match_operand:DTI 2 "spu_reg_operand" "")
|
3960 |
|
|
(match_operand:DTI 3 "spu_reg_operand" "")]))
|
3961 |
|
|
(clobber (match_operand:SI 0 "spu_reg_operand"))]
|
3962 |
|
|
""
|
3963 |
|
|
{ spu_emit_branch_or_set (1, operands[1], operands); DONE; })
|
3964 |
|
|
|
3965 |
|
|
(define_expand "cstore4"
|
3966 |
|
|
[(use (match_operator 1 "ordered_comparison_operator"
|
3967 |
|
|
[(match_operand:VSF 2 "spu_reg_operand" "")
|
3968 |
|
|
(match_operand:VSF 3 "spu_reg_operand" "")]))
|
3969 |
|
|
(clobber (match_operand:SI 0 "spu_reg_operand"))]
|
3970 |
|
|
""
|
3971 |
|
|
{ spu_emit_branch_or_set (1, operands[1], operands); DONE; })
|
3972 |
|
|
|
3973 |
|
|
(define_expand "cstoredf4"
|
3974 |
|
|
[(use (match_operator 1 "ordered_comparison_operator"
|
3975 |
|
|
[(match_operand:DF 2 "spu_reg_operand" "")
|
3976 |
|
|
(match_operand:DF 3 "spu_reg_operand" "")]))
|
3977 |
|
|
(clobber (match_operand:SI 0 "spu_reg_operand"))]
|
3978 |
|
|
""
|
3979 |
|
|
{ spu_emit_branch_or_set (1, operands[1], operands); DONE; })
|
3980 |
|
|
|
3981 |
|
|
|
3982 |
|
|
;; conditional move
|
3983 |
|
|
|
3984 |
|
|
;; Define this first one so HAVE_conditional_move is defined.
|
3985 |
|
|
(define_insn "movcc_dummy"
|
3986 |
|
|
[(set (match_operand 0 "" "")
|
3987 |
|
|
(if_then_else (match_operand 1 "" "")
|
3988 |
|
|
(match_operand 2 "" "")
|
3989 |
|
|
(match_operand 3 "" "")))]
|
3990 |
|
|
"!operands[0]"
|
3991 |
|
|
"")
|
3992 |
|
|
|
3993 |
|
|
(define_expand "movcc"
|
3994 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "")
|
3995 |
|
|
(if_then_else:ALL (match_operand 1 "ordered_comparison_operator" "")
|
3996 |
|
|
(match_operand:ALL 2 "spu_reg_operand" "")
|
3997 |
|
|
(match_operand:ALL 3 "spu_reg_operand" "")))]
|
3998 |
|
|
""
|
3999 |
|
|
{
|
4000 |
|
|
spu_emit_branch_or_set(2, operands[1], operands);
|
4001 |
|
|
DONE;
|
4002 |
|
|
})
|
4003 |
|
|
|
4004 |
|
|
;; This pattern is used when the result of a compare is not large
|
4005 |
|
|
;; enough to use in a selb when expanding conditional moves.
|
4006 |
|
|
(define_expand "extend_compare"
|
4007 |
|
|
[(set (match_operand 0 "spu_reg_operand" "=r")
|
4008 |
|
|
(unspec [(match_operand 1 "spu_reg_operand" "r")] UNSPEC_EXTEND_CMP))]
|
4009 |
|
|
""
|
4010 |
|
|
{
|
4011 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
|
4012 |
|
|
gen_rtx_UNSPEC (GET_MODE (operands[0]),
|
4013 |
|
|
gen_rtvec (1, operands[1]),
|
4014 |
|
|
UNSPEC_EXTEND_CMP)));
|
4015 |
|
|
DONE;
|
4016 |
|
|
})
|
4017 |
|
|
|
4018 |
|
|
(define_insn "extend_compare"
|
4019 |
|
|
[(set (match_operand:ALL 0 "spu_reg_operand" "=r")
|
4020 |
|
|
(unspec:ALL [(match_operand 1 "spu_reg_operand" "r")] UNSPEC_EXTEND_CMP))]
|
4021 |
|
|
"operands"
|
4022 |
|
|
"fsm\t%0,%1"
|
4023 |
|
|
[(set_attr "type" "shuf")])
|
4024 |
|
|
|
4025 |
|
|
|
4026 |
|
|
;; case
|
4027 |
|
|
|
4028 |
|
|
;; operand 0 is index
|
4029 |
|
|
;; operand 1 is the minimum bound
|
4030 |
|
|
;; operand 2 is the maximum bound - minimum bound + 1
|
4031 |
|
|
;; operand 3 is CODE_LABEL for the table;
|
4032 |
|
|
;; operand 4 is the CODE_LABEL to go to if index out of range.
|
4033 |
|
|
(define_expand "casesi"
|
4034 |
|
|
[(match_operand:SI 0 "spu_reg_operand" "")
|
4035 |
|
|
(match_operand:SI 1 "immediate_operand" "")
|
4036 |
|
|
(match_operand:SI 2 "immediate_operand" "")
|
4037 |
|
|
(match_operand 3 "" "")
|
4038 |
|
|
(match_operand 4 "" "")]
|
4039 |
|
|
""
|
4040 |
|
|
{
|
4041 |
|
|
rtx table = gen_reg_rtx (SImode);
|
4042 |
|
|
rtx index = gen_reg_rtx (SImode);
|
4043 |
|
|
rtx sindex = gen_reg_rtx (SImode);
|
4044 |
|
|
rtx addr = gen_reg_rtx (Pmode);
|
4045 |
|
|
|
4046 |
|
|
emit_move_insn (table, gen_rtx_LABEL_REF (SImode, operands[3]));
|
4047 |
|
|
|
4048 |
|
|
emit_insn (gen_subsi3(index, operands[0], force_reg(SImode, operands[1])));
|
4049 |
|
|
emit_insn (gen_ashlsi3(sindex, index, GEN_INT (2)));
|
4050 |
|
|
emit_move_insn (addr, gen_rtx_MEM (SImode,
|
4051 |
|
|
gen_rtx_PLUS (SImode, table, sindex)));
|
4052 |
|
|
if (flag_pic)
|
4053 |
|
|
emit_insn (gen_addsi3 (addr, addr, table));
|
4054 |
|
|
|
4055 |
|
|
emit_cmp_and_jump_insns (index, operands[2], GTU, NULL_RTX, SImode, 1, operands[4]);
|
4056 |
|
|
emit_jump_insn (gen_tablejump (addr, operands[3]));
|
4057 |
|
|
DONE;
|
4058 |
|
|
})
|
4059 |
|
|
|
4060 |
|
|
(define_insn "tablejump"
|
4061 |
|
|
[(set (pc) (match_operand:SI 0 "spu_reg_operand" "r"))
|
4062 |
|
|
(use (label_ref (match_operand 1 "" "")))]
|
4063 |
|
|
""
|
4064 |
|
|
"bi\t%0"
|
4065 |
|
|
[(set_attr "type" "br")])
|
4066 |
|
|
|
4067 |
|
|
|
4068 |
|
|
;; call
|
4069 |
|
|
|
4070 |
|
|
;; Note that operand 1 is total size of args, in bytes,
|
4071 |
|
|
;; and what the call insn wants is the number of words.
|
4072 |
|
|
(define_expand "sibcall"
|
4073 |
|
|
[(parallel
|
4074 |
|
|
[(call (match_operand:QI 0 "call_operand" "")
|
4075 |
|
|
(match_operand:QI 1 "" ""))
|
4076 |
|
|
(use (reg:SI 0))])]
|
4077 |
|
|
""
|
4078 |
|
|
{
|
4079 |
|
|
if (! call_operand (operands[0], QImode))
|
4080 |
|
|
XEXP (operands[0], 0) = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
|
4081 |
|
|
})
|
4082 |
|
|
|
4083 |
|
|
(define_insn "_sibcall"
|
4084 |
|
|
[(parallel
|
4085 |
|
|
[(call (match_operand:QI 0 "call_operand" "R,S")
|
4086 |
|
|
(match_operand:QI 1 "" "i,i"))
|
4087 |
|
|
(use (reg:SI 0))])]
|
4088 |
|
|
"SIBLING_CALL_P(insn)"
|
4089 |
|
|
"@
|
4090 |
|
|
bi\t%i0
|
4091 |
|
|
br\t%0"
|
4092 |
|
|
[(set_attr "type" "br,br")])
|
4093 |
|
|
|
4094 |
|
|
(define_expand "sibcall_value"
|
4095 |
|
|
[(parallel
|
4096 |
|
|
[(set (match_operand 0 "" "")
|
4097 |
|
|
(call (match_operand:QI 1 "call_operand" "")
|
4098 |
|
|
(match_operand:QI 2 "" "")))
|
4099 |
|
|
(use (reg:SI 0))])]
|
4100 |
|
|
""
|
4101 |
|
|
{
|
4102 |
|
|
if (! call_operand (operands[1], QImode))
|
4103 |
|
|
XEXP (operands[1], 0) = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
|
4104 |
|
|
})
|
4105 |
|
|
|
4106 |
|
|
(define_insn "_sibcall_value"
|
4107 |
|
|
[(parallel
|
4108 |
|
|
[(set (match_operand 0 "" "")
|
4109 |
|
|
(call (match_operand:QI 1 "call_operand" "R,S")
|
4110 |
|
|
(match_operand:QI 2 "" "i,i")))
|
4111 |
|
|
(use (reg:SI 0))])]
|
4112 |
|
|
"SIBLING_CALL_P(insn)"
|
4113 |
|
|
"@
|
4114 |
|
|
bi\t%i1
|
4115 |
|
|
br\t%1"
|
4116 |
|
|
[(set_attr "type" "br,br")])
|
4117 |
|
|
|
4118 |
|
|
;; Note that operand 1 is total size of args, in bytes,
|
4119 |
|
|
;; and what the call insn wants is the number of words.
|
4120 |
|
|
(define_expand "call"
|
4121 |
|
|
[(parallel
|
4122 |
|
|
[(call (match_operand:QI 0 "call_operand" "")
|
4123 |
|
|
(match_operand:QI 1 "" ""))
|
4124 |
|
|
(clobber (reg:SI 0))
|
4125 |
|
|
(clobber (reg:SI 130))])]
|
4126 |
|
|
""
|
4127 |
|
|
{
|
4128 |
|
|
if (! call_operand (operands[0], QImode))
|
4129 |
|
|
XEXP (operands[0], 0) = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
|
4130 |
|
|
})
|
4131 |
|
|
|
4132 |
|
|
(define_insn "_call"
|
4133 |
|
|
[(parallel
|
4134 |
|
|
[(call (match_operand:QI 0 "call_operand" "R,S,T")
|
4135 |
|
|
(match_operand:QI 1 "" "i,i,i"))
|
4136 |
|
|
(clobber (reg:SI 0))
|
4137 |
|
|
(clobber (reg:SI 130))])]
|
4138 |
|
|
""
|
4139 |
|
|
"@
|
4140 |
|
|
bisl\t$lr,%i0
|
4141 |
|
|
brsl\t$lr,%0
|
4142 |
|
|
brasl\t$lr,%0"
|
4143 |
|
|
[(set_attr "type" "br")])

(define_expand "call_value"
  [(parallel
    [(set (match_operand 0 "" "")
          (call (match_operand:QI 1 "call_operand" "")
                (match_operand:QI 2 "" "")))
     (clobber (reg:SI 0))
     (clobber (reg:SI 130))])]
  ""
  {
    if (! call_operand (operands[1], QImode))
      XEXP (operands[1], 0) = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
  })

(define_insn "_call_value"
  [(parallel
    [(set (match_operand 0 "" "")
          (call (match_operand:QI 1 "call_operand" "R,S,T")
                (match_operand:QI 2 "" "i,i,i")))
     (clobber (reg:SI 0))
     (clobber (reg:SI 130))])]
  ""
  "@
   bisl\t$lr,%i1
   brsl\t$lr,%1
   brasl\t$lr,%1"
  [(set_attr "type" "br")])

(define_expand "untyped_call"
  [(parallel [(call (match_operand 0 "" "")
                    (const_int 0))
              (match_operand 1 "" "")
              (match_operand 2 "" "")])]
  ""
  {
    int i;
    rtx reg = gen_rtx_REG (TImode, 3);

    /* We need to use call_value so the return value registers don't get
     * clobbered. */
    emit_call_insn (gen_call_value (reg, operands[0], const0_rtx));

    for (i = 0; i < XVECLEN (operands[2], 0); i++)
      {
        rtx set = XVECEXP (operands[2], 0, i);
        emit_move_insn (SET_DEST (set), SET_SRC (set));
      }

    /* The optimizer does not know that the call sets the function value
       registers we stored in the result block.  We avoid problems by
       claiming that all hard registers are used and clobbered at this
       point. */
    emit_insn (gen_blockage ());

    DONE;
  })


;; Patterns used for splitting and combining.


;; Function prologue and epilogue.

(define_expand "prologue"
  [(const_int 1)]
  ""
  { spu_expand_prologue (); DONE; })

;; "blockage" is only emitted in the epilogue.  This is what it took to
;; make "basic block reordering" work with the insn sequence generated by
;; spu_expand_epilogue (taken from mips.md).

(define_insn "blockage"
  [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
  ""
  ""
  [(set_attr "type" "convert")
   (set_attr "length" "0")])

(define_expand "epilogue"
  [(const_int 2)]
  ""
  { spu_expand_epilogue (false); DONE; })

(define_expand "sibcall_epilogue"
  [(const_int 2)]
  ""
  { spu_expand_epilogue (true); DONE; })


;; stack manipulations

;; An insn to allocate new stack space for dynamic use (e.g., alloca).
;; We move the back-chain and decrement the stack pointer.
(define_expand "allocate_stack"
  [(set (match_operand 0 "spu_reg_operand" "")
        (minus (reg 1) (match_operand 1 "spu_nonmem_operand" "")))
   (set (reg 1)
        (minus (reg 1) (match_dup 1)))]
  ""
  "spu_allocate_stack (operands[0], operands[1]); DONE;")

;; These patterns say how to save and restore the stack pointer.  We need not
;; save the stack pointer at function level since we are careful to preserve
;; the backchain.
;;

;; At block level the stack pointer is saved and restored, so that the
;; stack space allocated within a block is deallocated when leaving
;; block scope.  By default, according to the SPU ABI, the stack
;; pointer and available stack size are saved in a register.  Upon
;; restoration, the stack pointer is simply copied back, and the
;; current available stack size is calculated against the restored
;; stack pointer.
;;
;; For nonlocal gotos, we must save the stack pointer and its
;; backchain and restore both.  Note that in the nonlocal case, the
;; save area is a memory location.

(define_expand "save_stack_function"
  [(match_operand 0 "general_operand" "")
   (match_operand 1 "general_operand" "")]
  ""
  "DONE;")

(define_expand "restore_stack_function"
  [(match_operand 0 "general_operand" "")
   (match_operand 1 "general_operand" "")]
  ""
  "DONE;")

(define_expand "restore_stack_block"
  [(match_operand 0 "spu_reg_operand" "")
   (match_operand 1 "memory_operand" "")]
  ""
  "
{
  spu_restore_stack_block (operands[0], operands[1]);
  DONE;
}")

(define_expand "save_stack_nonlocal"
  [(match_operand 0 "memory_operand" "")
   (match_operand 1 "spu_reg_operand" "")]
  ""
  "
{
  rtx temp = gen_reg_rtx (Pmode);

  /* Copy the backchain to the first word, sp to the second.  We need to
     save the back chain because __builtin_apply appears to clobber it. */
  emit_move_insn (temp, gen_rtx_MEM (Pmode, operands[1]));
  emit_move_insn (adjust_address_nv (operands[0], SImode, 0), temp);
  emit_move_insn (adjust_address_nv (operands[0], SImode, 4), operands[1]);
  DONE;
}")

(define_expand "restore_stack_nonlocal"
  [(match_operand 0 "spu_reg_operand" "")
   (match_operand 1 "memory_operand" "")]
  ""
  "
{
  spu_restore_stack_nonlocal (operands[0], operands[1]);
  DONE;
}")


;; vector patterns

;; Vector initialization
(define_expand "vec_init<mode>"
  [(match_operand:V 0 "register_operand" "")
   (match_operand 1 "" "")]
  ""
  {
    spu_expand_vector_init (operands[0], operands[1]);
    DONE;
  })

(define_expand "vec_set<mode>"
  [(use (match_operand:SI 2 "spu_nonmem_operand" ""))
   (set (match_dup:TI 3)
        (unspec:TI [(match_dup:SI 4)
                    (match_dup:SI 5)
                    (match_dup:SI 6)] UNSPEC_CPAT))
   (set (match_operand:V 0 "spu_reg_operand" "")
        (unspec:V [(match_operand:<inner> 1 "spu_reg_operand" "")
                   (match_dup:V 0)
                   (match_dup:TI 3)] UNSPEC_SHUFB))]
  ""
  {
    HOST_WIDE_INT size = GET_MODE_SIZE (<inner>mode);
    rtx offset = GEN_INT (INTVAL (operands[2]) * size);
    operands[3] = gen_reg_rtx (TImode);
    operands[4] = stack_pointer_rtx;
    operands[5] = offset;
    operands[6] = GEN_INT (size);
  })
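
;; Worked example (illustrative only; the UNSPEC_CPAT expansion itself is
;; defined elsewhere in this file): setting element 2 of a V4SI gives
;; size = 4 and offset = 8, so the CPAT step builds the same insertion
;; control word a "cwd" would for address $sp + 8, and the final shufb
;; splices the new SImode value into bytes 8..11 of the destination while
;; copying every other byte from the old vector.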

(define_expand "vec_extract<mode>"
  [(set (match_operand:<inner> 0 "spu_reg_operand" "=r")
        (vec_select:<inner> (match_operand:V 1 "spu_reg_operand" "r")
                            (parallel [(match_operand 2 "const_int_operand" "i")])))]
  ""
  {
    if ((INTVAL (operands[2]) * <vmult> + <voff>) % 16 == 0)
      {
        emit_insn (gen_spu_convert (operands[0], operands[1]));
        DONE;
      }
  })

(define_insn "_vec_extract<mode>"
  [(set (match_operand:<inner> 0 "spu_reg_operand" "=r")
        (vec_select:<inner> (match_operand:V 1 "spu_reg_operand" "r")
                            (parallel [(match_operand 2 "const_int_operand" "i")])))]
  ""
  "rotqbyi\t%0,%1,(%2*<vmult>+<voff>)%%16"
  [(set_attr "type" "shuf")])
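
;; Illustrative note: scalars are kept in the "preferred slot" (the
;; left-most bytes of a register), so extracting element 0 needs no data
;; movement at all and the expander above just emits spu_convert.  For any
;; other element the rotqbyi rotates the quadword left until the selected
;; element reaches the preferred slot; e.g. element 2 of a V4SI is brought
;; in by an 8-byte rotate.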

(define_insn "_vec_extractv8hi_ze"
  [(set (match_operand:SI 0 "spu_reg_operand" "=r")
        (zero_extend:SI (vec_select:HI (match_operand:V8HI 1 "spu_reg_operand" "r")
                                       (parallel [(const_int 0)]))))]
  ""
  "rotqmbyi\t%0,%1,-2"
  [(set_attr "type" "shuf")])


;; misc

(define_expand "shufb"
  [(set (match_operand 0 "spu_reg_operand" "")
        (unspec [(match_operand 1 "spu_reg_operand" "")
                 (match_operand 2 "spu_reg_operand" "")
                 (match_operand:TI 3 "spu_reg_operand" "")] UNSPEC_SHUFB))]
  ""
  {
    rtx s = gen__shufb (operands[0], operands[1], operands[2], operands[3]);
    PUT_MODE (SET_SRC (s), GET_MODE (operands[0]));
    emit_insn (s);
    DONE;
  })

(define_insn "_shufb"
  [(set (match_operand 0 "spu_reg_operand" "=r")
        (unspec [(match_operand 1 "spu_reg_operand" "r")
                 (match_operand 2 "spu_reg_operand" "r")
                 (match_operand:TI 3 "spu_reg_operand" "r")] UNSPEC_SHUFB))]
  "operands"
  "shufb\t%0,%1,%2,%3"
  [(set_attr "type" "shuf")])

; The semantics of vec_permv16qi are nearly identical to those of the SPU
; shufb instruction, except that we need to reduce the selector modulo 32.
(define_expand "vec_permv16qi"
  [(set (match_dup 4) (and:V16QI (match_operand:V16QI 3 "spu_reg_operand" "")
                                 (match_dup 6)))
   (set (match_operand:V16QI 0 "spu_reg_operand" "")
        (unspec:V16QI
          [(match_operand:V16QI 1 "spu_reg_operand" "")
           (match_operand:V16QI 2 "spu_reg_operand" "")
           (match_dup 5)]
          UNSPEC_SHUFB))]
  ""
  {
    operands[4] = gen_reg_rtx (V16QImode);
    operands[5] = gen_lowpart (TImode, operands[4]);
    operands[6] = spu_const (V16QImode, 31);
  })
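
;; Example of the modulo-32 reduction (shufb selector behaviour summarized
;; from the SPU ISA rather than taken from this file): after the AND with
;; 31 above, a selector byte of 0x23 acts exactly like 0x03 and picks byte
;; 3 of operand 1, while values 16..31 pick from operand 2.  Masking the
;; high bits also makes the special 0x80/0xC0/0xE0 selector encodings
;; unreachable, which is what makes this match vec_perm rather than raw
;; shufb.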

(define_insn "nop"
  [(unspec_volatile [(const_int 0)] UNSPECV_NOP)]
  ""
  "nop"
  [(set_attr "type" "nop")])

(define_insn "nopn"
  [(unspec_volatile [(match_operand:SI 0 "immediate_operand" "K")] UNSPECV_NOP)]
  ""
  "nop\t%0"
  [(set_attr "type" "nop")])

(define_insn "lnop"
  [(unspec_volatile [(const_int 0)] UNSPECV_LNOP)]
  ""
  "lnop"
  [(set_attr "type" "lnop")])

;; The operand is there so we know why we generated this hbrp.
;; We clobber mem to make sure it isn't moved over any
;; loads, stores or calls while scheduling.
(define_insn "iprefetch"
  [(unspec [(match_operand:SI 0 "const_int_operand" "n")] UNSPEC_IPREFETCH)
   (clobber (mem:BLK (scratch)))]
  ""
  "hbrp\t# %0"
  [(set_attr "type" "iprefetch")])

;; A non-volatile version so it gets scheduled
(define_insn "nopn_nv"
  [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_NOP)]
  ""
  "nop\t%0"
  [(set_attr "type" "nop")])

(define_insn "hbr"
  [(set (reg:SI 130)
        (unspec:SI [(match_operand:SI 0 "immediate_operand" "i,i,i")
                    (match_operand:SI 1 "nonmemory_operand" "r,s,i")] UNSPEC_HBR))
   (unspec [(const_int 0)] UNSPEC_HBR)]
  ""
  "@
   hbr\t%0,%1
   hbrr\t%0,%1
   hbra\t%0,%1"
  [(set_attr "type" "hbr")])
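
;; Illustrative use of the three alternatives (register, pc-relative and
;; absolute forms); the operands are normally generated by the branch-hint
;; machinery in the backend rather than written by hand, and the label
;; names below are made up for the example:
;;     hbr     .Lbranch,$3        ; target address is in a register
;;     hbrr    .Lbranch,.Ltarget  ; pc-relative target label
;;     hbra    .Lbranch,func      ; absolute target address
;; where .Lbranch is the location of the branch being hinted.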

(define_insn "sync"
  [(unspec_volatile [(const_int 0)] UNSPECV_SYNC)
   (clobber (mem:BLK (scratch)))]
  ""
  "sync"
  [(set_attr "type" "br")])

(define_insn "syncc"
  [(unspec_volatile [(const_int 1)] UNSPECV_SYNC)
   (clobber (mem:BLK (scratch)))]
  ""
  "syncc"
  [(set_attr "type" "br")])

(define_insn "dsync"
  [(unspec_volatile [(const_int 2)] UNSPECV_SYNC)
   (clobber (mem:BLK (scratch)))]
  ""
  "dsync"
  [(set_attr "type" "br")])


;; Define the subtract-one-and-jump insns so loop.c
;; knows what to generate.
(define_expand "doloop_end"
  [(use (match_operand 0 "" ""))        ; loop pseudo
   (use (match_operand 1 "" ""))        ; iterations; zero if unknown
   (use (match_operand 2 "" ""))        ; max iterations
   (use (match_operand 3 "" ""))        ; loop level
   (use (match_operand 4 "" ""))]       ; label
  ""
  "
{
  /* Currently SMS relies on the do-loop pattern to recognize loops
     where (1) the control part comprises all insns defining and/or
     using a certain 'count' register and (2) the loop count can be
     adjusted by modifying this register prior to the loop.
     ??? The possible introduction of a new block to initialize the
     new IV can potentially affect branch optimizations.  */
  if (optimize > 0 && flag_modulo_sched)
    {
      rtx s0;
      rtx bcomp;
      rtx loc_ref;

      /* Only use this on innermost loops.  */
      if (INTVAL (operands[3]) > 1)
        FAIL;
      if (GET_MODE (operands[0]) != SImode)
        FAIL;

      s0 = operands[0];
      emit_move_insn (s0, gen_rtx_PLUS (SImode, s0, GEN_INT (-1)));
      bcomp = gen_rtx_NE (SImode, s0, const0_rtx);
      loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[4]);
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                                   gen_rtx_IF_THEN_ELSE (VOIDmode, bcomp,
                                                         loc_ref, pc_rtx)));

      DONE;
    }
  else
    FAIL;
}")

;; convert between any two modes, avoiding any GCC assumptions
(define_expand "spu_convert"
  [(set (match_operand 0 "spu_reg_operand" "")
        (unspec [(match_operand 1 "spu_reg_operand" "")] UNSPEC_CONVERT))]
  ""
  {
    rtx c = gen__spu_convert (operands[0], operands[1]);
    PUT_MODE (SET_SRC (c), GET_MODE (operands[0]));
    emit_insn (c);
    DONE;
  })

(define_insn_and_split "_spu_convert"
  [(set (match_operand 0 "spu_reg_operand" "=r")
        (unspec [(match_operand 1 "spu_reg_operand" "0")] UNSPEC_CONVERT))]
  ""
  "#"
  "reload_completed"
  [(const_int 0)]
  {
    spu_split_convert (operands);
    DONE;
  }
  [(set_attr "type" "convert")
   (set_attr "length" "0")])
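
;; Usage note: spu_convert merely re-labels the 128-bit register contents
;; with a new mode -- operand 1 is tied to operand 0 (the "0" constraint)
;; and the split expands to no code at all (length 0).  The vec_extract
;; expander above relies on this to view element 0 of a vector as a scalar
;; without emitting an instruction.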


;;
(include "spu-builtins.md")


(define_expand "smaxv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=r")
        (smax:V4SF (match_operand:V4SF 1 "register_operand" "r")
                   (match_operand:V4SF 2 "register_operand" "r")))]
  ""
  "
{
  rtx mask = gen_reg_rtx (V4SImode);

  emit_insn (gen_cgt_v4sf (mask, operands[1], operands[2]));
  emit_insn (gen_selb (operands[0], operands[2], operands[1], mask));
  DONE;
}")

(define_expand "sminv4sf3"
  [(set (match_operand:V4SF 0 "register_operand" "=r")
        (smin:V4SF (match_operand:V4SF 1 "register_operand" "r")
                   (match_operand:V4SF 2 "register_operand" "r")))]
  ""
  "
{
  rtx mask = gen_reg_rtx (V4SImode);

  emit_insn (gen_cgt_v4sf (mask, operands[1], operands[2]));
  emit_insn (gen_selb (operands[0], operands[1], operands[2], mask));
  DONE;
}")

(define_expand "smaxv2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=r")
        (smax:V2DF (match_operand:V2DF 1 "register_operand" "r")
                   (match_operand:V2DF 2 "register_operand" "r")))]
  ""
  "
{
  rtx mask = gen_reg_rtx (V2DImode);
  emit_insn (gen_cgt_v2df (mask, operands[1], operands[2]));
  emit_insn (gen_selb (operands[0], operands[2], operands[1],
                       spu_gen_subreg (V4SImode, mask)));
  DONE;
}")

(define_expand "sminv2df3"
  [(set (match_operand:V2DF 0 "register_operand" "=r")
        (smin:V2DF (match_operand:V2DF 1 "register_operand" "r")
                   (match_operand:V2DF 2 "register_operand" "r")))]
  ""
  "
{
  rtx mask = gen_reg_rtx (V2DImode);
  emit_insn (gen_cgt_v2df (mask, operands[1], operands[2]));
  emit_insn (gen_selb (operands[0], operands[1], operands[2],
                       spu_gen_subreg (V4SImode, mask)));
  DONE;
}")

(define_expand "vec_widen_umult_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=r")
        (mult:V4SI
          (zero_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 1 "register_operand" "r")
              (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)])))
          (zero_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 2 "register_operand" "r")
              (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)])))))]
  ""
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (TImode);
  unsigned char arr[16] = {
    0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
    0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17};

  emit_move_insn (mask, array_to_constant (TImode, arr));
  emit_insn (gen_spu_mpyhhu (ve, operands[1], operands[2]));
  emit_insn (gen_spu_mpyu (vo, operands[1], operands[2]));
  emit_insn (gen_shufb (operands[0], ve, vo, mask));
  DONE;
}")

(define_expand "vec_widen_umult_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=r")
        (mult:V4SI
          (zero_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 1 "register_operand" "r")
              (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)])))
          (zero_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 2 "register_operand" "r")
              (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)])))))]
  ""
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (TImode);
  unsigned char arr[16] = {
    0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
    0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F};

  emit_move_insn (mask, array_to_constant (TImode, arr));
  emit_insn (gen_spu_mpyhhu (ve, operands[1], operands[2]));
  emit_insn (gen_spu_mpyu (vo, operands[1], operands[2]));
  emit_insn (gen_shufb (operands[0], ve, vo, mask));
  DONE;
}")

(define_expand "vec_widen_smult_hi_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=r")
        (mult:V4SI
          (sign_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 1 "register_operand" "r")
              (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)])))
          (sign_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 2 "register_operand" "r")
              (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)])))))]
  ""
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (TImode);
  unsigned char arr[16] = {
    0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
    0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17};

  emit_move_insn (mask, array_to_constant (TImode, arr));
  emit_insn (gen_spu_mpyhh (ve, operands[1], operands[2]));
  emit_insn (gen_spu_mpy (vo, operands[1], operands[2]));
  emit_insn (gen_shufb (operands[0], ve, vo, mask));
  DONE;
}")

(define_expand "vec_widen_smult_lo_v8hi"
  [(set (match_operand:V4SI 0 "register_operand" "=r")
        (mult:V4SI
          (sign_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 1 "register_operand" "r")
              (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)])))
          (sign_extend:V4SI
            (vec_select:V4HI
              (match_operand:V8HI 2 "register_operand" "r")
              (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)])))))]
  ""
  "
{
  rtx ve = gen_reg_rtx (V4SImode);
  rtx vo = gen_reg_rtx (V4SImode);
  rtx mask = gen_reg_rtx (TImode);
  unsigned char arr[16] = {
    0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
    0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F};

  emit_move_insn (mask, array_to_constant (TImode, arr));
  emit_insn (gen_spu_mpyhh (ve, operands[1], operands[2]));
  emit_insn (gen_spu_mpy (vo, operands[1], operands[2]));
  emit_insn (gen_shufb (operands[0], ve, vo, mask));
  DONE;
}")

(define_expand "vec_realign_load_<mode>"
  [(set (match_operand:ALL 0 "register_operand" "=r")
        (unspec:ALL [(match_operand:ALL 1 "register_operand" "r")
                     (match_operand:ALL 2 "register_operand" "r")
                     (match_operand:TI 3 "register_operand" "r")] UNSPEC_SPU_REALIGN_LOAD))]
  ""
  "
{
  emit_insn (gen_shufb (operands[0], operands[1], operands[2], operands[3]));
  DONE;
}")

(define_expand "spu_lvsr"
  [(set (match_operand:V16QI 0 "register_operand" "")
        (unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_SPU_MASK_FOR_LOAD))]
  ""
  "
{
  rtx addr;
  rtx offset = gen_reg_rtx (V8HImode);
  rtx addr_bits = gen_reg_rtx (SImode);
  rtx addr_bits_vec = gen_reg_rtx (V8HImode);
  rtx splatqi = gen_reg_rtx (TImode);
  rtx result = gen_reg_rtx (V8HImode);
  unsigned char arr[16] = {
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
    0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
  unsigned char arr2[16] = {
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
    0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03};

  emit_move_insn (offset, array_to_constant (V8HImode, arr));
  emit_move_insn (splatqi, array_to_constant (TImode, arr2));

  gcc_assert (GET_CODE (operands[1]) == MEM);
  addr = force_reg (Pmode, XEXP (operands[1], 0));
  emit_insn (gen_andsi3 (addr_bits, addr, GEN_INT (0xF)));
  emit_insn (gen_shufb (addr_bits_vec, addr_bits, addr_bits, splatqi));

  /* offset - (addr & 0xF)
     It is safe to use a single sfh, because each byte of offset is > 15 and
     each byte of addr is <= 15. */
  emit_insn (gen_subv8hi3 (result, offset, addr_bits_vec));

  result = simplify_gen_subreg (V16QImode, result, V8HImode, 0);
  emit_move_insn (operands[0], result);

  DONE;
}")

(define_expand "vec_unpacku_hi_v8hi"
  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
        (zero_extend:V4SI
          (vec_select:V4HI
            (match_operand:V8HI 1 "spu_reg_operand" "r")
            (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)]))))]
  ""
  {
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x80, 0x00, 0x01, 0x80, 0x80, 0x02, 0x03,
      0x80, 0x80, 0x04, 0x05, 0x80, 0x80, 0x06, 0x07};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (operands[0], operands[1], operands[1], mask));

    DONE;
  })
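
;; Note on the mask (relying on the shufb selector encoding where a byte
;; of the form 10xxxxxx yields 0x00): the 0x80 bytes supply the zero high
;; halfword of each result word, and the 0x00..0x07 bytes copy the four
;; leading halfwords of operand 1 into the low halves, giving the
;; zero-extended V4SI result without a separate AND or shift.  The
;; remaining unpack expanders below vary the selector bytes and, for the
;; signed cases, add an xshw/xsbh extension step.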

(define_expand "vec_unpacku_lo_v8hi"
  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
        (zero_extend:V4SI
          (vec_select:V4HI
            (match_operand:V8HI 1 "spu_reg_operand" "r")
            (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
  ""
  {
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x80, 0x08, 0x09, 0x80, 0x80, 0x0A, 0x0B,
      0x80, 0x80, 0x0C, 0x0D, 0x80, 0x80, 0x0E, 0x0F};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (operands[0], operands[1], operands[1], mask));

    DONE;
  })

(define_expand "vec_unpacks_hi_v8hi"
  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
        (sign_extend:V4SI
          (vec_select:V4HI
            (match_operand:V8HI 1 "spu_reg_operand" "r")
            (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)]))))]
  ""
  {
    rtx tmp1 = gen_reg_rtx (V8HImode);
    rtx tmp2 = gen_reg_rtx (V4SImode);
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x80, 0x00, 0x01, 0x80, 0x80, 0x02, 0x03,
      0x80, 0x80, 0x04, 0x05, 0x80, 0x80, 0x06, 0x07};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (tmp1, operands[1], operands[1], mask));
    emit_insn (gen_spu_xshw (tmp2, tmp1));
    emit_move_insn (operands[0], tmp2);

    DONE;
  })

(define_expand "vec_unpacks_lo_v8hi"
  [(set (match_operand:V4SI 0 "spu_reg_operand" "=r")
        (sign_extend:V4SI
          (vec_select:V4HI
            (match_operand:V8HI 1 "spu_reg_operand" "r")
            (parallel [(const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
  ""
  {
    rtx tmp1 = gen_reg_rtx (V8HImode);
    rtx tmp2 = gen_reg_rtx (V4SImode);
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x80, 0x08, 0x09, 0x80, 0x80, 0x0A, 0x0B,
      0x80, 0x80, 0x0C, 0x0D, 0x80, 0x80, 0x0E, 0x0F};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (tmp1, operands[1], operands[1], mask));
    emit_insn (gen_spu_xshw (tmp2, tmp1));
    emit_move_insn (operands[0], tmp2);

    DONE;
  })

(define_expand "vec_unpacku_hi_v16qi"
  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
        (zero_extend:V8HI
          (vec_select:V8QI
            (match_operand:V16QI 1 "spu_reg_operand" "r")
            (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)
                       (const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
  ""
  {
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x00, 0x80, 0x01, 0x80, 0x02, 0x80, 0x03,
      0x80, 0x04, 0x80, 0x05, 0x80, 0x06, 0x80, 0x07};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (operands[0], operands[1], operands[1], mask));

    DONE;
  })

(define_expand "vec_unpacku_lo_v16qi"
  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
        (zero_extend:V8HI
          (vec_select:V8QI
            (match_operand:V16QI 1 "spu_reg_operand" "r")
            (parallel [(const_int 8)(const_int 9)(const_int 10)(const_int 11)
                       (const_int 12)(const_int 13)(const_int 14)(const_int 15)]))))]
  ""
  {
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x08, 0x80, 0x09, 0x80, 0x0A, 0x80, 0x0B,
      0x80, 0x0C, 0x80, 0x0D, 0x80, 0x0E, 0x80, 0x0F};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (operands[0], operands[1], operands[1], mask));

    DONE;
  })

(define_expand "vec_unpacks_hi_v16qi"
  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
        (sign_extend:V8HI
          (vec_select:V8QI
            (match_operand:V16QI 1 "spu_reg_operand" "r")
            (parallel [(const_int 0)(const_int 1)(const_int 2)(const_int 3)
                       (const_int 4)(const_int 5)(const_int 6)(const_int 7)]))))]
  ""
  {
    rtx tmp1 = gen_reg_rtx (V16QImode);
    rtx tmp2 = gen_reg_rtx (V8HImode);
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x00, 0x80, 0x01, 0x80, 0x02, 0x80, 0x03,
      0x80, 0x04, 0x80, 0x05, 0x80, 0x06, 0x80, 0x07};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (tmp1, operands[1], operands[1], mask));
    emit_insn (gen_spu_xsbh (tmp2, tmp1));
    emit_move_insn (operands[0], tmp2);

    DONE;
  })

(define_expand "vec_unpacks_lo_v16qi"
  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
        (sign_extend:V8HI
          (vec_select:V8QI
            (match_operand:V16QI 1 "spu_reg_operand" "r")
            (parallel [(const_int 8)(const_int 9)(const_int 10)(const_int 11)
                       (const_int 12)(const_int 13)(const_int 14)(const_int 15)]))))]
  ""
  {
    rtx tmp1 = gen_reg_rtx (V16QImode);
    rtx tmp2 = gen_reg_rtx (V8HImode);
    rtx mask = gen_reg_rtx (TImode);
    unsigned char arr[16] = {
      0x80, 0x08, 0x80, 0x09, 0x80, 0x0A, 0x80, 0x0B,
      0x80, 0x0C, 0x80, 0x0D, 0x80, 0x0E, 0x80, 0x0F};

    emit_move_insn (mask, array_to_constant (TImode, arr));
    emit_insn (gen_shufb (tmp1, operands[1], operands[1], mask));
    emit_insn (gen_spu_xsbh (tmp2, tmp1));
    emit_move_insn (operands[0], tmp2);

    DONE;
  })


(define_expand "vec_pack_trunc_v8hi"
  [(set (match_operand:V16QI 0 "spu_reg_operand" "=r")
        (vec_concat:V16QI
          (truncate:V8QI (match_operand:V8HI 1 "spu_reg_operand" "r"))
          (truncate:V8QI (match_operand:V8HI 2 "spu_reg_operand" "r"))))]
  ""
  "
{
  rtx mask = gen_reg_rtx (TImode);
  unsigned char arr[16] = {
    0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
    0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F};

  emit_move_insn (mask, array_to_constant (TImode, arr));
  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));

  DONE;
}")

(define_expand "vec_pack_trunc_v4si"
  [(set (match_operand:V8HI 0 "spu_reg_operand" "=r")
        (vec_concat:V8HI
          (truncate:V4HI (match_operand:V4SI 1 "spu_reg_operand" "r"))
          (truncate:V4HI (match_operand:V4SI 2 "spu_reg_operand" "r"))))]
  ""
  "
{
  rtx mask = gen_reg_rtx (TImode);
  unsigned char arr[16] = {
    0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
    0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F};

  emit_move_insn (mask, array_to_constant (TImode, arr));
  emit_insn (gen_shufb (operands[0], operands[1], operands[2], mask));

  DONE;
}")

(define_insn "stack_protect_set"
  [(set (match_operand:SI 0 "memory_operand" "=m")
        (unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
   (set (match_scratch:SI 2 "=&r") (const_int 0))]
  ""
  "lq%p1\t%2,%1\;stq%p0\t%2,%0\;xor\t%2,%2,%2"
  [(set_attr "length" "12")
   (set_attr "type" "multi1")]
)

(define_expand "stack_protect_test"
  [(match_operand 0 "memory_operand" "")
   (match_operand 1 "memory_operand" "")
   (match_operand 2 "" "")]
  ""
  {
    rtx compare_result;
    rtx bcomp, loc_ref;

    compare_result = gen_reg_rtx (SImode);

    emit_insn (gen_stack_protect_test_si (compare_result,
                                          operands[0],
                                          operands[1]));

    bcomp = gen_rtx_NE (SImode, compare_result, const0_rtx);

    loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[2]);

    emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                                 gen_rtx_IF_THEN_ELSE (VOIDmode, bcomp,
                                                       loc_ref, pc_rtx)));

    DONE;
  })
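
;; Rough reading of the expansion above: stack_protect_test_si below loads
;; the saved canary and the guard value, and its ceq leaves all ones in
;; compare_result when the two are equal; the conditional jump emitted
;; here then transfers to operands[2], the label taken when the canary is
;; still intact, and falls through to the failure path otherwise.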

(define_insn "stack_protect_test_si"
  [(set (match_operand:SI 0 "spu_reg_operand" "=&r")
        (unspec:SI [(match_operand:SI 1 "memory_operand" "m")
                    (match_operand:SI 2 "memory_operand" "m")]
                   UNSPEC_SP_TEST))
   (set (match_scratch:SI 3 "=&r") (const_int 0))]
  ""
  "lq%p1\t%0,%1\;lq%p2\t%3,%2\;ceq\t%0,%0,%3\;xor\t%3,%3,%3"
  [(set_attr "length" "16")
   (set_attr "type" "multi1")]
)