OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/mcore/mcore.md - Blame information for rev 820


Line No. Rev Author Line
1 38 julius
;;  Machine description for the Motorola MCore
2
;;  Copyright (C) 1993, 1999, 2000, 2004, 2005, 2007
3
;;  Free Software Foundation, Inc.
4
;;  Contributed by Motorola.
5
 
6
;; This file is part of GCC.
7
 
8
;; GCC is free software; you can redistribute it and/or modify
9
;; it under the terms of the GNU General Public License as published by
10
;; the Free Software Foundation; either version 3, or (at your option)
11
;; any later version.
12
 
13
;; GCC is distributed in the hope that it will be useful,
14
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
15
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
;; GNU General Public License for more details.
17
 
18
;; You should have received a copy of the GNU General Public License
19
;; along with GCC; see the file COPYING3.  If not see
20
;; <http://www.gnu.org/licenses/>.
21
 
22
;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
23
 
24
 
25
 
26
;; -------------------------------------------------------------------------
27
;; Attributes
28
;; -------------------------------------------------------------------------
29
 
30
; Target CPU.
31
 
32
(define_attr "type" "brcond,branch,jmp,load,store,move,alu,shift"
33
  (const_string "alu"))
34
 
35
;; If a branch destination is within -2048..2047 bytes away from the
36
;; instruction it can be 2 bytes long.  All other conditional branches
37
;; are 10 bytes long, and all other unconditional branches are 8 bytes.
38
;;
39
;; The assembler handles the long-branch span case for us if we use
40
;; the "jb*" mnemonics for jumps/branches. This pushes the span
41
;; calculations and the literal table placement into the assembler,
42
;; where their interactions can be managed in a single place.
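;; For example, the branch patterns below emit the relaxed form
;;      jbt     .L5
;; and the assembler encodes it as the 2-byte "bt .L5" when .L5 is in
;; range, or substitutes a longer jump sequence (placing any needed
;; literal-pool entries itself) when it is not.  (.L5 is a hypothetical
;; label; the exact long-form expansion is the assembler's choice.)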
43
 
44
;; All MCORE instructions are two bytes long.
45
 
46
(define_attr "length" "" (const_int 2))
47
 
48
;; Scheduling.  We only model a simple load latency.
49
(define_insn_reservation "any_insn" 1
50
                         (eq_attr "type" "!load")
51
                         "nothing")
52
(define_insn_reservation "memory" 2
53
                         (eq_attr "type" "load")
54
                         "nothing")
55
 
56
(include "predicates.md")
57
 
58
;; -------------------------------------------------------------------------
59
;; Test and bit test
60
;; -------------------------------------------------------------------------
61
 
62
(define_insn ""
63
  [(set (reg:SI 17)
64
        (sign_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
65
                         (const_int 1)
66
                         (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
67
  ""
68
  "btsti        %0,%1"
69
  [(set_attr "type" "shift")])
70
 
71
(define_insn ""
72
  [(set (reg:SI 17)
73
        (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
74
                         (const_int 1)
75
                         (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
76
  ""
77
  "btsti        %0,%1"
78
  [(set_attr "type" "shift")])
79
 
80
;;; This is created by combine.
81
(define_insn ""
82
  [(set (reg:CC 17)
83
        (ne:CC (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
84
                                (const_int 1)
85
                                (match_operand:SI 1 "mcore_literal_K_operand" "K"))
86
               (const_int 0)))]
87
  ""
88
  "btsti        %0,%1"
89
  [(set_attr "type" "shift")])
90
 
91
 
92
;; Created by combine from conditional patterns below (see sextb/btsti rx,31)
93
 
94
(define_insn ""
95
  [(set (reg:CC 17)
96
        (ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
97
                            (const_int 7))
98
               (const_int 0)))]
99
  "GET_CODE(operands[0]) == SUBREG &&
100
      GET_MODE(SUBREG_REG(operands[0])) == QImode"
101
  "btsti        %0,7"
102
  [(set_attr "type" "shift")])
103
 
104
(define_insn ""
105
  [(set (reg:CC 17)
106
        (ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
107
                            (const_int 15))
108
               (const_int 0)))]
109
  "GET_CODE(operands[0]) == SUBREG &&
110
      GET_MODE(SUBREG_REG(operands[0])) == HImode"
111
  "btsti        %0,15"
112
  [(set_attr "type" "shift")])
113
 
114
(define_split
115
  [(set (pc)
116
        (if_then_else (ne (eq:CC (zero_extract:SI
117
                                  (match_operand:SI 0 "mcore_arith_reg_operand" "")
118
                                  (const_int 1)
119
                                  (match_operand:SI 1 "mcore_literal_K_operand" ""))
120
                                 (const_int 0))
121
                          (const_int 0))
122
                      (label_ref (match_operand 2 "" ""))
123
                      (pc)))]
124
  ""
125
  [(set (reg:CC 17)
126
        (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
127
   (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
128
                           (label_ref (match_dup 2))
129
                           (pc)))]
130
  "")
131
 
132
(define_split
133
  [(set (pc)
134
        (if_then_else (eq (ne:CC (zero_extract:SI
135
                                  (match_operand:SI 0 "mcore_arith_reg_operand" "")
136
                                  (const_int 1)
137
                                  (match_operand:SI 1 "mcore_literal_K_operand" ""))
138
                                 (const_int 0))
139
                          (const_int 0))
140
                      (label_ref (match_operand 2 "" ""))
141
                      (pc)))]
142
  ""
143
  [(set (reg:CC 17)
144
        (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
145
   (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
146
                           (label_ref (match_dup 2))
147
                           (pc)))]
148
  "")
149
 
150
;; XXX - disabled by nickc because it fails on libiberty/fnmatch.c
151
;;
152
;; ; Experimental - relax immediates for and, andn, or, and tst to allow
153
;; ;    any immediate value (or an immediate at all -- or, andn, & tst).
154
;; ;    This is done to allow bit field masks to fold together in combine.
155
;; ;    The reload phase will force the immediate into a register at the
156
;; ;    very end.  This helps in some cases, but hurts in others: we'd
157
;; ;    really like to cse these immediates.  However, there is a phase
158
;; ;    ordering problem here.  cse picks up individual masks and cse's
159
;; ;    those, but not folded masks (cse happens before combine).  It's
160
;; ;    not clear what the best solution is because we really want cse
161
;; ;    before combine (leaving the bit field masks alone).   To pick up
162
;; ;    relaxed immediates use -mrelax-immediates.  It might take some
163
;; ;    experimenting to see which does better (i.e. regular imms vs.
164
;; ;    arbitrary imms) for a particular code.   BRC
165
;;
166
;; (define_insn ""
167
;;   [(set (reg:CC 17)
168
;;      (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
169
;;                     (match_operand:SI 1 "mcore_arith_any_imm_operand" "rI"))
170
;;             (const_int 0)))]
171
;;   "TARGET_RELAX_IMM"
172
;;   "tst       %0,%1")
173
;;
174
;; (define_insn ""
175
;;   [(set (reg:CC 17)
176
;;      (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
177
;;                     (match_operand:SI 1 "mcore_arith_M_operand" "r"))
178
;;             (const_int 0)))]
179
;;   "!TARGET_RELAX_IMM"
180
;;   "tst       %0,%1")
181
 
182
(define_insn ""
183
  [(set (reg:CC 17)
184
        (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
185
                       (match_operand:SI 1 "mcore_arith_M_operand" "r"))
186
               (const_int 0)))]
187
  ""
188
  "tst  %0,%1")
189
 
190
 
191
(define_split
192
  [(parallel[
193
      (set (reg:CC 17)
194
           (ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
195
                                 (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
196
                         (const_int 0))
197
                  (const_int 0)))
198
      (clobber (match_operand:CC 2 "mcore_arith_reg_operand" "=r"))])]
199
  ""
200
  [(set (reg:CC 17) (ne:SI (match_dup 0) (const_int 0)))
201
   (set (reg:CC 17) (leu:CC (match_dup 0) (match_dup 1)))])
202
 
203
;; -------------------------------------------------------------------------
204
;; SImode signed integer comparisons
205
;; -------------------------------------------------------------------------
206
 
207
(define_insn "decne_t"
208
  [(set (reg:CC 17) (ne:CC (plus:SI (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
209
                                    (const_int -1))
210
                           (const_int 0)))
211
   (set (match_dup 0)
212
        (plus:SI (match_dup 0)
213
                 (const_int -1)))]
214
  ""
215
  "decne        %0")
216
 
217
;; The combiner seems to prefer the following form to the preceding one.
218
;;
219
(define_insn ""
220
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
221
                           (const_int 1)))
222
   (set (match_dup 0)
223
        (plus:SI (match_dup 0)
224
                 (const_int -1)))]
225
  ""
226
  "decne        %0")
227
 
228
(define_insn "cmpnesi_t"
229
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
230
                           (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
231
  ""
232
  "cmpne        %0,%1")
233
 
234
(define_insn "cmpneisi_t"
235
  [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
236
                           (match_operand:SI 1 "mcore_arith_K_operand" "K")))]
237
  ""
238
  "cmpnei       %0,%1")
239
 
240
(define_insn "cmpgtsi_t"
241
  [(set (reg:CC 17) (gt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
242
                           (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
243
  ""
244
  "cmplt        %1,%0")
245
 
246
(define_insn ""
247
  [(set (reg:CC 17) (gt:CC (plus:SI
248
                            (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
249
                            (const_int -1))
250
                           (const_int 0)))
251
   (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
252
  ""
253
  "decgt        %0")
254
 
255
(define_insn "cmpltsi_t"
256
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
257
                           (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
258
  ""
259
  "cmplt        %0,%1")
260
 
261
; cmplti is 1-32
262
(define_insn "cmpltisi_t"
263
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
264
                           (match_operand:SI 1 "mcore_arith_J_operand" "J")))]
265
  ""
266
  "cmplti       %0,%1")
267
 
268
; covers cmplti x,0
269
(define_insn ""
270
  [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
271
                         (const_int 0)))]
272
  ""
273
  "btsti        %0,31")
274
 
275
(define_insn ""
276
  [(set (reg:CC 17) (lt:CC (plus:SI
277
                            (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
278
                            (const_int -1))
279
                           (const_int 0)))
280
   (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
281
  ""
282
  "declt        %0")
283
 
284
;; -------------------------------------------------------------------------
285
;; SImode unsigned integer comparisons
286
;; -------------------------------------------------------------------------
287
 
288
(define_insn "cmpgeusi_t"
289
  [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
290
                            (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
291
  ""
292
  "cmphs        %0,%1")
293
 
294
(define_insn "cmpgeusi_0"
295
  [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
296
                            (const_int 0)))]
297
  ""
298
  "cmpnei       %0, 0")
299
 
300
(define_insn "cmpleusi_t"
301
  [(set (reg:CC 17) (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
302
                            (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
303
  ""
304
  "cmphs        %1,%0")
305
 
306
;; We save the compare operands in the cmpxx patterns and use them when
307
;; we generate the branch.
308
 
309
;; We accept constants here, in case we can modify them to ones which
310
;; are more efficient to load.  E.g. change 'x <= 62' to 'x < 63'.
311
 
312
(define_expand "cmpsi"
313
  [(set (reg:CC 17) (compare:CC (match_operand:SI 0 "mcore_compare_operand" "")
314
                                (match_operand:SI 1 "nonmemory_operand" "")))]
315
  ""
316
  "
317
{ arch_compare_op0 = operands[0];
318
  arch_compare_op1 = operands[1];
319
  DONE;
320
}")
321
 
322
;; -------------------------------------------------------------------------
323
;; Logical operations
324
;; -------------------------------------------------------------------------
325
 
326
;; Logical AND clearing a single bit.  andsi3 knows that we have this
327
;; pattern and allows the constant literal pass through.
328
;;
329
 
330
;; RBE 2/97: don't need this pattern any longer...
331
;; RBE: I don't think we need both "S" and exact_log2() clauses.
332
;;(define_insn ""
333
;;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
334
;;      (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
335
;;              (match_operand:SI 2 "const_int_operand" "S")))]
336
;;  "mcore_arith_S_operand (operands[2])"
337
;;  "bclri      %0,%Q2")
338
;;
339
 
340
(define_insn "andnsi3"
341
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
342
        (and:SI (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
343
                (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
344
  ""
345
  "andn %0,%1")
346
 
347
(define_expand "andsi3"
348
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
349
        (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
350
                (match_operand:SI 2 "nonmemory_operand" "")))]
351
  ""
352
  "
353
{
354
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0
355
      && ! mcore_arith_S_operand (operands[2]))
356
    {
357
      int not_value = ~ INTVAL (operands[2]);
358
      if (   CONST_OK_FOR_I (not_value)
359
          || CONST_OK_FOR_M (not_value)
360
          || CONST_OK_FOR_N (not_value))
361
        {
362
          operands[2] = copy_to_mode_reg (SImode, GEN_INT (not_value));
363
          emit_insn (gen_andnsi3 (operands[0], operands[2], operands[1]));
364
          DONE;
365
        }
366
    }
367
 
368
  if (! mcore_arith_K_S_operand (operands[2], SImode))
369
    operands[2] = copy_to_mode_reg (SImode, operands[2]);
370
}")
371
 
372
(define_insn ""
373
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
374
        (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
375
                (match_operand:SI 2 "mcore_arith_any_imm_operand" "r,K,0,S")))]
376
  "TARGET_RELAX_IMM"
377
  "*
378
{
379
   switch (which_alternative)
380
     {
381
     case 0: return \"and       %0,%2\";
382
     case 1: return \"andi      %0,%2\";
383
     case 2: return \"and       %0,%1\";
384
     /* case -1: return \"bclri %0,%Q2\";        will not happen */
385
     case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
386
     default: gcc_unreachable ();
387
     }
388
}")
389
 
390
;; This was the old "S" which was "!(2^n)" */
391
;; case -1: return \"bclri      %0,%Q2\";        will not happen */
392
 
393
(define_insn ""
394
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
395
        (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
396
                (match_operand:SI 2 "mcore_arith_K_S_operand" "r,K,0,S")))]
397
  "!TARGET_RELAX_IMM"
398
  "*
399
{
400
   switch (which_alternative)
401
     {
402
     case 0: return \"and       %0,%2\";
403
     case 1: return \"andi      %0,%2\";
404
     case 2: return \"and       %0,%1\";
405
     case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
406
     default: gcc_unreachable ();
407
     }
408
}")
409
 
410
;(define_insn "iorsi3"
411
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
412
;       (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
413
;               (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
414
;  ""
415
;  "or  %0,%2")
416
 
417
; Need an expand to resolve the ambiguity between the two ior patterns below.
418
(define_expand "iorsi3"
419
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
420
        (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
421
                (match_operand:SI 2 "nonmemory_operand" "")))]
422
  ""
423
  "
424
{
425
   if (! mcore_arith_M_operand (operands[2], SImode))
426
      operands[2] = copy_to_mode_reg (SImode, operands[2]);
427
}")
428
 
429
(define_insn ""
430
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
431
        (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
432
                (match_operand:SI 2 "mcore_arith_any_imm_operand" "r,M,T")))]
433
  "TARGET_RELAX_IMM"
434
  "*
435
{
436
   switch (which_alternative)
437
     {
438
     case 0: return \"or        %0,%2\";
439
     case 1: return \"bseti     %0,%P2\";
440
     case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
441
     default: gcc_unreachable ();
442
     }
443
}")
444
 
445
(define_insn ""
446
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
447
        (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
448
                (match_operand:SI 2 "mcore_arith_M_operand" "r,M,T")))]
449
  "!TARGET_RELAX_IMM"
450
  "*
451
{
452
   switch (which_alternative)
453
     {
454
     case 0: return \"or        %0,%2\";
455
     case 1: return \"bseti     %0,%P2\";
456
     case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
457
     default: gcc_unreachable ();
458
     }
459
}")
460
 
461
;(define_insn ""
462
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
463
;       (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
464
;               (match_operand:SI 2 "const_int_operand" "M")))]
465
;  "exact_log2 (INTVAL (operands[2])) >= 0"
466
;  "bseti       %0,%P2")
467
 
468
;(define_insn ""
469
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
470
;       (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
471
;               (match_operand:SI 2 "const_int_operand" "i")))]
472
;  "mcore_num_ones (INTVAL (operands[2])) < 3"
473
;  "* return mcore_output_bseti (operands[0], INTVAL (operands[2]));")
474
 
475
(define_insn "xorsi3"
476
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
477
        (xor:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
478
                (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
479
  ""
480
  "xor  %0,%2")
481
 
482
; These patterns give better code than GCC invents if
483
; left to its own devices
484
 
485
(define_insn "anddi3"
486
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
487
        (and:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
488
                (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
489
  ""
490
  "and  %0,%2\;and      %R0,%R2"
491
  [(set_attr "length" "4")])
492
 
493
(define_insn "iordi3"
494
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
495
        (ior:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
496
                (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
497
  ""
498
  "or   %0,%2\;or       %R0,%R2"
499
  [(set_attr "length" "4")])
500
 
501
(define_insn "xordi3"
502
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
503
        (xor:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
504
                (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
505
  ""
506
  "xor  %0,%2\;xor      %R0,%R2"
507
  [(set_attr "length" "4")])
508
 
509
;; -------------------------------------------------------------------------
510
;; Shifts and rotates
511
;; -------------------------------------------------------------------------
512
 
513
;; Only allow these if the shift count is a convenient constant.
514
(define_expand "rotlsi3"
515
  [(set (match_operand:SI            0 "mcore_arith_reg_operand" "")
516
        (rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
517
                   (match_operand:SI 2 "nonmemory_operand" "")))]
518
  ""
519
  "if (! mcore_literal_K_operand (operands[2], SImode))
520
         FAIL;
521
  ")
522
 
523
;; We can only do constant rotates, which is what this pattern provides.
524
;; The combiner will put it together for us when we do:
525
;;      (x << N) | (x >> (32 - N))
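;; Illustrative (hypothetical) source that should end up here:
;;      unsigned int rot8 (unsigned int x) { return (x << 8) | (x >> 24); }
;; combine folds the shift/or pair into a single rotate-immediate,
;; e.g. "rotli rX,8".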
526
(define_insn ""
527
  [(set (match_operand:SI              0 "mcore_arith_reg_operand" "=r")
528
        (rotate:SI (match_operand:SI   1 "mcore_arith_reg_operand"  "0")
529
                     (match_operand:SI 2 "mcore_literal_K_operand"  "K")))]
530
  ""
531
  "rotli        %0,%2"
532
  [(set_attr "type" "shift")])
533
 
534
(define_insn "ashlsi3"
535
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
536
        (ashift:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
537
                   (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
538
  ""
539
  "@
540
        lsl     %0,%2
541
        lsli    %0,%2"
542
  [(set_attr "type" "shift")])
543
 
544
(define_insn ""
545
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
546
        (ashift:SI (const_int 1)
547
                   (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
548
  ""
549
  "bgenr        %0,%1"
550
  [(set_attr "type" "shift")])
551
 
552
(define_insn "ashrsi3"
553
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
554
        (ashiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
555
                     (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
556
  ""
557
  "@
558
        asr     %0,%2
559
        asri    %0,%2"
560
  [(set_attr "type" "shift")])
561
 
562
(define_insn "lshrsi3"
563
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
564
        (lshiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
565
                     (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
566
  ""
567
  "@
568
        lsr     %0,%2
569
        lsri    %0,%2"
570
  [(set_attr "type" "shift")])
571
 
572
;(define_expand "ashldi3"
573
;  [(parallel[(set (match_operand:DI 0 "mcore_arith_reg_operand" "")
574
;                 (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "")
575
;                            (match_operand:DI 2 "immediate_operand" "")))
576
;
577
;            (clobber (reg:CC 17))])]
578
;
579
;  ""
580
;  "
581
;{
582
;  if (GET_CODE (operands[2]) != CONST_INT
583
;      || INTVAL (operands[2]) != 1)
584
;    FAIL;
585
;}")
586
;
587
;(define_insn ""
588
;  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
589
;       (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
590
;                    (const_int 1)))
591
;   (clobber (reg:CC 17))]
592
;  ""
593
;  "lsli        %R0,0\;rotli    %0,0"
594
;  [(set_attr "length" "4") (set_attr "type" "shift")])
595
 
596
;; -------------------------------------------------------------------------
597
;; Index instructions
598
;; -------------------------------------------------------------------------
599
;; The second of each set of patterns is borrowed from the alpha.md file.
600
;; These variants of the above insns can occur if the second operand
601
;; is the frame pointer.  This is a kludge, but there doesn't
602
;; seem to be a way around it.  Only recognize them while reloading.
603
 
604
;; We must use reload_operand for some operands in case frame pointer
605
;; elimination put a MEM with invalid address there.  Otherwise,
606
;; the result of the substitution will not match this pattern, and reload
607
;; will not be able to correctly fix the result.
608
 
609
;; indexing longlongs or doubles (8 bytes)
610
 
611
(define_insn "indexdi_t"
612
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
613
        (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
614
                          (const_int 8))
615
                 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
616
  ""
617
  "*
618
    if (! mcore_is_same_reg (operands[1], operands[2]))
619
      {
620
        output_asm_insn (\"ixw\\t%0,%1\", operands);
621
        output_asm_insn (\"ixw\\t%0,%1\", operands);
622
      }
623
    else
624
      {
625
        output_asm_insn (\"ixh\\t%0,%1\", operands);
626
        output_asm_insn (\"ixh\\t%0,%1\", operands);
627
      }
628
    return \"\";
629
  "
630
;; if operands[1] == operands[2], the first option above is wrong! -- dac
631
;; was this... -- dac
632
;; ixw  %0,%1\;ixw      %0,%1"
633
 
634
  [(set_attr "length" "4")])
635
 
636
(define_insn ""
637
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
638
        (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
639
                                   (const_int 8))
640
                          (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
641
                 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
642
  "reload_in_progress"
643
  "@
644
        ixw     %0,%1\;ixw      %0,%1\;addu     %0,%3
645
        ixw     %0,%1\;ixw      %0,%1\;addi     %0,%3
646
        ixw     %0,%1\;ixw      %0,%1\;subi     %0,%M3"
647
  [(set_attr "length" "6")])
648
 
649
;; indexing longs (4 bytes)
650
 
651
(define_insn "indexsi_t"
652
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
653
        (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
654
                          (const_int 4))
655
                 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
656
  ""
657
  "ixw  %0,%1")
658
 
659
(define_insn ""
660
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
661
        (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
662
                                   (const_int 4))
663
                          (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
664
                 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
665
  "reload_in_progress"
666
  "@
667
        ixw     %0,%1\;addu     %0,%3
668
        ixw     %0,%1\;addi     %0,%3
669
        ixw     %0,%1\;subi     %0,%M3"
670
  [(set_attr "length" "4")])
671
 
672
;; indexing shorts (2 bytes)
673
 
674
(define_insn "indexhi_t"
675
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
676
        (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
677
                          (const_int 2))
678
                 (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
679
  ""
680
  "ixh  %0,%1")
681
 
682
(define_insn ""
683
  [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
684
        (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
685
                                   (const_int 2))
686
                          (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
687
                 (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
688
  "reload_in_progress"
689
  "@
690
        ixh     %0,%1\;addu     %0,%3
691
        ixh     %0,%1\;addi     %0,%3
692
        ixh     %0,%1\;subi     %0,%M3"
693
  [(set_attr "length" "4")])
694
 
695
;;
696
;; Other sizes may be handy for indexing.
697
;; The tradeoffs to consider when adding these are:
;;      code size, execution time (vs. a multiply it is easy to win),
;;      register pressure (these patterns don't use an extra
;;      register to build the offset from the base),
;;      and whether the compiler would come up with some other idiom anyway.
702
;;
703
 
704
;; -------------------------------------------------------------------------
705
;; Addition, Subtraction instructions
706
;; -------------------------------------------------------------------------
707
 
708
(define_expand "addsi3"
709
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
710
        (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
711
                 (match_operand:SI 2 "nonmemory_operand" "")))]
712
  ""
713
  "
714
{
715
  extern int flag_omit_frame_pointer;
716
 
717
  /* If this is an add to the frame pointer, then accept it as is so
718
     that we can later fold in the fp/sp offset from frame pointer
719
     elimination.  */
720
  if (flag_omit_frame_pointer
721
      && GET_CODE (operands[1]) == REG
722
      && (REGNO (operands[1]) == VIRTUAL_STACK_VARS_REGNUM
723
          || REGNO (operands[1]) == FRAME_POINTER_REGNUM))
724
    {
725
      emit_insn (gen_addsi3_fp (operands[0], operands[1], operands[2]));
726
      DONE;
727
    }
728
 
729
  /* Convert adds to subtracts if this makes loading the constant cheaper.
730
     But only if we are allowed to generate new pseudos.  */
731
  if (! (reload_in_progress || reload_completed)
732
      && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < -32)
733
    {
734
      int neg_value = - INTVAL (operands[2]);
735
      if (   CONST_OK_FOR_I (neg_value)
736
          || CONST_OK_FOR_M (neg_value)
737
          || CONST_OK_FOR_N (neg_value))
738
        {
739
          operands[2] = copy_to_mode_reg (SImode, GEN_INT (neg_value));
740
          emit_insn (gen_subsi3 (operands[0], operands[1], operands[2]));
741
          DONE;
742
        }
743
    }
744
 
745
  if (! mcore_addsub_operand (operands[2], SImode))
746
    operands[2] = copy_to_mode_reg (SImode, operands[2]);
747
}")
748
 
749
;; RBE: for some constants which are not in the range which allows
750
;; us to do a single operation, we will try a paired addi/addi instead
751
;; of a movi/addi. This relieves some register pressure at the expense
752
;; of giving away some potential constant reuse.
753
;;
754
;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
755
;; for later reference
756
;;
757
;; (define_insn "addsi3_i2"
758
;;   [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
759
;;      (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
760
;;               (match_operand:SI 2 "const_int_operand" "g")))]
761
;;   "GET_CODE(operands[2]) == CONST_INT
762
;;    && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
763
;;        || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
764
;;   "*
765
;; {
766
;;    int n = INTVAL(operands[2]);
767
;;    if (n > 0)
768
;;      {
769
;;        operands[2] = GEN_INT(n - 32);
770
;;        return \"addi\\t%0,32\;addi\\t%0,%2\";
771
;;      }
772
;;    else
773
;;      {
774
;;        n = (-n);
775
;;        operands[2] = GEN_INT(n - 32);
776
;;        return \"subi\\t%0,32\;subi\\t%0,%2\";
777
;;      }
778
;; }"
779
;;  [(set_attr "length" "4")])
780
 
781
(define_insn "addsi3_i"
782
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
783
        (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
784
                 (match_operand:SI 2 "mcore_addsub_operand" "r,J,L")))]
785
  ""
786
  "@
787
        addu    %0,%2
788
        addi    %0,%2
789
        subi    %0,%M2")
790
 
791
;; This exists so that address computations based on the frame pointer
792
;; can be folded in when frame pointer elimination occurs.  Ordinarily
793
;; this would be bad because it allows insns which would require reloading,
794
;; but without it, we get multiple adds where one would do.
795
 
796
(define_insn "addsi3_fp"
797
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
798
        (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
799
                 (match_operand:SI 2 "immediate_operand" "r,J,L")))]
800
  "flag_omit_frame_pointer
801
   && (reload_in_progress || reload_completed || REGNO (operands[1]) == FRAME_POINTER_REGNUM)"
802
  "@
803
        addu    %0,%2
804
        addi    %0,%2
805
        subi    %0,%M2")
806
 
807
;; RBE: for some constants which are not in the range which allows
808
;; us to do a single operation, we will try a paired addi/addi instead
809
;; of a movi/addi. This relieves some register pressure at the expense
810
;; of giving away some potential constant reuse.
811
;;
812
;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
813
;; for later reference
814
;;
815
;; (define_insn "subsi3_i2"
816
;;   [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
817
;;      (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
818
;;               (match_operand:SI 2 "const_int_operand" "g")))]
819
;;   "TARGET_RBETEST && GET_CODE(operands[2]) == CONST_INT
820
;;    && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
821
;;        || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
822
;;   "*
823
;; {
824
;;    int n = INTVAL(operands[2]);
825
;;    if ( n > 0)
826
;;      {
827
;;        operands[2] = GEN_INT( n - 32);
828
;;        return \"subi\\t%0,32\;subi\\t%0,%2\";
829
;;      }
830
;;    else
831
;;      {
832
;;        n = (-n);
833
;;        operands[2] = GEN_INT(n - 32);
834
;;        return \"addi\\t%0,32\;addi\\t%0,%2\";
835
;;      }
836
;; }"
837
;;   [(set_attr "length" "4")])
838
 
839
;(define_insn "subsi3"
840
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
841
;       (minus:SI (match_operand:SI 1 "mcore_arith_K_operand" "0,0,r,K")
842
;                 (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0,0")))]
843
;  ""
844
;  "@
845
;       sub     %0,%2
846
;       subi    %0,%2
847
;       rsub    %0,%1
848
;       rsubi   %0,%1")
849
 
850
(define_insn "subsi3"
851
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
852
        (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r")
853
                  (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0")))]
854
  ""
855
  "@
856
        subu    %0,%2
857
        subi    %0,%2
858
        rsub    %0,%1")
859
 
860
(define_insn ""
861
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
862
        (minus:SI (match_operand:SI 1 "mcore_literal_K_operand" "K")
863
                  (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
864
  ""
865
  "rsubi        %0,%1")
866
 
867
(define_insn "adddi3"
868
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
869
        (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
870
                 (match_operand:DI 2 "mcore_arith_reg_operand" "r")))
871
   (clobber (reg:CC 17))]
872
  ""
873
  "*
874
  {
875
    if (TARGET_LITTLE_END)
876
      return \"cmplt    %0,%0\;addc     %0,%2\;addc     %R0,%R2\";
877
    return \"cmplt      %R0,%R0\;addc   %R0,%R2\;addc   %0,%2\";
878
  }"
879
  [(set_attr "length" "6")])
880
 
881
;; special case for "longlong += 1"
882
(define_insn ""
883
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
884
        (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
885
                 (const_int 1)))
886
   (clobber (reg:CC 17))]
887
  ""
888
  "*
889
  {
890
   if (TARGET_LITTLE_END)
891
      return \"addi     %0,1\;cmpnei %0,0\;incf %R0\";
892
    return \"addi       %R0,1\;cmpnei %R0,0\;incf       %0\";
893
  }"
894
  [(set_attr "length" "6")])
895
 
896
;; special case for "longlong -= 1"
897
(define_insn ""
898
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
899
        (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
900
                 (const_int -1)))
901
   (clobber (reg:CC 17))]
902
  ""
903
  "*
904
  {
905
    if (TARGET_LITTLE_END)
906
       return \"cmpnei %0,0\;decf       %R0\;subi       %0,1\";
907
    return \"cmpnei %R0,0\;decf %0\;subi        %R0,1\";
908
  }"
909
  [(set_attr "length" "6")])
910
 
911
;; special case for "longlong += const_int"
912
;; we have to use a register for the const_int because we don't
913
;; have an unsigned compare immediate... only +/- 1 get to
914
;; play the no-extra register game because they compare with 0.
915
;; This winds up working out for any literal that is synthesized
916
;; with a single instruction. The more complicated ones look
917
;; like they get broken into subregs to get initialized too soon
918
;; for us to catch here. -- RBE 4/25/96
919
;; only allow for-sure positive values.
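;; At the C level the pattern below is the usual add-with-carry idiom
;; (illustrative, little-endian register pair lo/hi):
;;      lo += c;                /* addu                     */
;;      if (lo < c)             /* cmphs + incf             */
;;        hi += 1;              /* carry into the high word */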
920
 
921
(define_insn ""
922
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
923
        (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
924
                 (match_operand:SI 2 "const_int_operand" "r")))
925
   (clobber (reg:CC 17))]
926
  "GET_CODE (operands[2]) == CONST_INT
927
   && INTVAL (operands[2]) > 0 && ! (INTVAL (operands[2]) & 0x80000000)"
928
  "*
929
{
930
  gcc_assert (GET_MODE (operands[2]) == SImode);
931
  if (TARGET_LITTLE_END)
932
    return \"addu       %0,%2\;cmphs    %0,%2\;incf     %R0\";
933
  return \"addu %R0,%2\;cmphs   %R0,%2\;incf    %0\";
934
}"
935
  [(set_attr "length" "6")])
936
 
937
;; optimize "long long" + "unsigned long"
938
;; won't trigger because of how the extension is expanded upstream.
939
;; (define_insn ""
940
;;   [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
941
;;      (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
942
;;               (zero_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r"))))
943
;;    (clobber (reg:CC 17))]
944
;;   "0"
945
;;   "cmplt     %R0,%R0\;addc   %R0,%2\;inct    %0"
946
;;   [(set_attr "length" "6")])
947
 
948
;; optimize "long long" + "signed long"
949
;; won't trigger because of how the extension is expanded upstream.
950
;; (define_insn ""
951
;;   [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
952
;;      (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
953
;;               (sign_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r"))))
954
;;    (clobber (reg:CC 17))]
955
;;   "0"
956
;;   "cmplt     %R0,%R0\;addc   %R0,%2\;inct    %0\;btsti       %2,31\;dect     %0"
957
;;   [(set_attr "length" "6")])
958
 
959
(define_insn "subdi3"
960
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
961
        (minus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
962
                  (match_operand:DI 2 "mcore_arith_reg_operand" "r")))
963
   (clobber (reg:CC 17))]
964
  ""
965
  "*
966
  {
967
    if (TARGET_LITTLE_END)
968
      return \"cmphs    %0,%0\;subc     %0,%2\;subc     %R0,%R2\";
969
    return \"cmphs      %R0,%R0\;subc   %R0,%R2\;subc   %0,%2\";
970
  }"
971
  [(set_attr "length" "6")])
972
 
973
;; -------------------------------------------------------------------------
974
;; Multiplication instructions
975
;; -------------------------------------------------------------------------
976
 
977
(define_insn "mulsi3"
978
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
979
        (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
980
                 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
981
  ""
982
  "mult %0,%2")
983
 
984
;;
985
;; 32/32 signed division -- added to the MCORE instruction set spring 1997
986
;;
987
;; Different constraints based on the architecture revision...
988
;;
989
(define_expand "divsi3"
990
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
991
        (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
992
                (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
993
  "TARGET_DIV"
994
  "")
995
 
996
;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97)
997
;;
998
(define_insn ""
999
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1000
        (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1001
                (match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
1002
  "TARGET_DIV"
1003
  "divs %0,%2")
1004
 
1005
;;
1006
;; 32/32 signed division -- added to the MCORE instruction set spring 1997
1007
;;
1008
;; Different constraints based on the architecture revision...
1009
;;
1010
(define_expand "udivsi3"
1011
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1012
        (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
1013
                 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
1014
  "TARGET_DIV"
1015
  "")
1016
 
1017
;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97)
1018
(define_insn ""
1019
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1020
        (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1021
                 (match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
1022
  "TARGET_DIV"
1023
  "divu %0,%2")
1024
 
1025
;; -------------------------------------------------------------------------
1026
;; Unary arithmetic
1027
;; -------------------------------------------------------------------------
1028
 
1029
(define_insn "negsi2"
1030
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1031
        (neg:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1032
  ""
1033
  "*
1034
{
1035
   return \"rsubi       %0,0\";
1036
}")
1037
 
1038
 
1039
(define_insn "abssi2"
1040
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1041
        (abs:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1042
  ""
1043
  "abs  %0")
1044
 
1045
(define_insn "negdi2"
1046
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
1047
        (neg:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")))
1048
   (clobber (reg:CC 17))]
1049
  ""
1050
  "*
1051
{
1052
   if (TARGET_LITTLE_END)
1053
     return \"cmpnei    %0,0\\n\\trsubi %0,0\\n\\tnot   %R0\\n\\tincf   %R0\";
1054
   return \"cmpnei      %R0,0\\n\\trsubi        %R0,0\\n\\tnot  %0\\n\\tincf    %0\";
1055
}"
1056
  [(set_attr "length" "8")])
1057
 
1058
(define_insn "one_cmplsi2"
1059
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1060
        (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1061
  ""
1062
  "not  %0")
1063
 
1064
;; -------------------------------------------------------------------------
1065
;; Zero extension instructions
1066
;; -------------------------------------------------------------------------
1067
 
1068
(define_expand "zero_extendhisi2"
1069
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1070
        (zero_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "")))]
1071
  ""
1072
  "")
1073
 
1074
(define_insn ""
1075
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
1076
        (zero_extend:SI (match_operand:HI 1 "general_operand" "0,m")))]
1077
  ""
1078
  "@
1079
        zexth   %0
1080
        ld.h    %0,%1"
1081
  [(set_attr "type" "shift,load")])
1082
 
1083
;; ldh gives us a free zero-extension. The combiner picks up on this.
1084
(define_insn ""
1085
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1086
        (zero_extend:SI (mem:HI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
1087
  ""
1088
  "ld.h %0,(%1)"
1089
  [(set_attr "type" "load")])
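;; Illustrative (hypothetical) source that should use the insn above:
;;      unsigned int get (unsigned short *p) { return *p; }
;; loads and zero-extends in a single instruction, e.g. "ld.h r2,(r2)".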
1090
 
1091
(define_insn ""
1092
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1093
        (zero_extend:SI (mem:HI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
1094
                                         (match_operand:SI 2 "const_int_operand" "")))))]
1095
  "(INTVAL (operands[2]) >= 0) &&
1096
   (INTVAL (operands[2]) < 32) &&
1097
   ((INTVAL (operands[2])&1) == 0)"
1098
  "ld.h %0,(%1,%2)"
1099
  [(set_attr "type" "load")])
1100
 
1101
(define_expand "zero_extendqisi2"
1102
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1103
        (zero_extend:SI (match_operand:QI 1 "general_operand" "")))]
1104
  ""
1105
  "")
1106
 
1107
;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
1108
(define_insn ""
1109
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b,r")
1110
        (zero_extend:SI (match_operand:QI 1 "general_operand" "0,r,m")))]
1111
  ""
1112
  "@
1113
        zextb   %0
1114
        xtrb3   %0,%1
1115
        ld.b    %0,%1"
1116
  [(set_attr "type" "shift,shift,load")])
1117
 
1118
;; ldb gives us a free zero-extension. The combiner picks up on this.
1119
(define_insn ""
1120
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1121
        (zero_extend:SI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
1122
  ""
1123
  "ld.b %0,(%1)"
1124
  [(set_attr "type" "load")])
1125
 
1126
(define_insn ""
1127
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1128
        (zero_extend:SI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
1129
                                         (match_operand:SI 2 "const_int_operand" "")))))]
1130
  "(INTVAL (operands[2]) >= 0) &&
1131
   (INTVAL (operands[2]) < 16)"
1132
  "ld.b %0,(%1,%2)"
1133
  [(set_attr "type" "load")])
1134
 
1135
(define_expand "zero_extendqihi2"
1136
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "")
1137
        (zero_extend:HI (match_operand:QI 1 "general_operand" "")))]
1138
  ""
1139
  "")
1140
 
1141
;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
1142
(define_insn ""
1143
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r,b,r")
1144
        (zero_extend:HI (match_operand:QI 1 "general_operand" "0,r,m")))]
1145
  ""
1146
  "@
1147
        zextb   %0
1148
        xtrb3   %0,%1
1149
        ld.b    %0,%1"
1150
  [(set_attr "type" "shift,shift,load")])
1151
 
1152
;; ldb gives us a free zero-extension. The combiner picks up on this.
1153
;; this doesn't catch references that are into a structure.
1154
;; note that normally the compiler uses the above insn, unless it turns
1155
;; out that we're dealing with a volatile...
1156
(define_insn ""
1157
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
1158
        (zero_extend:HI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
1159
  ""
1160
  "ld.b %0,(%1)"
1161
  [(set_attr "type" "load")])
1162
 
1163
(define_insn ""
1164
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
1165
        (zero_extend:HI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
1166
                                         (match_operand:SI 2 "const_int_operand" "")))))]
1167
  "(INTVAL (operands[2]) >= 0) &&
1168
   (INTVAL (operands[2]) < 16)"
1169
  "ld.b %0,(%1,%2)"
1170
  [(set_attr "type" "load")])
1171
 
1172
 
1173
;; -------------------------------------------------------------------------
1174
;; Sign extension instructions
1175
;; -------------------------------------------------------------------------
1176
 
1177
(define_expand "extendsidi2"
1178
  [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
1179
        (match_operand:SI 1 "mcore_arith_reg_operand" "r"))]
1180
  ""
1181
  "
1182
  {
1183
    int low, high;
1184
 
1185
    if (TARGET_LITTLE_END)
1186
      low = 0, high = 4;
1187
    else
1188
      low = 4, high = 0;
1189
 
1190
    emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], low),
1191
              operands[1]));
1192
    emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], high),
1193
              gen_rtx_ASHIFTRT (SImode,
1194
                               gen_rtx_SUBREG (SImode, operands[0], low),
1195
                               GEN_INT (31))));
1196
    DONE;
1197
  }"
1198
)
1199
 
1200
(define_insn "extendhisi2"
1201
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1202
        (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))]
1203
  ""
1204
  "sexth        %0")
1205
 
1206
(define_insn "extendqisi2"
1207
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1208
        (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
1209
  ""
1210
  "sextb        %0")
1211
 
1212
(define_insn "extendqihi2"
1213
  [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
1214
        (sign_extend:HI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
1215
  ""
1216
  "sextb        %0")
1217
 
1218
;; -------------------------------------------------------------------------
1219
;; Move instructions
1220
;; -------------------------------------------------------------------------
1221
 
1222
;; SImode
1223
 
1224
(define_expand "movsi"
1225
  [(set (match_operand:SI 0 "general_operand" "")
1226
        (match_operand:SI 1 "general_operand" ""))]
1227
  ""
1228
  "
1229
{
1230
  if (GET_CODE (operands[0]) == MEM)
1231
    operands[1] = force_reg (SImode, operands[1]);
1232
}")
1233
 
1234
(define_insn ""
1235
  [(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,a,r,a,r,m")
1236
        (match_operand:SI 1 "mcore_general_movsrc_operand"  "r,P,i,c,R,m,r"))]
1237
  "(register_operand (operands[0], SImode)
1238
    || register_operand (operands[1], SImode))"
1239
  "* return mcore_output_move (insn, operands, SImode);"
1240
  [(set_attr "type" "move,move,move,move,load,load,store")])
1241
 
1242
;;
1243
;; HImode
1244
;;
1245
 
1246
(define_expand "movhi"
1247
  [(set (match_operand:HI 0 "general_operand" "")
1248
        (match_operand:HI 1 "general_operand"  ""))]
1249
  ""
1250
  "
1251
{
1252
  if (GET_CODE (operands[0]) == MEM)
1253
    operands[1] = force_reg (HImode, operands[1]);
1254
  else if (CONSTANT_P (operands[1])
1255
           && (GET_CODE (operands[1]) != CONST_INT
1256
               || (! CONST_OK_FOR_I (INTVAL (operands[1]))
1257
                   && ! CONST_OK_FOR_M (INTVAL (operands[1]))
1258
                   && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
1259
           && ! reload_completed && ! reload_in_progress)
1260
    {
1261
      rtx reg = gen_reg_rtx (SImode);
1262
      emit_insn (gen_movsi (reg, operands[1]));
1263
      operands[1] = gen_lowpart (HImode, reg);
1264
    }
1265
}")
1266
 
1267
(define_insn ""
1268
  [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
1269
        (match_operand:HI 1 "mcore_general_movsrc_operand"  "r,P,i,c,m,r"))]
1270
  "(register_operand (operands[0], HImode)
1271
    || register_operand (operands[1], HImode))"
1272
  "* return mcore_output_move (insn, operands, HImode);"
1273
  [(set_attr "type" "move,move,move,move,load,store")])
1274
 
1275
;;
1276
;; QImode
1277
;;
1278
 
1279
(define_expand "movqi"
1280
  [(set (match_operand:QI 0 "general_operand" "")
1281
        (match_operand:QI 1 "general_operand"  ""))]
1282
  ""
1283
  "
1284
{
1285
  if (GET_CODE (operands[0]) == MEM)
1286
    operands[1] = force_reg (QImode, operands[1]);
1287
  else if (CONSTANT_P (operands[1])
1288
           && (GET_CODE (operands[1]) != CONST_INT
1289
               || (! CONST_OK_FOR_I (INTVAL (operands[1]))
1290
                   && ! CONST_OK_FOR_M (INTVAL (operands[1]))
1291
                   && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
1292
           && ! reload_completed && ! reload_in_progress)
1293
    {
1294
      rtx reg = gen_reg_rtx (SImode);
1295
      emit_insn (gen_movsi (reg, operands[1]));
1296
      operands[1] = gen_lowpart (QImode, reg);
1297
    }
1298
}")
1299
 
1300
(define_insn ""
1301
  [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
1302
        (match_operand:QI 1 "mcore_general_movsrc_operand"  "r,P,i,c,m,r"))]
1303
  "(register_operand (operands[0], QImode)
1304
    || register_operand (operands[1], QImode))"
1305
  "* return mcore_output_move (insn, operands, QImode);"
1306
   [(set_attr "type" "move,move,move,move,load,store")])
1307
 
1308
 
1309
;; DImode
1310
 
1311
(define_expand "movdi"
1312
  [(set (match_operand:DI 0 "general_operand" "")
1313
        (match_operand:DI 1 "general_operand" ""))]
1314
  ""
1315
  "
1316
{
1317
  if (GET_CODE (operands[0]) == MEM)
1318
    operands[1] = force_reg (DImode, operands[1]);
1319
  else if (GET_CODE (operands[1]) == CONST_INT
1320
           && ! CONST_OK_FOR_I (INTVAL (operands[1]))
1321
           && ! CONST_OK_FOR_M (INTVAL (operands[1]))
1322
           && ! CONST_OK_FOR_N (INTVAL (operands[1])))
1323
    {
1324
      int i;
1325
      for (i = 0; i < UNITS_PER_WORD * 2; i += UNITS_PER_WORD)
1326
        emit_move_insn (simplify_gen_subreg (SImode, operands[0], DImode, i),
1327
                        simplify_gen_subreg (SImode, operands[1], DImode, i));
1328
      DONE;
1329
    }
1330
}")
1331
 
1332
(define_insn "movdi_i"
1333
  [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,a,r,m")
1334
        (match_operand:DI 1 "mcore_general_movsrc_operand" "I,M,N,r,R,m,r"))]
1335
  ""
1336
  "* return mcore_output_movedouble (operands, DImode);"
1337
  [(set_attr "length" "4") (set_attr "type" "move,move,move,move,load,load,store")])
1338
 
1339
;; SFmode
1340
 
1341
(define_expand "movsf"
1342
  [(set (match_operand:SF 0 "general_operand" "")
1343
        (match_operand:SF 1 "general_operand" ""))]
1344
  ""
1345
  "
1346
{
1347
  if (GET_CODE (operands[0]) == MEM)
1348
    operands[1] = force_reg (SFmode, operands[1]);
1349
}")
1350
 
1351
(define_insn "movsf_i"
1352
  [(set (match_operand:SF 0 "general_operand" "=r,r,m")
1353
        (match_operand:SF 1 "general_operand"  "r,m,r"))]
1354
  ""
1355
  "@
1356
        mov     %0,%1
1357
        ld.w    %0,%1
1358
        st.w    %1,%0"
1359
  [(set_attr "type" "move,load,store")])
1360
 
1361
;; DFmode
1362
 
1363
(define_expand "movdf"
1364
  [(set (match_operand:DF 0 "general_operand" "")
1365
        (match_operand:DF 1 "general_operand" ""))]
1366
  ""
1367
  "
1368
{
1369
  if (GET_CODE (operands[0]) == MEM)
1370
    operands[1] = force_reg (DFmode, operands[1]);
1371
}")
1372
 
1373
(define_insn "movdf_k"
1374
  [(set (match_operand:DF 0 "general_operand" "=r,r,m")
1375
        (match_operand:DF 1 "general_operand" "r,m,r"))]
1376
  ""
1377
  "* return mcore_output_movedouble (operands, DFmode);"
1378
  [(set_attr "length" "4") (set_attr "type" "move,load,store")])
1379
 
1380
 
1381
;; Load/store multiple
1382
 
1383
;; ??? This is not currently used.
1384
(define_insn "ldm"
1385
  [(set (match_operand:TI 0 "mcore_arith_reg_operand" "=r")
1386
        (mem:TI (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
1387
  ""
1388
  "ldq  %U0,(%1)")
1389
 
1390
;; ??? This is not currently used.
1391
(define_insn "stm"
1392
  [(set (mem:TI (match_operand:SI 0 "mcore_arith_reg_operand" "r"))
1393
        (match_operand:TI 1 "mcore_arith_reg_operand" "r"))]
1394
  ""
1395
  "stq  %U1,(%0)")
1396
 
1397
(define_expand "load_multiple"
1398
  [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
1399
                          (match_operand:SI 1 "" ""))
1400
                     (use (match_operand:SI 2 "" ""))])]
1401
  ""
1402
  "
1403
{
1404
  int regno, count, i;
1405
 
1406
  /* Support only loading a constant number of registers from memory and
1407
     only if at least two registers.  The last register must be r15.  */
1408
  if (GET_CODE (operands[2]) != CONST_INT
1409
      || INTVAL (operands[2]) < 2
1410
      || GET_CODE (operands[1]) != MEM
1411
      || XEXP (operands[1], 0) != stack_pointer_rtx
1412
      || GET_CODE (operands[0]) != REG
1413
      || REGNO (operands[0]) + INTVAL (operands[2]) != 16)
1414
    FAIL;
1415
 
1416
  count = INTVAL (operands[2]);
1417
  regno = REGNO (operands[0]);
1418
 
1419
  operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
1420
 
1421
  for (i = 0; i < count; i++)
1422
    XVECEXP (operands[3], 0, i)
1423
      = gen_rtx_SET (VOIDmode,
1424
                 gen_rtx_REG (SImode, regno + i),
1425
                 gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
1426
                                                      i * 4)));
1427
}")
1428
 
1429
(define_insn ""
1430
  [(match_parallel 0 "mcore_load_multiple_operation"
1431
                   [(set (match_operand:SI 1 "mcore_arith_reg_operand" "=r")
1432
                         (mem:SI (match_operand:SI 2 "register_operand" "r")))])]
1433
  "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
1434
  "ldm  %1-r15,(%2)")
1435
 
1436
(define_expand "store_multiple"
1437
  [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
1438
                          (match_operand:SI 1 "" ""))
1439
                     (use (match_operand:SI 2 "" ""))])]
1440
  ""
1441
  "
1442
{
1443
  int regno, count, i;
1444
 
1445
  /* Support only storing a constant number of registers to memory and
1446
     only if at least two registers.  The last register must be r15.  */
1447
  if (GET_CODE (operands[2]) != CONST_INT
1448
      || INTVAL (operands[2]) < 2
1449
      || GET_CODE (operands[0]) != MEM
1450
      || XEXP (operands[0], 0) != stack_pointer_rtx
1451
      || GET_CODE (operands[1]) != REG
1452
      || REGNO (operands[1]) + INTVAL (operands[2]) != 16)
1453
    FAIL;
1454
 
1455
  count = INTVAL (operands[2]);
1456
  regno = REGNO (operands[1]);
1457
 
1458
  operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
1459
 
1460
  for (i = 0; i < count; i++)
1461
    XVECEXP (operands[3], 0, i)
1462
      = gen_rtx_SET (VOIDmode,
1463
                 gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
1464
                                                      i * 4)),
1465
                 gen_rtx_REG (SImode, regno + i));
1466
}")
1467
 
1468
(define_insn ""
1469
  [(match_parallel 0 "mcore_store_multiple_operation"
1470
                   [(set (mem:SI (match_operand:SI 2 "register_operand" "r"))
1471
                         (match_operand:SI 1 "mcore_arith_reg_operand" "r"))])]
1472
  "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
1473
  "stm  %1-r15,(%2)")
1474
 
1475
;; ------------------------------------------------------------------------
;; Define the real conditional branch instructions.
;; ------------------------------------------------------------------------
 
1479
(define_insn "branch_true"
1480
  [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
1481
                           (label_ref (match_operand 0 "" ""))
1482
                           (pc)))]
1483
  ""
1484
  "jbt  %l0"
1485
  [(set_attr "type" "brcond")])
1486
 
1487
(define_insn "branch_false"
1488
  [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
1489
                           (label_ref (match_operand 0 "" ""))
1490
                           (pc)))]
1491
  ""
1492
  "jbf  %l0"
1493
  [(set_attr "type" "brcond")])
1494
 
1495
(define_insn "inverse_branch_true"
1496
  [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
1497
                           (pc)
1498
                           (label_ref (match_operand 0 "" ""))))]
1499
  ""
1500
  "jbf  %l0"
1501
  [(set_attr "type" "brcond")])
1502
 
1503
(define_insn "inverse_branch_false"
1504
  [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
1505
                           (pc)
1506
                           (label_ref (match_operand 0 "" ""))))]
1507
  ""
1508
  "jbt  %l0"
1509
  [(set_attr "type" "brcond")])
1510
 
1511
;; Conditional branch insns

;; At the top level, the condition tests are eq/ne, because we
;; are comparing against the condition register (which holds the
;; result of the true relational test).

; There is no beq compare, so we reverse the branch arms.

(define_expand "beq"
1520
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1521
                           (pc)
1522
                           (label_ref (match_operand 0 "" ""))))]
1523
  ""
1524
  "
1525
{
1526
  operands[1] = mcore_gen_compare_reg (EQ);
1527
}")
1528
 
1529
(define_expand "bne"
1530
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1531
                           (label_ref (match_operand 0 "" ""))
1532
                           (pc)))]
1533
  ""
1534
  "
1535
{
1536
  operands[1] = mcore_gen_compare_reg (NE);
1537
}")
1538
 
1539
; check whether (GT A imm) can become (LE A imm) with the branch reversed.
; if so, emit a (LT A imm + 1) in place of the (LE A imm).  BRC

(define_expand "bgt"
1543
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1544
                           (label_ref (match_operand 0 "" ""))
1545
                           (pc)))]
1546
  ""
1547
  "
1548
{
1549
  if (mcore_modify_comparison (LE))
1550
    {
1551
      emit_jump_insn (gen_reverse_blt (operands[0]));
1552
      DONE;
1553
    }
1554
  operands[1] = mcore_gen_compare_reg (GT);
1555
}")
1556
 
1557
; There is no ble compare, so we reverse the branch arms.
; reversed the condition and branch arms for ble -- the check_dbra_loop()
; transformation assumes that ble uses a branch-true with the label
; as the target. BRC

; check whether (LE A imm) can become (LT A imm + 1).

(define_expand "ble"
1565
  [(set (pc) (if_then_else (eq (match_dup 1) (const_int 0))
1566
                           (label_ref (match_operand 0 "" ""))
1567
                           (pc)))]
1568
  ""
1569
  "
1570
{
1571
  if (mcore_modify_comparison (LE))
1572
    {
1573
      emit_jump_insn (gen_blt (operands[0]));
1574
      DONE;
1575
    }
1576
  operands[1] = mcore_gen_compare_reg (LE);
1577
}")
1578
 
1579
; make generating a reversed blt simple
1580
(define_expand "reverse_blt"
1581
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1582
                           (pc)
1583
                           (label_ref (match_operand 0 "" ""))))]
1584
  ""
1585
  "
1586
{
1587
  operands[1] = mcore_gen_compare_reg (LT);
1588
}")
1589
 
1590
(define_expand "blt"
1591
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1592
                           (label_ref (match_operand 0 "" ""))
1593
                           (pc)))]
1594
  ""
1595
  "
1596
{
1597
  operands[1] = mcore_gen_compare_reg (LT);
1598
}")
1599
 
1600
; There is no bge compare, so we reverse the branch arms.
1601
 
1602
(define_expand "bge"
1603
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1604
                           (pc)
1605
                           (label_ref (match_operand 0 "" ""))))]
1606
  ""
1607
  "
1608
{
1609
  operands[1] = mcore_gen_compare_reg (GE);
1610
}")
1611
 
1612
; There is no gtu compare, so we reverse the branch arms
1613
 
1614
;(define_expand "bgtu"
1615
;  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1616
;                          (pc)
1617
;                          (label_ref (match_operand 0 "" ""))))]
1618
;  ""
1619
;  "
1620
;{
1621
;  if (GET_CODE (arch_compare_op1) == CONST_INT
1622
;      && INTVAL (arch_compare_op1) == 0)
1623
;    operands[1] = mcore_gen_compare_reg (NE);
1624
;  else
1625
;    { if (mcore_modify_comparison (GTU))
1626
;       {
1627
;         emit_jump_insn (gen_bgeu (operands[0]));
1628
;         DONE;
1629
;       }
1630
;      operands[1] = mcore_gen_compare_reg (LEU);
1631
;    }
1632
;}")
1633
 
1634
(define_expand "bgtu"
1635
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1636
                           (pc)
1637
                           (label_ref (match_operand 0 "" ""))))]
1638
  ""
1639
  "
1640
{
1641
  if (GET_CODE (arch_compare_op1) == CONST_INT
1642
      && INTVAL (arch_compare_op1) == 0)
1643
    {
1644
      /* The inverse of '> 0' for an unsigned test is
1645
         '== 0' but we do not have such an instruction available.
1646
         Instead we must reverse the branch (back to the normal
1647
         ordering) and test '!= 0'.  */
1648
 
1649
      operands[1] = mcore_gen_compare_reg (NE);
1650
 
1651
      emit_jump_insn (gen_rtx_SET (VOIDmode,
1652
        pc_rtx,
1653
        gen_rtx_IF_THEN_ELSE (VOIDmode,
1654
        gen_rtx_NE (VOIDmode,
1655
        operands[1],
1656
        const0_rtx),
1657
        gen_rtx_LABEL_REF (VOIDmode,operands[0]),
1658
        pc_rtx)));
1659
      DONE;
1660
    }
1661
  operands[1] = mcore_gen_compare_reg (GTU);
1662
}")
1663
 
1664
 
1665
(define_expand "bleu"
1666
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1667
                           (label_ref (match_operand 0 "" ""))
1668
                           (pc)))]
1669
  ""
1670
  "
1671
{
1672
  operands[1] = mcore_gen_compare_reg (LEU);
1673
}")
1674
 
1675
; There is no bltu compare, so we reverse the branch arms
1676
(define_expand "bltu"
1677
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1678
                           (pc)
1679
                           (label_ref (match_operand 0 "" ""))))]
1680
  ""
1681
  "
1682
{
1683
  operands[1] = mcore_gen_compare_reg (LTU);
1684
}")
1685
 
1686
(define_expand "bgeu"
1687
  [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
1688
                           (label_ref (match_operand 0 "" ""))
1689
                           (pc)))]
1690
  ""
1691
  "
1692
{
1693
 
1694
  operands[1] = mcore_gen_compare_reg (GEU);
1695
}")
1696
 
1697
;; ------------------------------------------------------------------------
;; Jump and linkage insns
;; ------------------------------------------------------------------------
 
1701
(define_insn "jump_real"
1702
  [(set (pc)
1703
        (label_ref (match_operand 0 "" "")))]
1704
  ""
1705
  "jbr  %l0"
1706
  [(set_attr "type" "branch")])
1707
 
1708
(define_expand "jump"
1709
 [(set (pc) (label_ref (match_operand 0 "" "")))]
1710
 ""
1711
 "
1712
{
1713
  emit_jump_insn (gen_jump_real (operand0));
1714
  DONE;
1715
}
1716
")
1717
 
1718
(define_insn "indirect_jump"
1719
  [(set (pc)
1720
        (match_operand:SI 0 "mcore_arith_reg_operand" "r"))]
1721
  ""
1722
  "jmp  %0"
1723
  [(set_attr "type" "jmp")])
1724
 
1725
(define_expand "call"
1726
  [(parallel[(call (match_operand:SI 0 "" "")
1727
                   (match_operand 1 "" ""))
1728
             (clobber (reg:SI 15))])]
1729
  ""
1730
  "
1731
{
1732
  if (GET_CODE (operands[0]) == MEM
1733
      && ! register_operand (XEXP (operands[0], 0), SImode)
1734
      && ! mcore_symbolic_address_p (XEXP (operands[0], 0)))
1735
    operands[0] = gen_rtx_MEM (GET_MODE (operands[0]),
1736
                           force_reg (Pmode, XEXP (operands[0], 0)));
1737
}")
1738
 
1739
(define_insn "call_internal"
1740
  [(call (mem:SI (match_operand:SI 0 "mcore_call_address_operand" "riR"))
1741
         (match_operand 1 "" ""))
1742
   (clobber (reg:SI 15))]
1743
  ""
1744
  "* return mcore_output_call (operands, 0);")
1745
 
1746
(define_expand "call_value"
1747
  [(parallel[(set (match_operand 0 "register_operand" "")
1748
                  (call (match_operand:SI 1 "" "")
1749
                        (match_operand 2 "" "")))
1750
             (clobber (reg:SI 15))])]
1751
  ""
1752
  "
1753
{
  /* operands[1] is the address being called; legitimize it just as the
     plain "call" expander above legitimizes its address operand.  */
  if (GET_CODE (operands[1]) == MEM
      && ! register_operand (XEXP (operands[1], 0), SImode)
      && ! mcore_symbolic_address_p (XEXP (operands[1], 0)))
    operands[1] = gen_rtx_MEM (GET_MODE (operands[1]),
                           force_reg (Pmode, XEXP (operands[1], 0)));
}")
 
1761
(define_insn "call_value_internal"
1762
  [(set (match_operand 0 "register_operand" "=r")
1763
        (call (mem:SI (match_operand:SI 1 "mcore_call_address_operand" "riR"))
1764
              (match_operand 2 "" "")))
1765
   (clobber (reg:SI 15))]
1766
  ""
1767
  "* return mcore_output_call (operands, 1);")
1768
 
1769
(define_insn "call_value_struct"
1770
  [(parallel [(set (match_parallel 0 ""
1771
                     [(expr_list (match_operand 3 "register_operand" "") (match_operand 4 "immediate_operand" ""))
1772
                      (expr_list (match_operand 5 "register_operand" "") (match_operand 6 "immediate_operand" ""))])
1773
                  (call (match_operand:SI 1 "" "")
1774
                        (match_operand 2 "" "")))
1775
             (clobber (reg:SI 15))])]
1776
  ""
1777
  "* return mcore_output_call (operands, 1);"
1778
)
1779
 
1780
 
1781
;; ------------------------------------------------------------------------
;; Misc insns
;; ------------------------------------------------------------------------
 
1785
(define_insn "nop"
1786
  [(const_int 0)]
1787
  ""
1788
  "or   r0,r0")
1789
 
1790
(define_insn "tablejump"
1791
  [(set (pc)
1792
        (match_operand:SI 0 "mcore_arith_reg_operand" "r"))
1793
   (use (label_ref (match_operand 1 "" "")))]
1794
  ""
1795
  "jmp  %0"
1796
  [(set_attr "type" "jmp")])
1797
 
1798
(define_insn "*return"
1799
 [(return)]
1800
 "reload_completed && ! mcore_naked_function_p ()"
1801
 "jmp   r15"
1802
 [(set_attr "type" "jmp")])
1803
 
1804
(define_insn "*no_return"
1805
 [(return)]
1806
 "reload_completed && mcore_naked_function_p ()"
1807
 ""
1808
 [(set_attr "length" "0")]
1809
)
1810
 
1811
(define_expand "prologue"
1812
  [(const_int 0)]
1813
  ""
1814
  "mcore_expand_prolog (); DONE;")
1815
 
1816
(define_expand "epilogue"
1817
  [(return)]
1818
  ""
1819
  "mcore_expand_epilog ();")
1820
 
1821
;; ------------------------------------------------------------------------
;; Scc instructions
;; ------------------------------------------------------------------------
 
1825
(define_insn "mvc"
1826
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1827
        (ne:SI (reg:CC 17) (const_int 0)))]
1828
  ""
1829
  "mvc  %0"
1830
  [(set_attr "type" "move")])
1831
 
1832
(define_insn "mvcv"
1833
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1834
        (eq:SI (reg:CC 17) (const_int 0)))]
1835
  ""
1836
  "mvcv %0"
1837
  [(set_attr "type" "move")])
1838
 
1839
; in 0.97 use (LE 0) with (LT 1) and complement c.  BRC
1840
(define_split
1841
  [(parallel[
1842
     (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1843
          (ne:SI (gt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
1844
                        (const_int 0))
1845
                 (const_int 0)))
1846
     (clobber (reg:SI 17))])]
1847
  ""
1848
  [(set (reg:CC 17)
1849
        (lt:CC (match_dup 1) (const_int 1)))
1850
   (set (match_dup 0) (eq:SI (reg:CC 17) (const_int 0)))])
1851
 
1852
 
1853
(define_expand "seq"
1854
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1855
        (eq:SI (match_dup 1) (const_int 0)))]
1856
  ""
1857
  "
1858
{
1859
  operands[1] = mcore_gen_compare_reg (NE);
1860
}")
1861
 
1862
(define_expand "sne"
1863
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1864
        (ne:SI (match_dup 1) (const_int 0)))]
1865
  ""
1866
  "
1867
{
1868
  operands[1] = mcore_gen_compare_reg (NE);
1869
}")
1870
 
1871
(define_expand "slt"
1872
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1873
        (ne:SI (match_dup 1) (const_int 0)))]
1874
  ""
1875
  "
1876
{
1877
  operands[1] = mcore_gen_compare_reg (LT);
1878
}")
1879
 
1880
; make generating a LT with the comparison reversed easy.  BRC
1881
(define_expand "reverse_slt"
1882
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1883
        (eq:SI (match_dup 1) (const_int 0)))]
1884
  ""
1885
  "
1886
{
1887
  operands[1] = mcore_gen_compare_reg (LT);
1888
}")
1889
 
1890
(define_expand "sge"
1891
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1892
        (eq:SI (match_dup 1) (const_int 0)))]
1893
  ""
1894
  "
1895
{
1896
  operands[1] = mcore_gen_compare_reg (LT);
1897
}")
1898
 
1899
; check whether (GT A imm) can become (LE A imm) with the comparison
1900
; reversed.  if so, emit a (LT A imm + 1) in place of the (LE A imm).  BRC
1901
 
1902
(define_expand "sgt"
1903
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1904
        (ne:SI (match_dup 1) (const_int 0)))]
1905
  ""
1906
  "
1907
{
1908
  if (mcore_modify_comparison (LE))
1909
    {
1910
      emit_insn (gen_reverse_slt (operands[0]));
1911
      DONE;
1912
    }
1913
 
1914
  operands[1] = mcore_gen_compare_reg (GT);
1915
}")
1916
 
1917
(define_expand "sle"
1918
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1919
        (eq:SI (match_dup 1) (const_int 0)))]
1920
  ""
1921
  "
1922
{
1923
  if (mcore_modify_comparison (LE))
1924
    {
1925
      emit_insn (gen_slt (operands[0]));
1926
      DONE;
1927
    }
1928
  operands[1] = mcore_gen_compare_reg (GT);
1929
}")
1930
 
1931
(define_expand "sltu"
1932
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1933
        (eq:SI (match_dup 1) (const_int 0)))]
1934
  ""
1935
  "
1936
{
1937
  operands[1] = mcore_gen_compare_reg (GEU);
1938
}")
1939
 
1940
(define_expand "sgeu"
1941
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1942
        (ne:SI (match_dup 1) (const_int 0)))]
1943
  ""
1944
  "
1945
{
1946
  operands[1] = mcore_gen_compare_reg (GEU);
1947
}")
1948
 
1949
(define_expand "sgtu"
1950
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1951
        (eq:SI (match_dup 1) (const_int 0)))]
1952
  ""
1953
  "
1954
{
1955
  operands[1] = mcore_gen_compare_reg (LEU);
1956
}")
1957
 
1958
(define_expand "sleu"
1959
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
1960
        (ne:SI (match_dup 1) (const_int 0)))]
1961
  ""
1962
  "
1963
{
1964
  operands[1] = mcore_gen_compare_reg (LEU);
1965
}")
1966
 
1967
(define_insn "incscc"
1968
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1969
        (plus:SI (ne (reg:CC 17) (const_int 0))
1970
                 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1971
  ""
1972
  "inct %0")
1973
 
1974
(define_insn "incscc_false"
1975
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1976
        (plus:SI (eq (reg:CC 17) (const_int 0))
1977
                 (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
1978
  ""
1979
  "incf %0")
1980
 
1981
(define_insn "decscc"
1982
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1983
        (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1984
                  (ne (reg:CC 17) (const_int 0))))]
1985
  ""
1986
  "dect %0")
1987
 
1988
(define_insn "decscc_false"
1989
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
1990
        (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
1991
                  (eq (reg:CC 17) (const_int 0))))]
1992
  ""
1993
  "decf %0")
1994
 
1995
;; ------------------------------------------------------------------------
;; Conditional move patterns.
;; ------------------------------------------------------------------------
 
1999
(define_expand "smaxsi3"
2000
  [(set (reg:CC 17)
2001
        (lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2002
               (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2003
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2004
        (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2005
                         (match_dup 1) (match_dup 2)))]
2006
  ""
2007
  "")
2008
 
2009
(define_split
2010
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2011
        (smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2012
                 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2013
  ""
2014
  [(set (reg:CC 17)
2015
        (lt:SI (match_dup 1) (match_dup 2)))
2016
   (set (match_dup 0)
2017
        (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2018
                         (match_dup 1) (match_dup 2)))]
2019
  "")
2020
 
2021
; no tstgt in 0.97, so just use cmplti (btsti x,31) and reverse move
2022
; condition  BRC
2023
(define_split
2024
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2025
        (smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2026
                 (const_int 0)))]
2027
  ""
2028
  [(set (reg:CC 17)
2029
        (lt:CC (match_dup 1) (const_int 0)))
2030
   (set (match_dup 0)
2031
        (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2032
                         (match_dup 1) (const_int 0)))]
2033
  "")
2034
 
2035
(define_expand "sminsi3"
2036
  [(set (reg:CC 17)
2037
        (lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2038
               (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2039
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2040
        (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2041
                         (match_dup 1) (match_dup 2)))]
2042
  ""
2043
  "")
2044
 
2045
(define_split
2046
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2047
        (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2048
                 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2049
  ""
2050
  [(set (reg:CC 17)
2051
        (lt:SI (match_dup 1) (match_dup 2)))
2052
   (set (match_dup 0)
2053
        (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2054
                         (match_dup 1) (match_dup 2)))]
2055
  "")
2056
 
2057
;(define_split
2058
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2059
;        (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2060
;                 (const_int 0)))]
2061
;  ""
2062
;  [(set (reg:CC 17)
2063
;        (gt:CC (match_dup 1) (const_int 0)))
2064
;   (set (match_dup 0)
2065
;        (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2066
;                         (match_dup 1) (const_int 0)))]
2067
;  "")
2068
 
2069
; changed these unsigned patterns to use geu instead of ltu.  it appears
; that the c-torture & ssrl test suites didn't catch these!  only showed
; up in friedman's clib work.   BRC 7/7/95

(define_expand "umaxsi3"
2074
  [(set (reg:CC 17)
2075
        (geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2076
                (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2077
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2078
        (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2079
                         (match_dup 2) (match_dup 1)))]
2080
  ""
2081
  "")
2082
 
2083
(define_split
2084
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2085
        (umax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2086
                 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2087
  ""
2088
  [(set (reg:CC 17)
2089
        (geu:SI (match_dup 1) (match_dup 2)))
2090
   (set (match_dup 0)
2091
        (if_then_else:SI (eq (reg:CC 17) (const_int 0))
2092
                         (match_dup 2) (match_dup 1)))]
2093
  "")
2094
 
2095
(define_expand "uminsi3"
2096
  [(set (reg:CC 17)
2097
        (geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
2098
                (match_operand:SI 2 "mcore_arith_reg_operand" "")))
2099
   (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2100
        (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2101
                         (match_dup 2) (match_dup 1)))]
2102
  ""
2103
  "")
2104
 
2105
(define_split
2106
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2107
        (umin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2108
                 (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
2109
  ""
2110
  [(set (reg:CC 17)
2111
        (geu:SI (match_dup 1) (match_dup 2)))
2112
   (set (match_dup 0)
2113
        (if_then_else:SI (ne (reg:CC 17) (const_int 0))
2114
                         (match_dup 2) (match_dup 1)))]
2115
  "")
2116
 
2117
;; ------------------------------------------------------------------------
;; conditional move patterns really start here
;; ------------------------------------------------------------------------
 
2121
;; the "movtK" patterns are experimental.  they are intended to account for
2122
;; gcc's mucking on code such as:
2123
;;
2124
;;            free_ent = ((block_compress) ? 257 : 256 );
2125
;;
2126
;; these patterns help to get a tstne/bgeni/inct (or equivalent) sequence
2127
;; when both arms have constants that are +/- 1 of each other.
2128
;;
2129
;; note in the following patterns that the "movtK" ones should be the first
2130
;; one defined in each sequence.  this is because the general pattern also
2131
;; matches, so use ordering to determine priority (it's easier this way than
2132
;; adding conditions to the general patterns).   BRC
2133
;;
2134
;; the U and Q constraints are necessary to ensure that reload does the
2135
;; 'right thing'.  U constrains the operand to 0 and Q to 1 for use in the
2136
;; clrt & clrf and clrt/inct & clrf/incf patterns.    BRC 6/26
2137
;;
2138
;; ??? there appears to be some problems with these movtK patterns for ops
2139
;; other than eq & ne.  need to fix.  6/30 BRC
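;;
;; a sketch of the kind of sequence meant above, using mnemonics that appear
;; in this file's patterns and comments (operand/register choices are
;; invented, not taken from a real build):
;;
;;      cmpnei  r7,0            ; condition <- (block_compress != 0)
;;      bgeni   r2,8            ; false-arm constant 256 (1 << 8)
;;      inct    r2              ; +1 when the condition holds -> 257
;;
;; i.e. the cmov output routine materializes one constant and then does a
;; conditional increment, rather than loading both constants.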
2140
 
2141
;; ------------------------------------------------------------------------
2142
;; ne
2143
;; ------------------------------------------------------------------------
2144
 
2145
; experimental conditional move with two constants +/- 1  BRC
2146
 
2147
(define_insn "movtK_1"
2148
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2149
        (if_then_else:SI
2150
            (ne (reg:CC 17) (const_int 0))
2151
          (match_operand:SI 1 "mcore_arith_O_operand" "O")
2152
          (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2153
  "  GET_CODE (operands[1]) == CONST_INT
2154
  && GET_CODE (operands[2]) == CONST_INT
2155
  && (   (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
2156
      || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2157
  "* return mcore_output_cmov (operands, 1, NULL);"
2158
  [(set_attr "length" "4")])
2159
 
2160
(define_insn "movt0"
2161
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2162
        (if_then_else:SI
2163
         (ne (reg:CC 17) (const_int 0))
2164
         (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2165
         (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2166
  ""
2167
  "@
2168
    movt        %0,%1
2169
    movf        %0,%2
2170
    clrt        %0
2171
    clrf        %0")
2172
 
2173
;; ------------------------------------------------------------------------
2174
;; eq
2175
;; ------------------------------------------------------------------------
2176
 
2177
; experimental conditional move with two constants +/- 1  BRC
2178
(define_insn "movtK_2"
2179
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2180
        (if_then_else:SI
2181
            (eq (reg:CC 17) (const_int 0))
2182
          (match_operand:SI 1 "mcore_arith_O_operand" "O")
2183
          (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2184
  "  GET_CODE (operands[1]) == CONST_INT
2185
  && GET_CODE (operands[2]) == CONST_INT
2186
  && (   (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
2187
      || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2188
  "* return mcore_output_cmov (operands, 0, NULL);"
2189
  [(set_attr "length" "4")])
2190
 
2191
(define_insn "movf0"
2192
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2193
        (if_then_else:SI
2194
         (eq (reg:CC 17) (const_int 0))
2195
         (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2196
         (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2197
  ""
2198
  "@
2199
    movf        %0,%1
2200
    movt        %0,%2
2201
    clrf        %0
2202
    clrt        %0")
2203
 
2204
; turns lsli rx,imm/btsti rx,31 into btsti rx,imm.  not done by a peephole
2205
; because the instructions are not adjacent (peepholes are related by posn -
2206
; not by dataflow).   BRC
2207
 
2208
(define_insn ""
2209
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2210
        (if_then_else:SI (eq (zero_extract:SI
2211
                              (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2212
                              (const_int 1)
2213
                              (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
2214
                             (const_int 0))
2215
                         (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
2216
                         (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
2217
  ""
2218
  "@
2219
    btsti       %1,%2\;movf     %0,%3
2220
    btsti       %1,%2\;movt     %0,%4
2221
    btsti       %1,%2\;clrf     %0
2222
    btsti       %1,%2\;clrt     %0"
2223
  [(set_attr "length" "4")])
2224
 
2225
; turns sextb rx/btsti rx,31 into btsti rx,7.  must be QImode to be safe.  BRC
2226
 
2227
(define_insn ""
2228
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2229
        (if_then_else:SI (eq (lshiftrt:SI
2230
                              (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2231
                              (const_int 7))
2232
                             (const_int 0))
2233
                         (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2234
                         (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2235
  "GET_CODE (operands[1]) == SUBREG &&
2236
      GET_MODE (SUBREG_REG (operands[1])) == QImode"
2237
  "@
2238
    btsti       %1,7\;movf      %0,%2
2239
    btsti       %1,7\;movt      %0,%3
2240
    btsti       %1,7\;clrf      %0
2241
    btsti       %1,7\;clrt      %0"
2242
  [(set_attr "length" "4")])
2243
 
2244
 
2245
;; ------------------------------------------------------------------------
2246
;; ne
2247
;; ------------------------------------------------------------------------
2248
 
2249
;; Combine creates this from an andn instruction in a scc sequence.
2250
;; We must recognize it to get conditional moves generated.
2251
 
2252
; experimental conditional move with two constants +/- 1  BRC
2253
(define_insn "movtK_3"
2254
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2255
        (if_then_else:SI
2256
            (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2257
                (const_int 0))
2258
          (match_operand:SI 2 "mcore_arith_O_operand" "O")
2259
          (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2260
  "  GET_CODE (operands[2]) == CONST_INT
2261
  && GET_CODE (operands[3]) == CONST_INT
2262
  && (   (INTVAL (operands[2]) - INTVAL (operands[3]) == 1)
2263
      || (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2264
  "*
2265
{
2266
  rtx out_operands[4];
2267
  out_operands[0] = operands[0];
2268
  out_operands[1] = operands[2];
2269
  out_operands[2] = operands[3];
2270
  out_operands[3] = operands[1];
2271
 
2272
  return mcore_output_cmov (out_operands, 1, \"cmpnei   %3,0\");
2273
 
2274
}"
2275
  [(set_attr "length" "6")])
2276
 
2277
(define_insn "movt2"
2278
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2279
        (if_then_else:SI (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2280
                             (const_int 0))
2281
                         (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2282
                         (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2283
  ""
2284
  "@
2285
    cmpnei      %1,0\;movt      %0,%2
2286
    cmpnei      %1,0\;movf      %0,%3
2287
    cmpnei      %1,0\;clrt      %0
2288
    cmpnei      %1,0\;clrf      %0"
2289
  [(set_attr "length" "4")])
2290
 
2291
; turns lsli rx,imm/btsti rx,31 into btsti rx,imm.  not done by a peephole
2292
; because the instructions are not adjacent (peepholes are related by posn -
2293
; not by dataflow).   BRC
2294
 
2295
(define_insn ""
2296
 [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2297
        (if_then_else:SI (ne (zero_extract:SI
2298
                              (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2299
                              (const_int 1)
2300
                              (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
2301
                             (const_int 0))
2302
                         (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
2303
                         (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
2304
  ""
2305
  "@
2306
    btsti       %1,%2\;movt     %0,%3
2307
    btsti       %1,%2\;movf     %0,%4
2308
    btsti       %1,%2\;clrt     %0
2309
    btsti       %1,%2\;clrf     %0"
2310
  [(set_attr "length" "4")])
2311
 
2312
; turns sextb rx/btsti rx,31 into btsti rx,7.  must be QImode to be safe.  BRC
2313
 
2314
(define_insn ""
2315
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2316
        (if_then_else:SI (ne (lshiftrt:SI
2317
                              (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2318
                              (const_int 7))
2319
                             (const_int 0))
2320
                         (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2321
                         (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2322
  "GET_CODE (operands[1]) == SUBREG &&
2323
      GET_MODE (SUBREG_REG (operands[1])) == QImode"
2324
  "@
2325
    btsti       %1,7\;movt      %0,%2
2326
    btsti       %1,7\;movf      %0,%3
2327
    btsti       %1,7\;clrt      %0
2328
    btsti       %1,7\;clrf      %0"
2329
  [(set_attr "length" "4")])
2330
 
2331
;; ------------------------------------------------------------------------
2332
;; eq/eq
2333
;; ------------------------------------------------------------------------
2334
 
2335
; experimental conditional move with two constants +/- 1  BRC
2336
(define_insn "movtK_4"
2337
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2338
        (if_then_else:SI
2339
            (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2340
          (match_operand:SI 1 "mcore_arith_O_operand" "O")
2341
          (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2342
  "GET_CODE (operands[1]) == CONST_INT &&
2343
   GET_CODE (operands[2]) == CONST_INT &&
2344
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2345
   (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2346
  "* return mcore_output_cmov(operands, 1, NULL);"
2347
  [(set_attr "length" "4")])
2348
 
2349
(define_insn "movt3"
2350
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2351
        (if_then_else:SI
2352
         (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2353
         (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2354
         (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2355
  ""
2356
  "@
2357
    movt        %0,%1
2358
    movf        %0,%2
2359
    clrt        %0
2360
    clrf        %0")
2361
 
2362
;; ------------------------------------------------------------------------
2363
;; eq/ne
2364
;; ------------------------------------------------------------------------
2365
 
2366
; experimental conditional move with two constants +/- 1  BRC
2367
(define_insn "movtK_5"
2368
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2369
        (if_then_else:SI
2370
            (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2371
          (match_operand:SI 1 "mcore_arith_O_operand" "O")
2372
          (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2373
  "GET_CODE (operands[1]) == CONST_INT &&
2374
   GET_CODE (operands[2]) == CONST_INT &&
2375
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2376
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2377
  "* return mcore_output_cmov (operands, 0, NULL);"
2378
  [(set_attr "length" "4")])
2379
 
2380
(define_insn "movf1"
2381
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2382
        (if_then_else:SI
2383
         (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2384
         (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2385
         (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2386
  ""
2387
  "@
2388
    movf        %0,%1
2389
    movt        %0,%2
2390
    clrf        %0
2391
    clrt        %0")
2392
 
2393
;; ------------------------------------------------------------------------
2394
;; eq
2395
;; ------------------------------------------------------------------------
2396
 
2397
;; Combine creates this from an andn instruction in a scc sequence.
2398
;; We must recognize it to get conditional moves generated.
2399
 
2400
; experimental conditional move with two constants +/- 1  BRC
2401
 
2402
(define_insn "movtK_6"
2403
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2404
        (if_then_else:SI
2405
            (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2406
                (const_int 0))
2407
          (match_operand:SI 2 "mcore_arith_O_operand" "O")
2408
          (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2409
  "GET_CODE (operands[1]) == CONST_INT &&
2410
   GET_CODE (operands[2]) == CONST_INT &&
2411
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
2412
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2413
  "*
2414
{
2415
   rtx out_operands[4];
2416
   out_operands[0] = operands[0];
2417
   out_operands[1] = operands[2];
2418
   out_operands[2] = operands[3];
2419
   out_operands[3] = operands[1];
2420
 
2421
   return mcore_output_cmov (out_operands, 0, \"cmpnei  %3,0\");
2422
}"
2423
  [(set_attr "length" "6")])
2424
 
2425
(define_insn "movf3"
2426
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2427
        (if_then_else:SI (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2428
                             (const_int 0))
2429
                         (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2430
                         (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2431
  ""
2432
  "@
2433
    cmpnei      %1,0\;movf      %0,%2
2434
    cmpnei      %1,0\;movt      %0,%3
2435
    cmpnei      %1,0\;clrf      %0
2436
    cmpnei      %1,0\;clrt      %0"
2437
  [(set_attr "length" "4")])
2438
 
2439
;; ------------------------------------------------------------------------
2440
;; ne/eq
2441
;; ------------------------------------------------------------------------
2442
 
2443
; experimental conditional move with two constants +/- 1  BRC
2444
(define_insn "movtK_7"
2445
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2446
        (if_then_else:SI
2447
            (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2448
          (match_operand:SI 1 "mcore_arith_O_operand" "O")
2449
          (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2450
  "GET_CODE (operands[1]) == CONST_INT &&
2451
   GET_CODE (operands[2]) == CONST_INT &&
2452
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2453
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2454
  "* return mcore_output_cmov (operands, 0, NULL);"
2455
  [(set_attr "length" "4")])
2456
 
2457
(define_insn "movf4"
2458
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2459
        (if_then_else:SI
2460
         (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
2461
         (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2462
         (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2463
  ""
2464
  "@
2465
    movf        %0,%1
2466
    movt        %0,%2
2467
    clrf        %0
2468
    clrt        %0")
2469
 
2470
;; ------------------------------------------------------------------------
2471
;; ne/ne
2472
;; ------------------------------------------------------------------------
2473
 
2474
; experimental conditional move with two constants +/- 1  BRC
2475
(define_insn "movtK_8"
2476
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2477
        (if_then_else:SI
2478
            (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2479
          (match_operand:SI 1 "mcore_arith_O_operand" "O")
2480
          (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
2481
  "GET_CODE (operands[1]) == CONST_INT &&
2482
   GET_CODE (operands[2]) == CONST_INT &&
2483
   ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
2484
    (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
2485
  "* return mcore_output_cmov (operands, 1, NULL);"
2486
  [(set_attr "length" "4")])
2487
 
2488
(define_insn "movt4"
2489
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2490
        (if_then_else:SI
2491
         (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
2492
         (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
2493
         (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
2494
  ""
2495
  "@
2496
    movt        %0,%1
2497
    movf        %0,%2
2498
    clrt        %0
2499
    clrf        %0")
2500
 
2501
;; Also need patterns to recognize lt/ge, since otherwise the compiler will
;; try to output not/asri/tstne/movf.
 
2504
;; ------------------------------------------------------------------------
2505
;; lt
2506
;; ------------------------------------------------------------------------
2507
 
2508
; experimental conditional move with two constants +/- 1  BRC
2509
(define_insn "movtK_9"
2510
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2511
        (if_then_else:SI
2512
            (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2513
                (const_int 0))
2514
          (match_operand:SI 2 "mcore_arith_O_operand" "O")
2515
          (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2516
  "GET_CODE (operands[2]) == CONST_INT &&
2517
   GET_CODE (operands[3]) == CONST_INT &&
2518
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
2519
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2520
  "*
2521
{
2522
   rtx out_operands[4];
2523
   out_operands[0] = operands[0];
2524
   out_operands[1] = operands[2];
2525
   out_operands[2] = operands[3];
2526
   out_operands[3] = operands[1];
2527
 
2528
   return mcore_output_cmov (out_operands, 1, \"btsti   %3,31\");
2529
}"
2530
  [(set_attr "length" "6")])
2531
 
2532
(define_insn "movt5"
2533
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2534
        (if_then_else:SI (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2535
                             (const_int 0))
2536
                         (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2537
                         (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2538
  ""
2539
  "@
2540
    btsti       %1,31\;movt     %0,%2
2541
    btsti       %1,31\;movf     %0,%3
2542
    btsti       %1,31\;clrt     %0
2543
    btsti       %1,31\;clrf     %0"
2544
  [(set_attr "length" "4")])
2545
 
2546
 
2547
;; ------------------------------------------------------------------------
2548
;; ge
2549
;; ------------------------------------------------------------------------
2550
 
2551
; experimental conditional move with two constants +/- 1  BRC
2552
(define_insn "movtK_10"
2553
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2554
        (if_then_else:SI
2555
            (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r")
2556
                (const_int 0))
2557
          (match_operand:SI 2 "mcore_arith_O_operand" "O")
2558
          (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
2559
  "GET_CODE (operands[2]) == CONST_INT &&
2560
   GET_CODE (operands[3]) == CONST_INT &&
2561
   ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
2562
    (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
2563
  "*
2564
{
2565
  rtx out_operands[4];
2566
  out_operands[0] = operands[0];
2567
  out_operands[1] = operands[2];
2568
  out_operands[2] = operands[3];
2569
  out_operands[3] = operands[1];
2570
 
2571
   return mcore_output_cmov (out_operands, 0, \"btsti   %3,31\");
2572
}"
2573
  [(set_attr "length" "6")])
2574
 
2575
(define_insn "movf5"
2576
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
2577
        (if_then_else:SI (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
2578
                             (const_int 0))
2579
                         (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
2580
                         (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
2581
  ""
2582
  "@
2583
    btsti       %1,31\;movf     %0,%2
2584
    btsti       %1,31\;movt     %0,%3
2585
    btsti       %1,31\;clrf     %0
2586
    btsti       %1,31\;clrt     %0"
2587
  [(set_attr "length" "4")])
2588
 
2589
;; ------------------------------------------------------------------------
;; Bitfield extract (xtrbN)
;; ------------------------------------------------------------------------
 
2593
; sometimes we're better off using QI/HI mode and letting the machine indep.
; part expand insv and extv.
;
; e.g., sequences like this [an insertion]:
;
;      ldw r8,(r6)
;      movi r7,0x00ffffff
;      and r8,r7                 r7 dead
;      stw r8,(r6)                r8 dead
;
; become:
;
;      movi r8,0
;      stb r8,(r6)              r8 dead
;
; it looks like always using SI mode is a win except in this type of code
; (when adjacent bit fields collapse on a byte or halfword boundary).  when
; expanding with SI mode, non-adjacent bit field masks fold, but with QI/HI
; mode, they do not.  one thought is to add some peepholes to cover cases
; like the above, but this is not a general solution.
;
; -mword-bitfields expands/inserts using SI mode.  otherwise, do it with
; the smallest mode possible (using the machine indep. expansions).  BRC
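;
; for the aligned 8-bit case the xtrb patterns further below already give the
; short form; e.g. (illustrative registers) a signed 8-bit field at RTL bit
; position 16 extracts as
;      xtrb1 rD,rS
;      sextb rD
; and the unsigned version simply drops the sextb.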
2616
 
2617
;(define_expand "extv"
2618
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2619
;       (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2620
;                        (match_operand:SI 2 "const_int_operand" "")
2621
;                        (match_operand:SI 3 "const_int_operand" "")))
2622
;   (clobber (reg:CC 17))]
2623
;  ""
2624
;  "
2625
;{
2626
;  if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) % 8 != 0)
2627
;    {
2628
;     if (TARGET_W_FIELD)
2629
;       {
2630
;        rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
2631
;        rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
2632
;
2633
;        emit_insn (gen_rtx_SET (SImode, operands[0], operands[1]));
2634
;        emit_insn (gen_rtx_SET (SImode, operands[0],
2635
;                            gen_rtx_ASHIFT (SImode, operands[0], lshft)));
2636
;        emit_insn (gen_rtx_SET (SImode, operands[0],
2637
;                            gen_rtx_ASHIFTRT (SImode, operands[0], rshft)));
2638
;        DONE;
2639
;     }
2640
;     else
2641
;        FAIL;
2642
;  }
2643
;}")
2644
 
2645
(define_expand "extv"
2646
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2647
        (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2648
                         (match_operand:SI 2 "const_int_operand" "")
2649
                         (match_operand:SI 3 "const_int_operand" "")))
2650
   (clobber (reg:CC 17))]
2651
  ""
2652
  "
2653
{
2654
  if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
2655
    {
2656
       /* 8 bit field, aligned properly, use the xtrb[0123]+sext sequence.  */
2657
       /* not DONE, not FAIL, but let the RTL get generated....  */
2658
    }
2659
  else if (TARGET_W_FIELD)
2660
    {
2661
      /* Arbitrary placement; note that the tree->rtl generator will make
2662
         something close to this if we return FAIL  */
2663
      rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
2664
      rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
2665
      rtx tmp1 = gen_reg_rtx (SImode);
2666
      rtx tmp2 = gen_reg_rtx (SImode);
2667
 
2668
      emit_insn (gen_rtx_SET (SImode, tmp1, operands[1]));
2669
      emit_insn (gen_rtx_SET (SImode, tmp2,
2670
                         gen_rtx_ASHIFT (SImode, tmp1, lshft)));
2671
      emit_insn (gen_rtx_SET (SImode, operands[0],
2672
                         gen_rtx_ASHIFTRT (SImode, tmp2, rshft)));
2673
      DONE;
2674
    }
2675
  else
2676
    {
2677
      /* Let the caller choose an alternate sequence.  */
2678
      FAIL;
2679
    }
2680
}")
2681
 
2682
(define_expand "extzv"
2683
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2684
        (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
2685
                         (match_operand:SI 2 "const_int_operand" "")
2686
                         (match_operand:SI 3 "const_int_operand" "")))
2687
   (clobber (reg:CC 17))]
2688
  ""
2689
  "
2690
{
2691
  if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
2692
    {
2693
       /* 8 bit field, aligned properly, use the xtrb[0123] sequence.  */
2694
       /* Let the template generate some RTL....  */
2695
    }
2696
  else if (CONST_OK_FOR_K ((1 << INTVAL (operands[2])) - 1))
2697
    {
2698
      /* A narrow bit-field (<=5 bits) means we can do a shift to put
2699
         it in place and then use an andi to extract it.
2700
         This is as good as a shiftleft/shiftright.  */
2701
 
2702
      rtx shifted;
2703
      rtx mask = GEN_INT ((1 << INTVAL (operands[2])) - 1);
2704
 
2705
      if (INTVAL (operands[3]) == 0)
2706
        {
2707
          shifted = operands[1];
2708
        }
2709
      else
2710
        {
2711
          rtx rshft = GEN_INT (INTVAL (operands[3]));
2712
          shifted = gen_reg_rtx (SImode);
2713
          emit_insn (gen_rtx_SET (SImode, shifted,
2714
                         gen_rtx_LSHIFTRT (SImode, operands[1], rshft)));
2715
        }
2716
     emit_insn (gen_rtx_SET (SImode, operands[0],
2717
                       gen_rtx_AND (SImode, shifted, mask)));
2718
     DONE;
2719
   }
2720
 else if (TARGET_W_FIELD)
2721
   {
2722
     /* Arbitrary pattern; play shift/shift games to get it.
2723
      * this is pretty much what the caller will do if we say FAIL */
2724
     rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
2725
     rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
2726
     rtx tmp1 = gen_reg_rtx (SImode);
2727
     rtx tmp2 = gen_reg_rtx (SImode);
2728
 
2729
     emit_insn (gen_rtx_SET (SImode, tmp1, operands[1]));
2730
     emit_insn (gen_rtx_SET (SImode, tmp2,
2731
                         gen_rtx_ASHIFT (SImode, tmp1, lshft)));
2732
     emit_insn (gen_rtx_SET (SImode, operands[0],
2733
                       gen_rtx_LSHIFTRT (SImode, tmp2, rshft)));
2734
     DONE;
2735
   }
2736
 else
2737
   {
2738
     /* Make the compiler figure out some alternative mechanism.  */
2739
     FAIL;
2740
   }
2741
 
2742
 /* Emit the RTL pattern; something will match it later.  */
2743
}")
2744
 
2745
(define_expand "insv"
2746
  [(set (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "")
2747
                         (match_operand:SI 1 "const_int_operand" "")
2748
                         (match_operand:SI 2 "const_int_operand" ""))
2749
        (match_operand:SI 3 "general_operand" ""))
2750
   (clobber (reg:CC 17))]
2751
  ""
2752
  "
2753
{
2754
  if (mcore_expand_insv (operands))
2755
    {
2756
      DONE;
2757
    }
2758
  else
2759
    {
2760
      FAIL;
2761
    }
2762
}")
2763
 
2764
;;
;; the xtrb[0123] instructions handily get at 8-bit fields on nice boundaries.
;; but then, they do force you through r1.
;;
;; the combiner will build such patterns for us, so we'll make them available
;; for its use.
;;
;; Note that we have both SIGNED and UNSIGNED versions of these...
;;

;;
;; These no longer worry about the clobbering of the CC bit; not sure this is
;; good...
;;
;; the SIGNED versions of these
;;
(define_insn ""
2781
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
2782
        (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
2783
  ""
2784
  "@
2785
        asri    %0,24
2786
        xtrb0   %0,%1\;sextb    %0"
2787
  [(set_attr "type" "shift")])
2788
 
2789
(define_insn ""
2790
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2791
        (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
2792
  ""
2793
  "xtrb1        %0,%1\;sextb    %0"
2794
  [(set_attr "type" "shift")])
2795
 
2796
(define_insn ""
2797
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2798
        (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
2799
  ""
2800
  "xtrb2        %0,%1\;sextb    %0"
2801
  [(set_attr "type" "shift")])
2802
 
2803
(define_insn ""
2804
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2805
        (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") (const_int 8) (const_int 0)))]
2806
  ""
2807
  "sextb        %0"
2808
  [(set_attr "type" "shift")])
2809
 
2810
;; the UNSIGNED uses of xtrb[0123]
2811
;;
2812
(define_insn ""
2813
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
2814
        (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
2815
  ""
2816
  "@
2817
        lsri    %0,24
2818
        xtrb0   %0,%1"
2819
  [(set_attr "type" "shift")])
2820
 
2821
(define_insn ""
2822
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2823
        (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
2824
  ""
2825
  "xtrb1        %0,%1"
2826
  [(set_attr "type" "shift")])
2827
 
2828
(define_insn ""
2829
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
2830
        (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
2831
  ""
2832
  "xtrb2        %0,%1"
2833
  [(set_attr "type" "shift")])
2834
 
2835
;; This can be peepholed if it follows a ldb ...
2836
(define_insn ""
2837
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
2838
        (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 0)))]
2839
  ""
2840
  "@
2841
        zextb   %0
2842
        xtrb3   %0,%1\;zextb    %0"
2843
  [(set_attr "type" "shift")])
2844
 
2845
 
2846
;; ------------------------------------------------------------------------
;; Block move - adapted from m88k.md
;; ------------------------------------------------------------------------
 
2850
(define_expand "movmemsi"
2851
  [(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
2852
                   (mem:BLK (match_operand:BLK 1 "" "")))
2853
              (use (match_operand:SI 2 "general_operand" ""))
2854
              (use (match_operand:SI 3 "immediate_operand" ""))])]
2855
  ""
2856
  "
2857
{
2858
  if (mcore_expand_block_move (operands))
2859
    DONE;
2860
  else
2861
    FAIL;
2862
}")
2863
 
2864
;; ;;; ??? These patterns are meant to be generated from expand_block_move,
2865
;; ;;; but they currently are not.
2866
;;
2867
;; (define_insn ""
2868
;;   [(set (match_operand:QI 0 "mcore_arith_reg_operand" "=r")
2869
;;      (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
2870
;;   ""
2871
;;   "ld.b      %0,%1"
2872
;;   [(set_attr "type" "load")])
2873
;;
2874
;; (define_insn ""
2875
;;   [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
2876
;;      (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
2877
;;   ""
2878
;;   "ld.h      %0,%1"
2879
;;   [(set_attr "type" "load")])
2880
;;
2881
;; (define_insn ""
2882
;;   [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2883
;;      (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
2884
;;   ""
2885
;;   "ld.w      %0,%1"
2886
;;   [(set_attr "type" "load")])
2887
;;
2888
;; (define_insn ""
2889
;;   [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
2890
;;      (match_operand:QI 1 "mcore_arith_reg_operand" "r"))]
2891
;;   ""
2892
;;   "st.b      %1,%0"
2893
;;   [(set_attr "type" "store")])
2894
;;
2895
;; (define_insn ""
2896
;;   [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
2897
;;      (match_operand:HI 1 "mcore_arith_reg_operand" "r"))]
2898
;;   ""
2899
;;   "st.h      %1,%0"
2900
;;   [(set_attr "type" "store")])
2901
;;
2902
;; (define_insn ""
2903
;;   [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
2904
;;      (match_operand:SI 1 "mcore_arith_reg_operand" "r"))]
2905
;;   ""
2906
;;   "st.w      %1,%0"
2907
;;   [(set_attr "type" "store")])
2908
 
2909
;; ------------------------------------------------------------------------
;; Misc Optimizing quirks
;; ------------------------------------------------------------------------

;; pair to catch constructs like:  (int *)((p+=4)-4) which happen
;; in stdarg/varargs traversal.  This changes a 3-insn sequence to a
;; 2-insn sequence. -- RBE 11/30/95
(define_insn ""
2917
  [(parallel[
2918
      (set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
2919
           (match_operand:SI 1 "mcore_arith_reg_operand" "+r"))
2920
      (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])]
2921
  "GET_CODE(operands[2]) == CONST_INT"
2922
  "#"
2923
  [(set_attr "length" "4")])
2924
 
2925
(define_split
2926
  [(parallel[
2927
      (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
2928
           (match_operand:SI 1 "mcore_arith_reg_operand" ""))
2929
      (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])]
2930
  "GET_CODE(operands[2]) == CONST_INT &&
2931
   operands[0] != operands[1]"
2932
  [(set (match_dup 0) (match_dup 1))
2933
   (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))])
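
;; after the split, the two SETs become ordinary insns; a sketch of the
;; intended 2-insn result (register names and the add-immediate mnemonic are
;; assumed, not taken from a real build):
;;      mov     rD,rP           ; keep the old value of p
;;      addi    rP,4            ; p += 4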


;;; Peepholes

; Note: in the following patterns, use mcore_is_dead() to ensure that the
; reg we may be trashing really is dead.  Reload doesn't always mark
; deaths, so mcore_is_dead() (see mcore.c) scans forward to find its death.  BRC

;;; A peephole to convert the 3 instruction sequence generated by reload
;;; to load a FP-offset address into a 2 instruction sequence.
;;; ??? This probably never matches anymore.
(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
        (match_operand:SI 1 "const_int_operand" "J"))
   (set (match_dup 0) (neg:SI (match_dup 0)))
   (set (match_dup 0)
        (plus:SI (match_dup 0)
                 (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
  "CONST_OK_FOR_J (INTVAL (operands[1]))"
  "error\;mov   %0,%2\;subi     %0,%1")

;; Moves of inlinable constants are done late, so when a 'not' is generated
;; it is never combined with the following 'and' to generate an 'andn',
;; because the combiner never sees it.  Use a peephole to pick up this case
;; (it happens mostly with bitfields).  BRC

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
        (match_operand:SI 1 "const_int_operand" "i"))
   (set (match_operand:SI 2 "mcore_arith_reg_operand" "r")
        (and:SI (match_dup 2) (match_dup 0)))]
  "mcore_const_trick_uses_not (INTVAL (operands[1])) &&
        operands[0] != operands[2] &&
        mcore_is_dead (insn, operands[0])"
  "* return mcore_output_andn (insn, operands);")

; When setting or clearing just two bits, it's cheapest to use two bseti's
; or bclri's.  This only happens when relaxing immediates.  BRC

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
        (match_operand:SI 1 "const_int_operand" ""))
   (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
        (ior:SI (match_dup 2) (match_dup 0)))]
  "TARGET_HARDLIT && mcore_num_ones (INTVAL (operands[1])) == 2 &&
       mcore_is_dead (insn, operands[0])"
  "* return mcore_output_bseti (operands[2], INTVAL (operands[1]));")

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
        (match_operand:SI 1 "const_int_operand" ""))
   (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
        (and:SI (match_dup 2) (match_dup 0)))]
  "TARGET_HARDLIT && mcore_num_zeros (INTVAL (operands[1])) == 2 &&
       mcore_is_dead (insn, operands[0])"
  "* return mcore_output_bclri (operands[2], INTVAL (operands[1]));")

; Change an 'and' with a mask that has a single cleared bit into a bclri.
; This handles QI and HI mode values using the knowledge that the most
; significant bits don't matter.

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
        (match_operand:SI 1 "const_int_operand" ""))
   (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
        (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "")
                (match_dup 0)))]
  "GET_CODE (operands[3]) == SUBREG &&
      GET_MODE (SUBREG_REG (operands[3])) == QImode &&
      mcore_num_zeros (INTVAL (operands[1]) | 0xffffff00) == 1 &&
      mcore_is_dead (insn, operands[0])"
"*
  if (! mcore_is_same_reg (operands[2], operands[3]))
    output_asm_insn (\"mov\\t%2,%3\", operands);
  return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffffff00);")

/* Do not fold these together -- mode is lost at final output phase.  */

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
        (match_operand:SI 1 "const_int_operand" ""))
   (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
        (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "")
                (match_dup 0)))]
  "GET_CODE (operands[3]) == SUBREG &&
      GET_MODE (SUBREG_REG (operands[3])) == HImode &&
      mcore_num_zeros (INTVAL (operands[1]) | 0xffff0000) == 1 &&
      operands[2] == operands[3] &&
      mcore_is_dead (insn, operands[0])"
"*
  if (! mcore_is_same_reg (operands[2], operands[3]))
    output_asm_insn (\"mov\\t%2,%3\", operands);
  return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffff0000);")

; This peephole helps when using -mwide-bitfields to widen fields so they
; collapse.   This, however, has the effect that a narrower mode is not used
; when desirable.
;
; e.g., sequences like:
;
;      ldw r8,(r6)
;      movi r7,0x00ffffff
;      and r8,r7                 r7 dead
;      stw r8,(r6)                r8 dead
;
; get peepholed to become:
;
;      movi r8,0
;      stb r8,(r6)              r8 dead
;
; Do only easy addresses that have no offset.  This peephole is also applied
; to halfwords.  We need to check that the load is non-volatile before we get
; rid of it.

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
        (match_operand:SI 1 "memory_operand" ""))
   (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
        (match_operand:SI 3 "const_int_operand" ""))
   (set (match_dup 0) (and:SI (match_dup 0) (match_dup 2)))
   (set (match_operand:SI 4 "memory_operand" "") (match_dup 0))]
  "mcore_is_dead (insn, operands[0]) &&
   ! MEM_VOLATILE_P (operands[1]) &&
   mcore_is_dead (insn, operands[2]) &&
   (mcore_byte_offset (INTVAL (operands[3])) > -1 ||
    mcore_halfword_offset (INTVAL (operands[3])) > -1) &&
   ! MEM_VOLATILE_P (operands[4]) &&
   GET_CODE (XEXP (operands[4], 0)) == REG"
"*
{
   int ofs;
   enum machine_mode mode;
   rtx base_reg = XEXP (operands[4], 0);

   if ((ofs = mcore_byte_offset (INTVAL (operands[3]))) > -1)
      mode = QImode;
   else if ((ofs = mcore_halfword_offset (INTVAL (operands[3]))) > -1)
      mode = HImode;
   else
      gcc_unreachable ();

   if (ofs > 0)
      operands[4] = gen_rtx_MEM (mode,
                              gen_rtx_PLUS (SImode, base_reg, GEN_INT(ofs)));
   else
      operands[4] = gen_rtx_MEM (mode, base_reg);

   if (mode == QImode)
      return \"movi     %0,0\\n\\tst.b  %0,%4\";

   return \"movi        %0,0\\n\\tst.h  %0,%4\";
}")

; From sop11.  Get btsti's for (LT A 0) where A is a QI or HI value.

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
        (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))
   (set (reg:CC 17)
        (lt:CC (match_dup 0)
            (const_int 0)))]
  "mcore_is_dead (insn, operands[0])"
  "btsti        %0,7")

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
        (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))
   (set (reg:CC 17)
        (lt:CC (match_dup 0)
            (const_int 0)))]
  "mcore_is_dead (insn, operands[0])"
  "btsti        %0,15")

; Pick up a tst.  This combination happens because the immediate is not
; allowed to fold into one of the operands of the tst.  Does not happen
; when relaxing immediates.  BRC

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
        (match_operand:SI 1 "mcore_arith_reg_operand" ""))
   (set (match_dup 0)
        (and:SI (match_dup 0)
                (match_operand:SI 2 "mcore_literal_K_operand" "")))
   (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
  "mcore_is_dead (insn, operands[0])"
  "movi %0,%2\;tst      %1,%0")

(define_peephole
  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
        (if_then_else:SI (ne (zero_extract:SI
                                (match_operand:SI 1 "mcore_arith_reg_operand" "")
                                (const_int 1)
                                (match_operand:SI 2 "mcore_literal_K_operand" ""))
                             (const_int 0))
           (match_operand:SI 3 "mcore_arith_imm_operand" "")
           (match_operand:SI 4 "mcore_arith_imm_operand" "")))
    (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
  ""
"*
{
  unsigned int op0 = REGNO (operands[0]);

  if (GET_CODE (operands[3]) == REG)
    {
     if (REGNO (operands[3]) == op0 && GET_CODE (operands[4]) == CONST_INT
         && INTVAL (operands[4]) == 0)
        return \"btsti  %1,%2\\n\\tclrf %0\";
     else if (GET_CODE (operands[4]) == REG)
       {
        if (REGNO (operands[4]) == op0)
           return \"btsti       %1,%2\\n\\tmovf %0,%3\";
        else if (REGNO (operands[3]) == op0)
           return \"btsti       %1,%2\\n\\tmovt %0,%4\";
       }

     gcc_unreachable ();
    }
  else if (GET_CODE (operands[3]) == CONST_INT
           && INTVAL (operands[3]) == 0
           && GET_CODE (operands[4]) == REG)
     return \"btsti     %1,%2\\n\\tclrt %0\";

  gcc_unreachable ();
}")

; Experimental - do the constant folding ourselves.  Note that this isn't
;   re-applied the way we'd really want: four ands collapse into two
;   instead of one, because peepholes are applied as a sliding window.
;   The peephole does not generate new rtl's; it slides across the rtl's
;   generating machine instructions.  It would be nicer if the peephole
;   optimizer were changed to re-apply patterns and to generate new
;   rtl's, which is more flexible.  The pattern below helps when we're
;   not using relaxed immediates.   BRC

;(define_peephole
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
;        (match_operand:SI 1 "const_int_operand" ""))
;   (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
;          (and:SI (match_dup 2) (match_dup 0)))
;   (set (match_dup 0)
;        (match_operand:SI 3 "const_int_operand" ""))
;   (set (match_dup 2)
;           (and:SI (match_dup 2) (match_dup 0)))]
;  "!TARGET_RELAX_IMM && mcore_is_dead (insn, operands[0]) &&
;       mcore_const_ok_for_inline (INTVAL (operands[1]) & INTVAL (operands[3]))"
;  "*
;{
;  rtx out_operands[2];
;  out_operands[0] = operands[0];
;  out_operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[3]));
;
;  output_inline_const (SImode, out_operands);
;
;  output_asm_insn (\"and       %2,%0\", operands);
;
;  return \"\";
;}")

; BRC: for inlining, get rid of the extra test - experimental
;(define_peephole
;  [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
;          (ne:SI (reg:CC 17) (const_int 0)))
;   (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))
;   (set (pc)
;       (if_then_else (eq (reg:CC 17) (const_int 0))
;         (label_ref (match_operand 1 "" ""))
;         (pc)))]
;   ""
;   "*
;{
;  if (get_attr_length (insn) == 10)
;    {
;      output_asm_insn (\"bt    2f\\n\\tjmpi    [1f]\", operands);
;      output_asm_insn (\".align        2\\n1:\", operands);
;      output_asm_insn (\".long %1\\n2:\", operands);
;      return \"\";
;    }
;  return \"bf  %l1\";
;}")


;;; Special patterns for dealing with the constant pool.

;;; 4 byte integer in line.

(define_insn "consttable_4"
 [(unspec_volatile [(match_operand:SI 0 "general_operand" "=g")] 0)]
 ""
 "*
{
  assemble_integer (operands[0], 4, BITS_PER_WORD, 1);
  return \"\";
}"
 [(set_attr "length" "4")])

;;; Align to a four-byte boundary.

(define_insn "align_4"
 [(unspec_volatile [(const_int 0)] 1)]
 ""
 ".align 2")

;;; Handle extra constant pool entries created during final pass.

(define_insn "consttable_end"
  [(unspec_volatile [(const_int 0)] 2)]
  ""
  "* return mcore_output_jump_label_table ();")

;;
;; Stack allocation -- in particular, for alloca().
;; This is *not* what we use for entry into functions.
;;
;; This is how we allocate stack space.  If we are allocating a
;; constant amount of space and we know it is less than 4096
;; bytes, no probing is needed.
;;
;; If it is more than 4096 bytes, we need to probe the stack
;; periodically.
;;
;; operands[1], the distance, is a POSITIVE number indicating that we
;; are allocating stack space.
;;
(define_expand "allocate_stack"
  [(set (reg:SI 0)
        (plus:SI (reg:SI 0)
                 (match_operand:SI 1 "general_operand" "")))
   (set (match_operand:SI 0 "register_operand" "=r")
        (match_dup 2))]
  ""
  "
{
  /* If the user wants no probing, just do the adjustment directly.  */
  if (mcore_stack_increment == 0)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,operands[1]));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
    }

  /* For small constant growth, we unroll the code.  */
  if (GET_CODE (operands[1]) == CONST_INT
      && INTVAL (operands[1]) < 8 * STACK_UNITS_MAXSTEP)
    {
      int left = INTVAL(operands[1]);

      /* If it's a long way, get close enough for a last shot.  */
      if (left >= STACK_UNITS_MAXSTEP)
        {
          rtx tmp = gen_reg_rtx (Pmode);
          emit_insn (gen_movsi (tmp, GEN_INT (STACK_UNITS_MAXSTEP)));
          do
            {
              rtx memref = gen_rtx_MEM (SImode, stack_pointer_rtx);

              MEM_VOLATILE_P (memref) = 1;
              emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
              emit_insn (gen_movsi (memref, stack_pointer_rtx));
              left -= STACK_UNITS_MAXSTEP;
            }
          while (left > STACK_UNITS_MAXSTEP);
        }
      /* Perform the final adjustment.  */
      emit_insn (gen_addsi3 (stack_pointer_rtx,stack_pointer_rtx,GEN_INT(-left)));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
    }
  else
    {
      rtx out_label = 0;
      rtx loop_label = gen_label_rtx ();
      rtx step = gen_reg_rtx (Pmode);
      rtx tmp = gen_reg_rtx (Pmode);
      rtx memref;

#if 1
      emit_insn (gen_movsi (tmp, operands[1]));
      emit_insn (gen_movsi (step, GEN_INT(STACK_UNITS_MAXSTEP)));

      if (GET_CODE (operands[1]) != CONST_INT)
        {
          out_label = gen_label_rtx ();
          emit_insn (gen_cmpsi (step, tmp));            /* quick out */
          emit_jump_insn (gen_bgeu (out_label));
        }

      /* Run a loop that steps it incrementally.  */
      emit_label (loop_label);

      /* Extend a step, probe, and adjust remaining count.  */
      emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, step));
      memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
      MEM_VOLATILE_P (memref) = 1;
      emit_insn(gen_movsi(memref, stack_pointer_rtx));
      emit_insn(gen_subsi3(tmp, tmp, step));

      /* Loop condition -- going back up.  */
      emit_insn (gen_cmpsi (step, tmp));
      emit_jump_insn (gen_bltu (loop_label));

      if (out_label)
        emit_label (out_label);

      /* Bump the residual.  */
      emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
#else
      /* Simple one-shot -- ensure register and do a subtract.
       * This does NOT comply with the ABI.  */
      emit_insn (gen_movsi (tmp, operands[1]));
      emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
;;      emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
      DONE;
#endif
    }
}")
