;; Machine description for RISC-V atomic operations.
;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
;; Based on MIPS target for GNU compiler.

;; This file is part of GCC.

;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.

;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.

;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.
;; Unspec codes used by the atomic patterns below.
(define_c_enum "unspec" [
  UNSPEC_COMPARE_AND_SWAP
  UNSPEC_SYNC_OLD_OP
  UNSPEC_SYNC_EXCHANGE
  UNSPEC_ATOMIC_STORE
  UNSPEC_MEMORY_BARRIER
])
;; RTL codes that map onto AMO instructions, and the corresponding
;; optab/mnemonic fragment for each ("add" -> amoadd, "or" -> amoor, ...).
(define_code_iterator any_atomic [plus ior xor and])
(define_code_attr atomic_optab
  [(plus "add") (ior "or") (xor "xor") (and "and")])
;; Memory barriers.

;; Expand a memory fence for the given memory model.  A relaxed model
;; needs no fence at all; everything else is handled by the insn below,
;; which reads/writes a volatile BLKmode scratch MEM so the fence cannot
;; be optimized away or moved across memory accesses.
(define_expand "mem_thread_fence"
  [(match_operand:SI 0 "const_int_operand" "")] ;; model
  ""
{
  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
    {
      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
      MEM_VOLATILE_P (mem) = 1;
      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
    }
  DONE;
})
;; Emit the RISC-V "fence" instruction whose predecessor/successor sets
;; match the requested C11 memory model.
(define_insn "mem_thread_fence_1"
  [(set (match_operand:BLK 0 "" "")
	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
   (match_operand:SI 1 "const_int_operand" "")] ;; model
  ""
{
  switch (INTVAL (operands[1]))
    {
    case MEMMODEL_SEQ_CST:
    case MEMMODEL_ACQ_REL:
      return "fence rw,rw";
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_CONSUME:
      return "fence r,rw";
    case MEMMODEL_RELEASE:
      return "fence rw,w";
    default:
      gcc_unreachable();
    }
})
|
70 |
|
|
;; Atomic memory operations.
|
71 |
|
|
|
72 |
|
|
;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
|
73 |
|
|
(define_insn "atomic_store"
|
74 |
|
|
[(set (match_operand:GPR 0 "memory_operand" "=A")
|
75 |
|
|
(unspec_volatile:GPR
|
76 |
|
|
[(match_operand:GPR 1 "reg_or_0_operand" "rJ")
|
77 |
|
|
(match_operand:SI 2 "const_int_operand")] ;; model
|
78 |
|
|
UNSPEC_ATOMIC_STORE))]
|
79 |
|
|
"TARGET_ATOMIC"
|
80 |
|
|
"amoswap.%A2 zero,%z1,%0")
|
81 |
|
|
|
;; Atomic read-modify-write with the old value discarded (result written
;; to the zero register).  One pattern per code in any_atomic.
(define_insn "atomic_<atomic_optab><mode>"
  [(set (match_operand:GPR 0 "memory_operand" "+A")
	(unspec_volatile:GPR
	  [(any_atomic:GPR (match_dup 0)
			   (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
	   (match_operand:SI 2 "const_int_operand")] ;; model
	  UNSPEC_SYNC_OLD_OP))]
  "TARGET_ATOMIC"
  "amo<insn>.<amo>%A2 zero,%z1,%0")
;; Atomic read-modify-write that also returns the old memory value in
;; operand 0 (earlyclobbered: it must not overlap the inputs).
(define_insn "atomic_fetch_<atomic_optab><mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
	(match_operand:GPR 1 "memory_operand" "+A"))
   (set (match_dup 1)
	(unspec_volatile:GPR
	  [(any_atomic:GPR (match_dup 1)
			   (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
	   (match_operand:SI 3 "const_int_operand")] ;; model
	  UNSPEC_SYNC_OLD_OP))]
  "TARGET_ATOMIC"
  "amo<insn>.<amo>%A3 %0,%z2,%1")
;; Atomic exchange: store operand 2 into memory and return the old value
;; in operand 0, via a single amoswap.
(define_insn "atomic_exchange<mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
	(unspec_volatile:GPR
	  [(match_operand:GPR 1 "memory_operand" "+A")
	   (match_operand:SI 3 "const_int_operand")] ;; model
	  UNSPEC_SYNC_EXCHANGE))
   (set (match_dup 1)
	(match_operand:GPR 2 "register_operand" "0"))]
  "TARGET_ATOMIC"
  "amoswap.<amo>%A3 %0,%z2,%1")
;; Strong compare-and-swap via an LR/SC loop: load-reserve the old value
;; into operand 0, bail out if it differs from the expected value
;; (operand 2), otherwise store-conditional the desired value (operand 3)
;; and retry on SC failure.  Four 4-byte instructions -> length 16.
(define_insn "atomic_cas_value_strong<mode>"
  [(set (match_operand:GPR 0 "register_operand" "=&r")
	(match_operand:GPR 1 "memory_operand" "+A"))
   (set (match_dup 1)
	(unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
			      (match_operand:GPR 3 "reg_or_0_operand" "rJ")
			      (match_operand:SI 4 "const_int_operand")  ;; mod_s
			      (match_operand:SI 5 "const_int_operand")] ;; mod_f
	  UNSPEC_COMPARE_AND_SWAP))
   (clobber (match_scratch:GPR 6 "=&r"))]
  "TARGET_ATOMIC"
  "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
  [(set (attr "length") (const_int 16))])
;; Expand the standard __atomic_compare_exchange pattern: run the strong
;; CAS insn above, then derive the boolean success output by comparing
;; the loaded old value against the expected value.
(define_expand "atomic_compare_and_swap<mode>"
  [(match_operand:SI 0 "register_operand" "")   ;; bool output
   (match_operand:GPR 1 "register_operand" "")  ;; val output
   (match_operand:GPR 2 "memory_operand" "")    ;; memory
   (match_operand:GPR 3 "reg_or_0_operand" "")  ;; expected value
   (match_operand:GPR 4 "reg_or_0_operand" "")  ;; desired value
   (match_operand:SI 5 "const_int_operand" "")  ;; is_weak
   (match_operand:SI 6 "const_int_operand" "")  ;; mod_s
   (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
  "TARGET_ATOMIC"
{
  emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
						operands[3], operands[4],
						operands[6], operands[7]));

  /* Compute val_output - expected; zero iff the CAS succeeded.  Skip the
     subtraction when the expected value is literal zero.  */
  rtx compare = operands[1];
  if (operands[3] != const0_rtx)
    {
      rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
      compare = gen_reg_rtx (<MODE>mode);
      emit_insn (gen_rtx_SET (VOIDmode, compare, difference));
    }

  rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
  rtx result = gen_reg_rtx (<MODE>mode);
  emit_insn (gen_rtx_SET (VOIDmode, result, eq));
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_lowpart (SImode, result)));
  DONE;
})
;; Byte-wide test-and-set, synthesized from a word-wide atomic OR.
(define_expand "atomic_test_and_set"
  [(match_operand:QI 0 "register_operand" "")   ;; bool output
   (match_operand:QI 1 "memory_operand" "+A")   ;; memory
   (match_operand:SI 2 "const_int_operand" "")] ;; model
  "TARGET_ATOMIC"
{
  /* We have no QImode atomics, so use the address LSBs to form a mask,
     then use an aligned SImode atomic. */
  rtx result = operands[0];
  rtx mem = operands[1];
  rtx model = operands[2];
  rtx addr = force_reg (Pmode, XEXP (mem, 0));

  /* Round the address down to its containing word.  */
  rtx aligned_addr = gen_reg_rtx (Pmode);
  emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));

  rtx aligned_mem = change_address (mem, SImode, aligned_addr);
  set_mem_alias_set (aligned_mem, 0);

  /* Byte offset of the target byte within its word (0..3).  */
  rtx offset = gen_reg_rtx (SImode);
  emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
				       GEN_INT (3)));

  rtx tmp = gen_reg_rtx (SImode);
  emit_move_insn (tmp, GEN_INT (1));

  /* Bit shift amount = byte offset * 8.  */
  rtx shmt = gen_reg_rtx (SImode);
  emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));

  /* Word-sized mask with bit 1 in the target byte's low bit.  */
  rtx word = gen_reg_rtx (SImode);
  emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));

  /* Atomically OR the mask in; tmp receives the old word value.  */
  tmp = gen_reg_rtx (SImode);
  emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));

  /* Shift the target byte's old low bit down to bit 0 of the result.  */
  emit_move_insn (gen_lowpart (SImode, result),
		  gen_rtx_LSHIFTRT (SImode, tmp,
				    gen_lowpart (SImode, shmt)));
  DONE;
})