1 |
63 |
zero_gravi |
// #################################################################################################
|
2 |
|
|
// # << NEORV32 - Intrinsics + Emulation Functions for the B CPU extensions >> #
|
3 |
|
|
// # ********************************************************************************************* #
|
4 |
|
|
// # The intrinsics provided by this library allow to use the hardware bit manipulation unit of #
|
5 |
|
|
// # the RISC-V B CPU extension without the need for B support by the compiler. #
|
6 |
|
|
// # ********************************************************************************************* #
|
7 |
|
|
// # BSD 3-Clause License #
|
8 |
|
|
// # #
|
9 |
|
|
// # Copyright (c) 2021, Stephan Nolting. All rights reserved. #
|
10 |
|
|
// # #
|
11 |
|
|
// # Redistribution and use in source and binary forms, with or without modification, are #
|
12 |
|
|
// # permitted provided that the following conditions are met: #
|
13 |
|
|
// # #
|
14 |
|
|
// # 1. Redistributions of source code must retain the above copyright notice, this list of #
|
15 |
|
|
// # conditions and the following disclaimer. #
|
16 |
|
|
// # #
|
17 |
|
|
// # 2. Redistributions in binary form must reproduce the above copyright notice, this list of #
|
18 |
|
|
// # conditions and the following disclaimer in the documentation and/or other materials #
|
19 |
|
|
// # provided with the distribution. #
|
20 |
|
|
// # #
|
21 |
|
|
// # 3. Neither the name of the copyright holder nor the names of its contributors may be used to #
|
22 |
|
|
// # endorse or promote products derived from this software without specific prior written #
|
23 |
|
|
// # permission. #
|
24 |
|
|
// # #
|
25 |
|
|
// # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS #
|
26 |
|
|
// # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF #
|
27 |
|
|
// # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
|
28 |
|
|
// # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #
|
29 |
|
|
// # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE #
|
30 |
|
|
// # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED #
|
31 |
|
|
// # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
|
32 |
|
|
// # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED #
|
33 |
|
|
// # OF THE POSSIBILITY OF SUCH DAMAGE. #
|
34 |
|
|
// # ********************************************************************************************* #
|
35 |
|
|
// # The NEORV32 Processor - https://github.com/stnolting/neorv32 (c) Stephan Nolting #
|
36 |
|
|
// #################################################################################################
|
37 |
|
|
|
38 |
|
|
|
39 |
|
|
/**********************************************************************//**
|
40 |
|
|
* @file bitmanip_test/neorv32_b_extension_intrinsics.h
|
41 |
|
|
* @author Stephan Nolting
|
42 |
|
|
* @brief "Intrinsic" library for the NEORV32 bit manipulation Zbb extension.
|
43 |
|
|
* Also provides emulation functions for all intrinsics (functionality re-built in pure software).
|
44 |
|
|
*
|
45 |
|
|
* @warning This library is just a temporary fall-back until the Zbb extensions are supported by the upstream RISC-V GCC port.
|
46 |
|
|
**************************************************************************/
|
47 |
|
|
|
48 |
|
|
#ifndef neorv32_b_extension_intrinsics_h
|
49 |
|
|
#define neorv32_b_extension_intrinsics_h
|
50 |
|
|
|
51 |
|
|
|
52 |
|
|
// ################################################################################################
|
53 |
|
|
// "Intrinsics"
|
54 |
|
|
// ################################################################################################
|
55 |
|
|
|
56 |
|
|
|
57 |
|
|
// ---------------------------------------------
|
58 |
|
|
// Zbb - Base instructions
|
59 |
|
|
// ---------------------------------------------
|
60 |
|
|
|
61 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation CLZ (count leading zeros) [B.Zbb]
 *
 * @note Emits the raw instruction word via CUSTOM_INSTR_R1_TYPE since the
 * compiler does not support the Zbb extension natively.
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Number of leading zeros in source operand.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_clz(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // clz a0, a0
  CUSTOM_INSTR_R1_TYPE(0b0110000, 0b00000, a0, 0b001, a0, 0b0010011);

  return result;
}
|
80 |
|
|
|
81 |
|
|
|
82 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation CTZ (count trailing zeros) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Number of trailing zeros in source operand.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_ctz(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // ctz a0, a0
  CUSTOM_INSTR_R1_TYPE(0b0110000, 0b00001, a0, 0b001, a0, 0b0010011);

  return result;
}
|
101 |
|
|
|
102 |
|
|
|
103 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation CPOP (count set bits) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Number of set bits in source operand.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_cpop(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // cpop a0, a0
  CUSTOM_INSTR_R1_TYPE(0b0110000, 0b00010, a0, 0b001, a0, 0b0010011);

  return result;
}
|
122 |
|
|
|
123 |
|
|
|
124 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation SEXT.B (sign-extend byte) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Sign extended byte (operand(7:0)).
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_sextb(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // sext.b a0, a0
  CUSTOM_INSTR_R1_TYPE(0b0110000, 0b00100, a0, 0b001, a0, 0b0010011);

  return result;
}
|
143 |
|
|
|
144 |
|
|
|
145 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation SEXT.H (sign-extend half-word) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Sign-extended half-word (operand(15:0)).
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_sexth(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // sext.h a0, a0
  CUSTOM_INSTR_R1_TYPE(0b0110000, 0b00101, a0, 0b001, a0, 0b0010011);

  return result;
}
|
164 |
|
|
|
165 |
|
|
|
166 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ZEXT.H (zero-extend half-word) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Zero-extended half-word (operand(15:0)).
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_zexth(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // zext.h a0, a0 (encoded via OP-major opcode, funct7=0000100, funct3=100)
  CUSTOM_INSTR_R1_TYPE(0b0000100, 0b00000, a0, 0b100, a0, 0b0110011);

  return result;
}
|
185 |
|
|
|
186 |
|
|
|
187 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MIN (select signed minimum) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Signed minimum.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_min(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // min a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0000101, a1, a0, 0b100, a0, 0b0110011);

  return result;
}
|
208 |
|
|
|
209 |
|
|
|
210 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MINU (select unsigned minimum) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Unsigned minimum.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_minu(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // minu a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0000101, a1, a0, 0b101, a0, 0b0110011);

  return result;
}
|
231 |
|
|
|
232 |
|
|
|
233 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MAX (select signed maximum) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Signed maximum.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_max(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // max a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0000101, a1, a0, 0b110, a0, 0b0110011);

  return result;
}
|
254 |
|
|
|
255 |
|
|
|
256 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MAXU (select unsigned maximum) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Unsigned maximum.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_maxu(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // maxu a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0000101, a1, a0, 0b111, a0, 0b0110011);

  return result;
}
|
277 |
|
|
|
278 |
|
|
|
279 |
|
|
/**********************************************************************//**
|
280 |
|
|
* Intrinsic: Bit manipulation ANDN (logical and-negate) [B.Zbb]
|
281 |
|
|
*
|
282 |
|
|
* @param[in] rs1 Source operand 1 (a0).
|
283 |
|
|
* @param[in] rs2 Source operand 2 (a0).
|
284 |
|
|
* @return Operand 1 AND NOT operand 2.
|
285 |
|
|
**************************************************************************/
|
286 |
|
|
inline inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_andn(uint32_t rs1, uint32_t rs2) {
|
287 |
|
|
|
288 |
|
|
register uint32_t result __asm__ ("a0");
|
289 |
|
|
register uint32_t tmp_a __asm__ ("a0") = rs1;
|
290 |
|
|
register uint32_t tmp_b __asm__ ("a1") = rs2;
|
291 |
|
|
|
292 |
|
|
// dummy instruction to prevent GCC "constprop" optimization
|
293 |
|
|
asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));
|
294 |
|
|
|
295 |
|
|
// andn a0, a0, a1
|
296 |
|
|
CUSTOM_INSTR_R2_TYPE(0b0100000, a1, a0, 0b111, a0, 0b0110011);
|
297 |
|
|
|
298 |
|
|
return result;
|
299 |
|
|
}
|
300 |
|
|
|
301 |
|
|
|
302 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ORN (logical or-negate) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Operand 1 OR NOT operand 2.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_orn(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // orn a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0100000, a1, a0, 0b110, a0, 0b0110011);

  return result;
}
|
323 |
|
|
|
324 |
|
|
|
325 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation XNOR (logical xor-negate) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Operand 1 XOR NOT operand 2.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_xnor(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // xnor a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0100000, a1, a0, 0b100, a0, 0b0110011);

  return result;
}
|
346 |
|
|
|
347 |
|
|
|
348 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ROL (rotate-left) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Operand 1 rotated left by operand_2(4:0) positions.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_rol(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // rol a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0110000, a1, a0, 0b001, a0, 0b0110011);

  return result;
}
|
369 |
|
|
|
370 |
|
|
|
371 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ROR (rotate-right) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @param[in] rs2 Source operand 2 (a1).
 * @return Operand 1 rotated right by operand_2(4:0) positions.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_ror(uint32_t rs1, uint32_t rs2) {

  // bind operands/result to registers a0/a1 as encoded in the instruction below
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;
  register uint32_t tmp_b __asm__ ("a1") = rs2;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a), [input_j] "r" (tmp_b));

  // ror a0, a0, a1
  CUSTOM_INSTR_R2_TYPE(0b0110000, a1, a0, 0b101, a0, 0b0110011);

  return result;
}
|
392 |
|
|
|
393 |
|
|
|
394 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation RORI (rotate-right) by 20 positions. [B.Zbb]
 * @warning Fixed shift amount (20) for now; the immediate is hard-coded
 * in the instruction word below (0b10100 = 20).
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Operand 1 rotated right by 20 positions.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_rori20(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // rori a0, a0, 20
  CUSTOM_INSTR_R1_TYPE(0b0110000, 0b10100, a0, 0b101, a0, 0b0010011);

  return result;
}
|
414 |
|
|
|
415 |
|
|
|
416 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ORC.B (or-combine byte) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return OR-combined bytes of operand 1.
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_orcb(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // gorci a0, a0, 7 (pseudo-instruction: orc.b a0, a0)
  CUSTOM_INSTR_R1_TYPE(0b0010100, 0b00111, a0, 0b101, a0, 0b0010011);

  return result;
}
|
435 |
|
|
|
436 |
|
|
|
437 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation REV8 (byte-swap) [B.Zbb]
 *
 * @param[in] rs1 Source operand 1 (a0).
 * @return Byte swap of operand 1
 **************************************************************************/
inline uint32_t __attribute__ ((always_inline)) riscv_intrinsic_rev8(uint32_t rs1) {

  // bind operand and result to register a0 (in-place operation)
  register uint32_t result __asm__ ("a0");
  register uint32_t tmp_a __asm__ ("a0") = rs1;

  // dummy instruction to prevent GCC "constprop" optimization
  asm volatile ("" : [output] "=r" (result) : [input_i] "r" (tmp_a));

  // grevi a0, a0, -8 (pseudo-instruction: rev8 a0, a0; RV32 shamt = 0b11000 = 24)
  CUSTOM_INSTR_R1_TYPE(0b0110100, 0b11000, a0, 0b101, a0, 0b0010011);

  return result;
}
|
456 |
|
|
|
457 |
|
|
|
458 |
|
|
// ################################################################################################
|
459 |
|
|
// Emulation functions
|
460 |
|
|
// ################################################################################################
|
461 |
|
|
|
462 |
|
|
|
463 |
|
|
// ---------------------------------------------
|
464 |
|
|
// Zbb - Base instructions
|
465 |
|
|
// ---------------------------------------------
|
466 |
|
|
|
467 |
|
|
|
468 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation CLZ (count leading zeros) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return Number of leading zeros in source operand (32 if operand is zero).
 **************************************************************************/
uint32_t riscv_emulate_clz(uint32_t rs1) {

  uint32_t sreg = rs1;
  uint32_t cnt = 0;

  // bound the loop at 32 iterations: the original unbounded loop never
  // terminated for rs1 == 0 (clz of zero is defined as XLEN = 32)
  while (cnt < 32) {
    if (sreg & 0x80000000UL) {
      break;
    }
    sreg <<= 1;
    cnt++;
  }

  return cnt;
}
|
491 |
|
|
|
492 |
|
|
|
493 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation CTZ (count trailing zeros) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return Number of trailing zeros in source operand (32 if operand is zero).
 **************************************************************************/
uint32_t riscv_emulate_ctz(uint32_t rs1) {

  uint32_t sreg = rs1;
  uint32_t cnt = 0;

  // bound the loop at 32 iterations: the original unbounded loop never
  // terminated for rs1 == 0 (ctz of zero is defined as XLEN = 32)
  while (cnt < 32) {
    if (sreg & 1) {
      break;
    }
    sreg >>= 1;
    cnt++;
  }

  return cnt;
}
|
516 |
|
|
|
517 |
|
|
|
518 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation CPOP (population count) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return Number of set bits in source operand.
 **************************************************************************/
uint32_t riscv_emulate_cpop(uint32_t rs1) {

  uint32_t bits = rs1;
  uint32_t popcount = 0;

  // inspect each of the 32 bit positions once
  for (int pos = 0; pos < 32; pos++) {
    popcount += bits & 1;
    bits >>= 1;
  }

  return popcount;
}
|
539 |
|
|
|
540 |
|
|
|
541 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation SEXT.B (sign-extend byte) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return Sign-extended byte (operand(7:0)).
 **************************************************************************/
uint32_t riscv_emulate_sextb(uint32_t rs1) {

  // isolate the low byte, then replicate its sign bit (bit 7) into bits 31:8
  uint32_t byte = rs1 & 0xffUL;
  return (byte & 0x80UL) ? (byte | 0xFFFFFF00UL) : byte;
}
|
557 |
|
|
|
558 |
|
|
|
559 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation SEXT.H (sign-extend half-word) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return Sign-extended half-word (operand(15:0)).
 **************************************************************************/
uint32_t riscv_emulate_sexth(uint32_t rs1) {

  // isolate the low half-word, then replicate its sign bit (bit 15) into bits 31:16
  uint32_t half = rs1 & 0xffffUL;
  return (half & 0x8000UL) ? (half | 0xFFFF0000UL) : half;
}
|
575 |
|
|
|
576 |
|
|
|
577 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ZEXT.H (zero-extend half-word) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return Zero-extended half-word (operand(15:0)).
 **************************************************************************/
uint32_t riscv_emulate_zexth(uint32_t rs1) {

  // keep only the lower 16 bits; upper half is cleared
  uint32_t half = rs1 & 0xFFFFUL;
  return half;
}
|
587 |
|
|
|
588 |
|
|
|
589 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MIN (select signed minimum) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2.
 * @return Signed minimum of the two operands.
 **************************************************************************/
uint32_t riscv_emulate_min(uint32_t rs1, uint32_t rs2) {

  // compare as two's-complement signed values, return the original operand
  return ((int32_t)rs1 < (int32_t)rs2) ? rs1 : rs2;
}
|
608 |
|
|
|
609 |
|
|
|
610 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MINU (select unsigned minimum) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2.
 * @return Unsigned minimum of the two operands.
 **************************************************************************/
uint32_t riscv_emulate_minu(uint32_t rs1, uint32_t rs2) {

  // plain unsigned comparison
  return (rs1 < rs2) ? rs1 : rs2;
}
|
626 |
|
|
|
627 |
|
|
|
628 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MAX (select signed maximum) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2.
 * @return Signed maximum of the two operands.
 **************************************************************************/
uint32_t riscv_emulate_max(uint32_t rs1, uint32_t rs2) {

  // compare as two's-complement signed values, return the original operand
  return ((int32_t)rs1 < (int32_t)rs2) ? rs2 : rs1;
}
|
647 |
|
|
|
648 |
|
|
|
649 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation MAXU (select unsigned maximum) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2.
 * @return Unsigned maximum of the two operands.
 **************************************************************************/
uint32_t riscv_emulate_maxu(uint32_t rs1, uint32_t rs2) {

  // plain unsigned comparison
  return (rs1 < rs2) ? rs2 : rs1;
}
|
665 |
|
|
|
666 |
|
|
|
667 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ANDN (logical and-negate) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2.
 * @return Operand 1 AND NOT operand 2.
 **************************************************************************/
uint32_t riscv_emulate_andn(uint32_t rs1, uint32_t rs2) {

  // clear in rs1 every bit that is set in rs2
  uint32_t mask = ~rs2;
  return rs1 & mask;
}
|
678 |
|
|
|
679 |
|
|
|
680 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ORN (logical or-negate) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2.
 * @return Operand 1 OR NOT operand 2.
 **************************************************************************/
uint32_t riscv_emulate_orn(uint32_t rs1, uint32_t rs2) {

  // set in rs1 every bit that is clear in rs2
  uint32_t mask = ~rs2;
  return rs1 | mask;
}
|
691 |
|
|
|
692 |
|
|
|
693 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation XNOR (logical xor-negate) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2.
 * @return Operand 1 XOR NOT operand 2.
 **************************************************************************/
uint32_t riscv_emulate_xnor(uint32_t rs1, uint32_t rs2) {

  // XNOR = complement of XOR
  uint32_t x = rs1 ^ rs2;
  return ~x;
}
|
704 |
|
|
|
705 |
|
|
|
706 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ROL (rotate-left) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @param[in] rs2 Source operand 2 (only bits 4:0 are used).
 * @return Operand 1 rotated left by operand_2(4:0) positions.
 **************************************************************************/
uint32_t riscv_emulate_rol(uint32_t rs1, uint32_t rs2) {

  uint32_t shamt = rs2 & 0x1f;

  // guard shamt == 0: the original computed rs1 >> (32-0), a shift by 32,
  // which is undefined behavior in C
  if (shamt == 0) {
    return rs1;
  }

  uint32_t tmp_a = rs1 << shamt;
  uint32_t tmp_b = rs1 >> (32 - shamt);

  return tmp_a | tmp_b;
}
|
722 |
|
|
|
723 |
|
|
|
724 |
|
|
/**********************************************************************//**
|
725 |
|
|
* Intrinsic: Bit manipulation ROR (rotate-right) [emulation]
|
726 |
|
|
*
|
727 |
|
|
* @param[in] rs1 Source operand 1 (a0).
|
728 |
|
|
* @param[in] rs2 Source operand 1 (a0).
|
729 |
|
|
* @return Operand 1 rotated right by operand_2(4:0) positions.
|
730 |
|
|
**************************************************************************/
|
731 |
|
|
uint32_t riscv_emulate_ror(uint32_t rs1, uint32_t rs2) {
|
732 |
|
|
|
733 |
|
|
uint32_t shamt = rs2 & 0x1f;
|
734 |
|
|
|
735 |
|
|
uint32_t tmp_a = rs1 >> shamt;
|
736 |
|
|
uint32_t tmp_b = rs1 << (32-shamt);
|
737 |
|
|
|
738 |
|
|
return tmp_a | tmp_b;
|
739 |
|
|
}
|
740 |
|
|
|
741 |
|
|
|
742 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation REV8 (byte swap) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return Operand 1 byte swapped (byte 0 <-> byte 3, byte 1 <-> byte 2).
 **************************************************************************/
uint32_t riscv_emulate_rev8(uint32_t rs1) {

  uint32_t swapped = 0;

  // pull bytes out low-to-high and push them into the result high-to-low
  for (int i = 0; i < 4; i++) {
    swapped = (swapped << 8) | ((rs1 >> (8 * i)) & 0xffUL);
  }

  return swapped;
}
|
757 |
|
|
|
758 |
|
|
|
759 |
|
|
/**********************************************************************//**
 * Intrinsic: Bit manipulation ORCB (or-combine bytes) [emulation]
 *
 * @param[in] rs1 Source operand 1.
 * @return OR-combined bytes of operand 1: each byte becomes 0xff if any
 * of its bits were set, 0x00 otherwise.
 **************************************************************************/
uint32_t riscv_emulate_orcb(uint32_t rs1) {

  uint32_t combined = 0;

  // process each of the four byte lanes independently
  for (int shift = 0; shift < 32; shift += 8) {
    if ((rs1 >> shift) & 0xffUL) {
      combined |= 0xffUL << shift;
    }
  }

  return combined;
}
|
784 |
|
|
|
785 |
|
|
|
786 |
|
|
#endif // neorv32_b_extension_intrinsics_h
|
787 |
|
|
|