OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-ppc64/] [bitops.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * PowerPC64 atomic bit operations.
3
 * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner,
4
 * Anton Blanchard
5
 *
6
 * Originally taken from the 32b PPC code.  Modified to use 64b values for
7
 * the various counters & memory references.
8
 *
9
 * Bitops are odd when viewed on big-endian systems.  They were designed
10
 * on little endian so the size of the bitset doesn't matter (low order bytes
11
 * come first) as long as the bit in question is valid.
12
 *
13
 * Bits are "tested" often using the C expression (val & (1<<nr)) so we do
14
 * our best to stay compatible with that.  The assumption is that val will
15
 * be unsigned long for such tests.  As such, we assume the bits are stored
16
 * as an array of unsigned long (the usual case is a single unsigned long,
17
 * of course).  Here's an example bitset with bit numbering:
18
 *
19
 *   |63..........0|127........64|191.......128|255.......192|
20
 *
21
 * This leads to a problem. If an int, short or char is passed as a bitset
22
 * it will be a bad memory reference since we want to store in chunks
23
 * of unsigned long (64 bits here) size.
24
 *
25
 * This program is free software; you can redistribute it and/or
26
 * modify it under the terms of the GNU General Public License
27
 * as published by the Free Software Foundation; either version
28
 * 2 of the License, or (at your option) any later version.
29
 */
30
 
31
#ifndef _PPC64_BITOPS_H
#define _PPC64_BITOPS_H

#ifdef __KERNEL__

/* Provides the EIEIO_ON_SMP / ISYNC_ON_SMP barrier macros used by the
 * atomic test_and_* operations below. */
#include <asm/memory.h>

/*
 * clear_bit doesn't imply a memory barrier; callers needing ordering
 * around clear_bit must place these explicit barriers themselves.
 */
#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()
43
 
44
/*
 * test_bit - report whether bit 'nr' is set in the bitmap at 'addr'.
 * The bitmap is addressed as an array of 64-bit words, low-order bits
 * first within each word.  Plain load; no atomicity implied.
 */
static __inline__ int test_bit(unsigned long nr, __const__ volatile void *addr)
{
        __const__ long *words = (__const__ long *) addr;
        long word = words[nr >> 6];

        return (int)(1UL & (word >> (nr & 63)));
}
48
 
49
/*
 * set_bit - atomically set bit 'nr' in the bitmap at 'addr'.
 *
 * Implemented with a ldarx/stdcx. (load-reserve / store-conditional)
 * retry loop: the conditional store fails and the loop repeats if any
 * other CPU wrote the reservation granule in between.  No memory
 * barrier is implied.
 */
static __inline__ void set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1UL << (nr & 0x3f);        /* bit within its 64-bit word */
        unsigned long *p = ((unsigned long *)addr) + (nr >> 6);  /* word holding bit nr */

        __asm__ __volatile__(
"1:     ldarx   %0,0,%3         # set_bit\n\
        or      %0,%0,%2\n\
        stdcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");
}
64
 
65
/*
 * clear_bit - atomically clear bit 'nr' in the bitmap at 'addr'.
 *
 * Same ldarx/stdcx. retry loop as set_bit, using andc to drop the bit.
 * No barrier is implied; use smp_mb__before/after_clear_bit() when
 * ordering around the clear is required.
 */
static __inline__ void clear_bit(unsigned long nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1UL << (nr & 0x3f);        /* bit within its 64-bit word */
        unsigned long *p = ((unsigned long *)addr) + (nr >> 6);  /* word holding bit nr */

        __asm__ __volatile__(
"1:     ldarx   %0,0,%3         # clear_bit\n\
        andc    %0,%0,%2\n\
        stdcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");
}
80
 
81
/*
 * change_bit - atomically toggle bit 'nr' in the bitmap at 'addr'.
 *
 * Same ldarx/stdcx. retry loop as set_bit, using xor to flip the bit.
 * No memory barrier is implied.
 */
static __inline__ void change_bit(unsigned long nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1UL << (nr & 0x3f);        /* bit within its 64-bit word */
        unsigned long *p = ((unsigned long *)addr) + (nr >> 6);  /* word holding bit nr */

        __asm__ __volatile__(
"1:     ldarx   %0,0,%3         # change_bit\n\
        xor     %0,%0,%2\n\
        stdcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");
}
96
 
97
/*
 * test_and_set_bit - atomically set bit 'nr' and return its old value.
 *
 * EIEIO_ON_SMP / ISYNC_ON_SMP (from <asm/memory.h>) bracket the
 * ldarx/stdcx. loop to provide the ordering required of atomic
 * read-modify-write bitops on SMP builds.
 *
 * Returns nonzero if the bit was already set, 0 otherwise.
 */
static __inline__ int test_and_set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long old, t;
        unsigned long mask = 1UL << (nr & 0x3f);        /* bit within its 64-bit word */
        unsigned long *p = ((unsigned long *)addr) + (nr >> 6);  /* word holding bit nr */

        __asm__ __volatile__(
        EIEIO_ON_SMP
"1:     ldarx   %0,0,%3         # test_and_set_bit\n\
        or      %1,%0,%2 \n\
        stdcx.  %1,0,%3 \n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (old), "=&r" (t)
        : "r" (mask), "r" (p)
        : "cc", "memory");

        return (old & mask) != 0;
}
116
 
117
/*
 * test_and_clear_bit - atomically clear bit 'nr' and return its old value.
 *
 * EIEIO_ON_SMP / ISYNC_ON_SMP bracket the ldarx/stdcx. loop (andc drops
 * the bit) to provide SMP ordering, as for test_and_set_bit.
 *
 * Returns nonzero if the bit was previously set, 0 otherwise.
 */
static __inline__ int test_and_clear_bit(unsigned long nr, volatile void *addr)
{
        unsigned long old, t;
        unsigned long mask = 1UL << (nr & 0x3f);        /* bit within its 64-bit word */
        unsigned long *p = ((unsigned long *)addr) + (nr >> 6);  /* word holding bit nr */

        __asm__ __volatile__(
        EIEIO_ON_SMP
"1:     ldarx   %0,0,%3         # test_and_clear_bit\n\
        andc    %1,%0,%2\n\
        stdcx.  %1,0,%3\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (old), "=&r" (t)
        : "r" (mask), "r" (p)
        : "cc", "memory");

        return (old & mask) != 0;
}
136
 
137
/*
 * test_and_change_bit - atomically toggle bit 'nr' and return its old value.
 *
 * EIEIO_ON_SMP / ISYNC_ON_SMP bracket the ldarx/stdcx. loop (xor flips
 * the bit) to provide SMP ordering, as for test_and_set_bit.
 *
 * Returns nonzero if the bit was previously set, 0 otherwise.
 */
static __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr)
{
        unsigned long old, t;
        unsigned long mask = 1UL << (nr & 0x3f);        /* bit within its 64-bit word */
        unsigned long *p = ((unsigned long *)addr) + (nr >> 6);  /* word holding bit nr */

        __asm__ __volatile__(
        EIEIO_ON_SMP
"1:     ldarx   %0,0,%3         # test_and_change_bit\n\
        xor     %1,%0,%2\n\
        stdcx.  %1,0,%3\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (old), "=&r" (t)
        : "r" (mask), "r" (p)
        : "cc", "memory");

        return (old & mask) != 0;
}
156
 
157
/*
158
 * non-atomic versions
159
 */
160
/*
 * __set_bit - non-atomic variant of set_bit: set bit 'nr' in *addr.
 * Caller must provide any serialization required.
 */
static __inline__ void __set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *word = ((unsigned long *)addr) + (nr >> 6);

        *word |= 1UL << (nr & 0x3f);
}
167
 
168
/*
 * __clear_bit - non-atomic variant of clear_bit: clear bit 'nr' in *addr.
 * Caller must provide any serialization required.
 */
static __inline__ void __clear_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *word = ((unsigned long *)addr) + (nr >> 6);

        *word &= ~(1UL << (nr & 0x3f));
}
175
 
176
/*
 * __change_bit - non-atomic variant of change_bit: toggle bit 'nr' in *addr.
 * Caller must provide any serialization required.
 */
static __inline__ void __change_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *word = ((unsigned long *)addr) + (nr >> 6);

        *word ^= 1UL << (nr & 0x3f);
}
183
 
184
/*
 * __test_and_set_bit - non-atomic test_and_set_bit.
 * Sets bit 'nr' in *addr and returns nonzero if it was already set.
 */
static __inline__ int __test_and_set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *word = ((unsigned long *)addr) + (nr >> 6);
        unsigned long bit = 1UL << (nr & 0x3f);
        unsigned long prev = *word;

        *word = prev | bit;
        return (prev & bit) != 0;
}
193
 
194
/*
 * __test_and_clear_bit - non-atomic test_and_clear_bit.
 * Clears bit 'nr' in *addr and returns nonzero if it was previously set.
 */
static __inline__ int __test_and_clear_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *word = ((unsigned long *)addr) + (nr >> 6);
        unsigned long bit = 1UL << (nr & 0x3f);
        unsigned long prev = *word;

        *word = prev & ~bit;
        return (prev & bit) != 0;
}
203
 
204
/*
 * __test_and_change_bit - non-atomic test_and_change_bit.
 * Toggles bit 'nr' in *addr and returns nonzero if it was previously set.
 */
static __inline__ int __test_and_change_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *word = ((unsigned long *)addr) + (nr >> 6);
        unsigned long bit = 1UL << (nr & 0x3f);
        unsigned long prev = *word;

        *word = prev ^ bit;
        return (prev & bit) != 0;
}
213
 
214
/*
215
 * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the
216
 * most significant (left-most) 1-bit in a double word.
217
 */
218
/*
 * __ilog2 - index (63..0) of the most significant 1-bit in x.
 *
 * cntlzd counts leading zeros of the 64-bit value, so the answer is
 * 63 - clz.  NOTE(review): cntlzd of 0 yields 64, so __ilog2(0)
 * returns -1; callers must not pass 0 unless they handle that.
 */
static __inline__ int __ilog2(unsigned long x)
{
        int lz;

        asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
        return 63 - lz;
}
225
 
226
/* Return the number of trailing (low-order) zero bits in an 8-byte value,
 * i.e. the zero-based position, counting from the right, of its least
 * significant 1-bit.  Returns 64 when mask == 0.
 * (The previous comment here claimed "most significant 1-bit", which
 * does not match what the code computes.)
 */
static __inline__ long cnt_trailing_zeros(unsigned long mask)
{
        long cnt;

        /*
         * (mask - 1) & ~mask leaves exactly the bits below the least
         * significant 1-bit set; cntlzd of that is 64 - tzcount, and
         * subfic subtracts it from 64 to recover the count.
         */
        asm(
"       addi    %0,%1,-1        \n\
        andc    %0,%0,%1        \n\
        cntlzd  %0,%0           \n\
        subfic  %0,%0,64"
        : "=r" (cnt)
        : "r" (mask));
        return cnt;
}
243
 
244
 
245
/*
246
 * ffz = Find First Zero in word.
247
 * Determines the bit position of the least significant (rightmost) 0 bit
248
 * in the specified double word. The returned bit position will be zero-based,
249
 * starting from the right side (63 - 0).
250
 */
251
/*
 * ffz - find the first (least significant, rightmost) zero bit in x.
 * Returns its zero-based position (0..63), or 64 if x has no zero bit.
 */
static __inline__ unsigned long ffz(unsigned long x)
{
        unsigned long inverted = ~x;

        /* All 64 bits of x set: no zero bit anywhere. */
        if (inverted == 0)
                return 64;

        /*
         * 'inverted & -inverted' isolates the least significant 1-bit
         * of the inverted value — i.e. the least significant 0-bit of
         * the original x — and __ilog2 turns that mask into an index.
         */
        return __ilog2(inverted & -inverted);
}
265
 
266
/*
 * __ffs - index of the least significant 1-bit in x (undefined for 0,
 * which yields __ilog2(0)).
 */
static __inline__ int __ffs(unsigned long x)
{
        /* x & -x isolates the lowest set bit; __ilog2 gives its index. */
        unsigned long lowest = x & -x;

        return __ilog2(lowest);
}
270
 
271
/*
272
 * ffs: find first bit set. This is defined the same way as
273
 * the libc and compiler builtin ffs routines, therefore
274
 * differs in spirit from the above ffz (man ffs).
275
 */
276
/*
 * ffs - find first bit set, with libc/compiler-builtin numbering:
 * the result is 1-based, and ffs(0) == 0 (since __ilog2(0) == -1).
 * Note this differs in spirit from ffz above (man ffs).
 */
static __inline__ int ffs(int x)
{
        unsigned long word = (unsigned long)x;
        unsigned long lowest = word & -word;    /* isolate lowest set bit */

        return __ilog2(lowest) + 1;
}
281
 
282
/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 * Delegates to the generic C implementation rather than a
 * cntlzw/cntlzd-based one.
 */
#define fls(x) generic_fls(x)
287
 
288
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word.  Delegates to the generic
 * C population-count implementations.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
295
 
296
/*
 * Out-of-line bitmap searches, implemented elsewhere in the arch code.
 * 'size' and 'offset' are in bits.  NOTE(review): return convention
 * (bit number, or >= size when nothing is found) is presumed from
 * standard Linux bitops — confirm against the out-of-line definitions.
 */
extern unsigned long find_next_zero_bit(unsigned long* addr, unsigned long size, unsigned long offset);
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

extern unsigned long find_next_bit(unsigned long* addr, unsigned long size, unsigned long offset);
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)

/* Little-endian (byte-ordered) variant used by the ext2 helpers below. */
extern unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset);
#define find_first_zero_le_bit(addr, size) \
        find_next_zero_le_bit((addr), (size), 0)
307
 
308
/*
 * test_le_bit - test bit 'nr' in a little-endian (byte-ordered) bitmap:
 * bits are numbered within bytes, low-order bits first, independent of
 * host word endianness.
 */
static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr)
{
        __const__ unsigned char *bytes = (__const__ unsigned char *) addr;
        unsigned char byte = bytes[nr >> 3];

        return (byte >> (nr & 7)) & 1;
}
313
 
314
/*
315
 * non-atomic versions
316
 */
317
/*
 * __set_le_bit - non-atomically set bit 'nr' in a little-endian bitmap
 * (byte-addressed; see test_le_bit).
 */
static __inline__ void __set_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *byte = (unsigned char *)addr + (nr >> 3);

        *byte |= 1 << (nr & 0x07);
}
324
 
325
/*
 * __clear_le_bit - non-atomically clear bit 'nr' in a little-endian
 * bitmap (byte-addressed; see test_le_bit).
 */
static __inline__ void __clear_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *byte = (unsigned char *)addr + (nr >> 3);

        *byte &= ~(1 << (nr & 0x07));
}
332
 
333
/*
 * __test_and_set_le_bit - non-atomically set bit 'nr' in a little-endian
 * bitmap and return nonzero if it was already set.
 */
static __inline__ int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *byte = (unsigned char *)addr + (nr >> 3);
        int bit = 1 << (nr & 0x07);
        int was_set = (*byte & bit) != 0;

        *byte |= bit;
        return was_set;
}
344
 
345
/*
 * __test_and_clear_le_bit - non-atomically clear bit 'nr' in a
 * little-endian bitmap and return nonzero if it was previously set.
 */
static __inline__ int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
        unsigned char *byte = (unsigned char *)addr + (nr >> 3);
        int bit = 1 << (nr & 0x07);
        int was_set = (*byte & bit) != 0;

        *byte &= ~bit;
        return was_set;
}
356
 
357
/*
 * ext2 on-disk bitmaps are little-endian regardless of host byte order,
 * so the ext2_* operations map onto the *_le_bit helpers above.
 * NOTE(review): the non-atomic __test_and_* variants are used here, so
 * callers are presumed to serialize access externally — confirm.
 */
#define ext2_set_bit(nr,addr) \
        __test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
        __test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_test_bit(nr, addr)      test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
        find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
        find_next_zero_le_bit((unsigned long*)addr, size, off)
366
 
367
/* Minix filesystem bitmaps use native bit ordering: plain bitops. */
#define minix_test_and_set_bit(nr,addr)         test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */
#endif /* _PPC64_BITOPS_H */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.