OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-ppc/] [bitops.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1276 phoenix
/*
2
 * bitops.h: Bit string operations on the ppc
3
 */
4
 
5
#ifdef __KERNEL__
6
#ifndef _PPC_BITOPS_H
7
#define _PPC_BITOPS_H
8
 
9
#include <linux/config.h>
10
#include <asm/byteorder.h>
11
#include <asm/atomic.h>
12
 
13
/*
14
 * The test_and_*_bit operations are taken to imply a memory barrier
15
 * on SMP systems.
16
 */
17
/*
 * Barrier fragments spliced into the test_and_* asm strings below:
 * eieio before the update orders prior stores; sync after provides a
 * full barrier.  On UP kernels both expand to nothing.
 */
#ifdef CONFIG_SMP
#define SMP_WMB         "eieio\n"
#define SMP_MB          "\nsync"
#else
#define SMP_WMB
#define SMP_MB
#endif /* CONFIG_SMP */
24
 
25
/*
26
 * These used to be if'd out here because using : "cc" as a constraint
27
 * resulted in errors from egcs.  Things appear to be OK with gcc-2.95.
28
 */
29
/*
 * Atomically set bit 'nr' in the bitmap at 'addr'.
 *
 * Bits are numbered within 32-bit words: word nr>>5, bit nr&0x1f.
 * The lwarx/stwcx. reservation loop retries until the store succeeds,
 * making the read-modify-write of the word atomic.  No memory barrier
 * is implied (see the test_and_* variants for the barriered forms).
 * PPC405_ERR77() inserts the 405 stwcx. erratum workaround; it is
 * defined elsewhere and presumably empty on other CPUs -- TODO confirm.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        unsigned long old;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%3 \n\
        or      %0,%0,%2 \n"
        PPC405_ERR77(0,%3)
"       stwcx.  %0,0,%3 \n\
        bne-    1b"
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc" );
}
45
 
46
/*
47
 * non-atomic version
48
 */
49
/*
 * Non-atomic version of set_bit(): set bit 'nr' in the bitmap at
 * 'addr' with a plain read-modify-write.  Caller must provide any
 * locking needed against concurrent updaters.
 *
 * The mask is built as 1UL (not 1): shifting the int literal 1 left by
 * 31 is undefined behaviour in C and, where long is wider than int,
 * sign-extends the mask to set all upper bits as well.
 */
static __inline__ void __set_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p |= mask;
}
56
 
57
/*
 * clear_bit() doesn't imply a memory barrier.  Callers that need
 * ordering around it bracket the call with these, which expand to a
 * full smp_mb() on this architecture.
 */
#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()
62
 
63
/*
 * Atomically clear bit 'nr' in the bitmap at 'addr'.
 *
 * Same lwarx/stwcx. reservation loop as set_bit(), but uses andc
 * (AND with complement of the mask) to clear the bit.  No memory
 * barrier is implied.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%3 \n\
        andc    %0,%0,%2 \n"
        PPC405_ERR77(0,%3)
"       stwcx.  %0,0,%3 \n\
        bne-    1b"
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");
}
79
 
80
/*
81
 * non-atomic version
82
 */
83
/*
 * Non-atomic version of clear_bit(): clear bit 'nr' at 'addr' with a
 * plain read-modify-write.  Caller provides locking.
 *
 * 1UL (not 1) avoids undefined behaviour shifting into the sign bit
 * for nr&0x1f == 31 and mask sign-extension where long is 64-bit.
 */
static __inline__ void __clear_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p &= ~mask;
}
90
 
91
/*
 * Atomically toggle bit 'nr' in the bitmap at 'addr'.
 *
 * Same reservation loop as set_bit()/clear_bit(), using xor to flip
 * the bit.  No memory barrier is implied.
 */
static __inline__ void change_bit(int nr, volatile void *addr)
{
        unsigned long old;
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%3 \n\
        xor     %0,%0,%2 \n"
        PPC405_ERR77(0,%3)
"       stwcx.  %0,0,%3 \n\
        bne-    1b"
        : "=&r" (old), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc");
}
107
 
108
/*
109
 * non-atomic version
110
 */
111
/*
 * Non-atomic version of change_bit(): toggle bit 'nr' at 'addr' with a
 * plain read-modify-write.  Caller provides locking.
 *
 * 1UL (not 1) avoids undefined behaviour for bit 31 and mask
 * sign-extension where long is 64-bit.
 */
static __inline__ void __change_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p ^= mask;
}
118
 
119
/*
120
 * test_and_*_bit do imply a memory barrier (?)
121
 */
122
/*
 * Atomically set bit 'nr' at 'addr' and return its previous value
 * (0 or 1).  Unlike the plain set/clear/change ops, the test_and_*
 * variants imply memory barriers on SMP: SMP_WMB (eieio) before and
 * SMP_MB (sync) after the update; the "memory" clobber keeps the
 * compiler from reordering accesses across the asm.
 */
static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
        unsigned int old, t;
        unsigned int mask = 1 << (nr & 0x1f);
        volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\n\
1:      lwarx   %0,0,%4 \n\
        or      %1,%0,%3 \n"
        PPC405_ERR77(0,%4)
"       stwcx.  %1,0,%4 \n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=&r" (t), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc", "memory");

        return (old & mask) != 0;
}
141
 
142
/*
143
 * non-atomic version
144
 */
145
/*
 * Non-atomic version of test_and_set_bit(): set bit 'nr' at 'addr'
 * and return its previous value (0 or 1).  Caller provides locking.
 *
 * 1UL (not 1) avoids undefined behaviour for bit 31 and mask
 * sign-extension where long is 64-bit.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;
}
154
 
155
/*
 * Atomically clear bit 'nr' at 'addr' and return its previous value
 * (0 or 1).  Barriered on SMP like test_and_set_bit(); uses andc to
 * clear the bit inside the lwarx/stwcx. reservation loop.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned int old, t;
        unsigned int mask = 1 << (nr & 0x1f);
        volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\n\
1:      lwarx   %0,0,%4 \n\
        andc    %1,%0,%3 \n"
        PPC405_ERR77(0,%4)
"       stwcx.  %1,0,%4 \n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=&r" (t), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc", "memory");

        return (old & mask) != 0;
}
174
 
175
/*
176
 * non-atomic version
177
 */
178
/*
 * Non-atomic version of test_and_clear_bit(): clear bit 'nr' at 'addr'
 * and return its previous value (0 or 1).  Caller provides locking.
 *
 * 1UL (not 1) avoids undefined behaviour for bit 31 and mask
 * sign-extension where long is 64-bit.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old & ~mask;
        return (old & mask) != 0;
}
187
 
188
/*
 * Atomically toggle bit 'nr' at 'addr' and return its previous value
 * (0 or 1).  Barriered on SMP like test_and_set_bit(); uses xor to
 * flip the bit inside the lwarx/stwcx. reservation loop.
 */
static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
        unsigned int old, t;
        unsigned int mask = 1 << (nr & 0x1f);
        volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

        __asm__ __volatile__(SMP_WMB "\n\
1:      lwarx   %0,0,%4 \n\
        xor     %1,%0,%3 \n"
        PPC405_ERR77(0,%4)
"       stwcx.  %1,0,%4 \n\
        bne     1b"
        SMP_MB
        : "=&r" (old), "=&r" (t), "=m" (*p)
        : "r" (mask), "r" (p), "m" (*p)
        : "cc", "memory");

        return (old & mask) != 0;
}
207
 
208
/*
209
 * non-atomic version
210
 */
211
/*
 * Non-atomic version of test_and_change_bit(): toggle bit 'nr' at
 * 'addr' and return its previous value (0 or 1).  Caller provides
 * locking.
 *
 * 1UL (not 1) avoids undefined behaviour for bit 31 and mask
 * sign-extension where long is 64-bit.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old ^ mask;
        return (old & mask) != 0;
}
220
 
221
/*
 * Return the value (0 or 1) of bit 'nr' in the bitmap at 'addr'.
 * Plain load; no atomicity or ordering implied.
 */
static __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
        __const__ unsigned int *words = (__const__ unsigned int *) addr;
        unsigned int word = words[nr >> 5];

        return (word >> (nr & 0x1f)) & 1;
}
227
 
228
/*
 * Return the bit position of the most significant 1 bit in x, i.e.
 * floor(log2(x)).  cntlzw counts leading zeros in the 32-bit word;
 * for x == 0 it yields 32, so __ilog2(0) returns -1.
 */
static __inline__ int __ilog2(unsigned int x)
{
        int lz;

        asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
        return 31 - lz;
}
236
 
237
/*
 * ffz: find the first (least-significant) zero bit in x, numbered
 * from 0.  Returns 32 when every bit of x is set.
 */
static __inline__ int ffz(unsigned int x)
{
        unsigned int inv = ~x;

        if (inv == 0)
                return 32;      /* no zero bit present */
        /* inv & -inv isolates the lowest set bit of the complement. */
        return __ilog2(inv & -inv);
}
243
 
244
/*
 * ffs: find first set bit, numbered from 1, matching the libc and
 * compiler builtin ffs routines -- hence it differs in spirit from
 * ffz above (see man ffs).
 */
static __inline__ int ffs(int x)
{
        unsigned int lsb = x & -x;      /* isolate lowest set bit */

        return __ilog2(lsb) + 1;
}
253
 
254
/*
 * hweightN: returns the hamming weight (i.e. the number of bits set)
 * of a N-bit word.  Delegates to the generic C implementations
 * (declared elsewhere); no PPC-specific popcount is used here.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
262
 
263
/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 *
 * find_first_zero_bit: first zero bit in a 'size'-bit bitmap,
 * scanning from bit 0.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
269
 
270
/*
 * Find the first zero bit at or after 'offset' in the 'size'-bit
 * bitmap at 'addr'.  Returns the bit number of that zero bit, or a
 * value >= size if no zero bit exists in range.
 */
static __inline__ unsigned long find_next_zero_bit(void * addr,
        unsigned long size, unsigned long offset)
{
        unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;   /* bit index of *p's word */
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* Partial first word: force the bits below 'offset' to 1
                 * so ffz() cannot report them. */
                tmp = *p++;
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        /* Scan whole 32-bit words for one that is not all-ones. */
        while (size >= 32) {
                if ((tmp = *p++) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;       /* partial last word */
found_first:
        /* Force bits at or beyond 'size' to 1 so they are ignored. */
        tmp |= ~0UL << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size; /* Nope. */
found_middle:
        return result + ffz(tmp);
}
307
 
308
 
309
/*
 * ext2 bitmaps use little-endian bit numbering.  XOR-ing nr with 0x18
 * (24) remaps a little-endian bit number to this big-endian CPU's
 * 32-bit-word numbering.  These use the non-atomic __ variants, so the
 * caller is responsible for serialization.
 */
#define ext2_set_bit(nr, addr)          __test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr)        __test_and_clear_bit((nr) ^ 0x18, addr)
311
 
312
/*
 * Test bit 'nr' of a little-endian (ext2) bitmap at 'addr'.
 * Addressing bytes directly makes the access endian-neutral.
 */
static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
        __const__ unsigned char *bytes = (__const__ unsigned char *) addr;
        unsigned char byte = bytes[nr >> 3];

        return (byte >> (nr & 7)) & 1;
}
318
 
319
/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)
326
 
327
/*
 * Little-endian counterpart of find_next_zero_bit() for ext2 bitmaps:
 * find the first zero bit at or after 'offset' in the 'size'-bit
 * bitmap at 'addr', counting bits in little-endian order.  Each word
 * is passed through cpu_to_le32p() (from asm/byteorder.h) so the scan
 * sees the little-endian bit layout on this big-endian CPU.
 * Returns a value >= size when no zero bit is found.
 */
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;   /* bit index of *p's word */
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* Partial first word: force bits below 'offset' to 1
                 * so ffz() cannot report them. */
                tmp = cpu_to_le32p(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        /* Scan whole words (byte-swapped) for one not all-ones. */
        while (size >= 32) {
                if ((tmp = cpu_to_le32p(p++)) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = cpu_to_le32p(p);  /* partial last word */
found_first:
        /* Force bits at or beyond 'size' to 1 so they are ignored. */
        tmp |= ~0U << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size; /* Nope. */
found_middle:
        return result + ffz(tmp);
}
364
 
365
/* Bitmap functions for the minix filesystem.  Minix bitmaps share the
 * ext2 little-endian layout, so these simply reuse the ext2 helpers
 * (minix_set_bit discards the test-and-set return value). */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
371
 
372
#endif /* _PPC_BITOPS_H */
373
#endif /* __KERNEL__ */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.