OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: linux/linux-2.4/include/asm-sparc/bitops.h (rev 1765; all lines last changed in rev 1275 by phoenix)
/* $Id: bitops.h,v 1.1.1.1 2004-04-15 02:40:00 phoenix Exp $
 * bitops.h: Bit string operations on the Sparc.
 *
 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1996 Eddie C. Dost   (ecd@skynet.be)
 * Copyright 2001 Anton Blanchard (anton@samba.org)
 */

#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H

#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#ifdef __KERNEL__

/*
 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
 * is in the highest of the four bytes and bit '31' is the high bit
 * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
 * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
 */
static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
{
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g5");
        register int tmp4 asm("g7");

        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);

        /* The real work happens in the out-of-line helper ___set_bit,
         * which takes the word address in %g1 and the mask in %g2 and
         * hands the old value of (word & mask) back in %g2.  The %o7
         * bookkeeping around the call preserves and adjusts the return
         * address for the helper's hand-rolled calling convention. */
        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___set_bit\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
        : "memory", "cc");

        return mask != 0;
}
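
/*
 * Illustrative usage sketch, not part of the original header; the flag
 * word, bit name, and function below are hypothetical.  This is the
 * classic pattern: the return value says whether the caller won the
 * race to set the bit, so only one caller performs the guarded work.
 */
#define SKETCH_INIT_DONE 0              /* hypothetical bit number */

static inline void sketch_init_once(volatile unsigned long *flags)
{
        if (test_and_set_bit(SKETCH_INIT_DONE, flags))
                return;         /* lost the race: already initialized */
        /* ... one-time initialization goes here ... */
}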

static inline void set_bit(unsigned long nr, volatile void *addr)
{
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g5");
        register int tmp4 asm("g7");

        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);

        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___set_bit\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
        : "memory", "cc");
}

static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
{
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g5");
        register int tmp4 asm("g7");

        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);

        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___clear_bit\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
        : "memory", "cc");

        return mask != 0;
}

static inline void clear_bit(unsigned long nr, volatile void *addr)
{
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g5");
        register int tmp4 asm("g7");

        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);

        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___clear_bit\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
        : "memory", "cc");
}

static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
{
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g5");
        register int tmp4 asm("g7");

        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);

        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___change_bit\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
        : "memory", "cc");

        return mask != 0;
}

static inline void change_bit(unsigned long nr, volatile void *addr)
{
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g5");
        register int tmp4 asm("g7");

        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);

        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___change_bit\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
        : "memory", "cc");
}
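
/*
 * For orientation only: the six routines above all funnel into
 * out-of-line assembler helpers (___set_bit, ___clear_bit,
 * ___change_bit) that take the word address in %g1 and the mask in %g2
 * and return the old value of (word & mask) in %g2.  A plain-C sketch
 * of what ___set_bit computes; the real helper also provides the
 * serialization that makes the operation atomic, which this sketch
 * deliberately does not:
 */
static inline unsigned long sketch_set_bit_helper(unsigned long *word,
                                                  unsigned long mask)
{
        unsigned long old = *word;      /* done under serialization in
                                           the real helper */
        *word = old | mask;
        return old & mask;              /* becomes the 'mask' output of
                                           the asm above */
}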

/*
 * non-atomic versions
 */
static inline void __set_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p |= mask;
}

static inline void __clear_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p &= ~mask;
}

static inline void __change_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p ^= mask;
}

static inline int __test_and_set_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;
}

static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old & ~mask;
        return (old & mask) != 0;
}

static inline int __test_and_change_bit(int nr, volatile void *addr)
{
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old ^ mask;
        return (old & mask) != 0;
}
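
/*
 * Note (added for clarity, standard kernel convention): the __-prefixed
 * routines above perform the same updates with no serialization at all.
 * They are only safe when the bitmap cannot be touched concurrently,
 * for example during early setup, or while the caller holds a lock that
 * protects the whole bitmap.
 */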

#define smp_mb__before_clear_bit()      do { } while(0)
#define smp_mb__after_clear_bit()       do { } while(0)
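
/*
 * These barriers can plausibly be no-ops here because the atomic bit
 * operations above carry their own serialization, which already orders
 * the surrounding accesses.  That is an inference from this file, not a
 * statement by its authors.
 */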

/* The following routine need not be atomic. */
static inline int test_bit(int nr, __const__ void *addr)
{
        return (1 & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31))) != 0;
}

/* The easy/cheese version for now. */
static inline unsigned long ffz(unsigned long word)
{
        unsigned long result = 0;

        while (word & 1) {
                result++;
                word >>= 1;
        }
        return result;
}
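
/*
 * Worked example (illustrative): ffz(0xb) scans binary ...1011, steps
 * over the two low set bits, and returns 2, the index of the first zero
 * bit.  For word == ~0UL the loop runs off the top and returns 32, an
 * out-of-range index, so callers must already know a zero bit exists,
 * as find_next_zero_bit() below guarantees before calling it.
 */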

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)
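
/*
 * Note the off-by-one convention: ffs(0x8) == 4 (bit numbering starts
 * at 1, matching libc) and ffs(0) == 0, whereas ffz() above numbers
 * bits from 0.
 */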

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
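
/*
 * Examples: hweight32(0xF0F0F0F0) == 16, hweight8(0x05) == 2.  The
 * generic_ helpers come from the kernel's shared <linux/bitops.h> in
 * this series.
 */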

/*
 * find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 */
static inline unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size; /* Nope. */
found_middle:
        return result + ffz(tmp);
}

/*
 * Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
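
/*
 * Illustrative usage (hypothetical function and names): scan a bitmap
 * for a free slot and claim it.  The non-atomic __set_bit is only safe
 * here if the caller serializes access to the map, as noted earlier.
 */
static inline long sketch_alloc_slot(unsigned long *map, unsigned long nslots)
{
        unsigned long slot = find_first_zero_bit(map, nslots);

        if (slot >= nslots)
                return -1;      /* no free slot */
        __set_bit(slot, map);
        return slot;
}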

static inline int test_le_bit(int nr, __const__ void * addr)
{
        __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
        return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}

/*
 * non-atomic versions
 */
static inline void __set_le_bit(int nr, void *addr)
{
        unsigned char *ADDR = (unsigned char *)addr;

        ADDR += nr >> 3;
        *ADDR |= 1 << (nr & 0x07);
}

static inline void __clear_le_bit(int nr, void *addr)
{
        unsigned char *ADDR = (unsigned char *)addr;

        ADDR += nr >> 3;
        *ADDR &= ~(1 << (nr & 0x07));
}

static inline int __test_and_set_le_bit(int nr, void *addr)
{
        int mask, retval;
        unsigned char *ADDR = (unsigned char *)addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        return retval;
}

static inline int __test_and_clear_le_bit(int nr, void *addr)
{
        int mask, retval;
        unsigned char *ADDR = (unsigned char *)addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        return retval;
}

static inline unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp = __swab32(tmp) | (~0UL << size);
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size; /* Nope. */
        return result + ffz(tmp);

found_middle:
        return result + ffz(__swab32(tmp));
}

#define find_first_zero_le_bit(addr, size) \
        find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit                    __test_and_set_le_bit
#define ext2_clear_bit                  __test_and_clear_le_bit
#define ext2_test_bit                   test_le_bit
#define ext2_find_first_zero_bit        find_first_zero_le_bit
#define ext2_find_next_zero_bit         find_next_zero_le_bit
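
/*
 * Why the _le_ variants exist (worked example): ext2 keeps its on-disk
 * bitmaps in little-endian byte order, so bit 10 always lives in byte 1
 * (10 >> 3) at bit position 2 (10 & 7).  The native word-based ops
 * above would place bit 10 inside byte 2 of a big-endian 32-bit word,
 * i.e. in the wrong byte on this CPU.
 */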

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)         test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)                  set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)       test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)                 test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)    find_first_zero_bit(addr,size)
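
/*
 * Unlike the ext2 macros, the minix ones map straight onto the native
 * big-endian word operations, and onto the atomic variants at that.
 */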

#endif /* __KERNEL__ */

#endif /* defined(_SPARC_BITOPS_H) */
