OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [LINUX_2_4_26_OR32/] [linux/] [linux-2.4/] [include/] [asm-sh/] [bitops.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
#ifndef __ASM_SH_BITOPS_H
2
#define __ASM_SH_BITOPS_H
3
 
4
#ifdef __KERNEL__
5
#include <asm/system.h>
6
/* For __swab32 */
7
#include <asm/byteorder.h>
8
 
9
/*
 * set_bit - atomically set bit @nr in the bitmap at @addr.
 * Atomicity on this UP port is obtained by disabling interrupts
 * around the read-modify-write (save_and_cli/restore_flags).
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);     /* bit position within the word */
        unsigned long flags;

        save_and_cli(flags);
        *word |= bit;
        restore_flags(flags);
}
21
 
22
/*
 * __set_bit - non-atomic version of set_bit.  The caller must
 * guarantee exclusion (e.g. the bitmap is not shared, or locks are
 * already held).
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);

        *word |= 1 << (nr & 0x1f);
}
31
 
32
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 * Callers that need ordering around clear_bit() must use these
 * explicit barriers; here they are plain compiler barriers (UP).
 */
#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()
37
/*
 * clear_bit - atomically clear bit @nr in the bitmap at @addr,
 * by masking interrupts around the read-modify-write.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        unsigned long flags;

        save_and_cli(flags);
        *word &= ~bit;
        restore_flags(flags);
}
49
 
50
/*
 * __clear_bit - non-atomic version of clear_bit; caller must
 * guarantee exclusion.
 */
static __inline__ void __clear_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);

        *word &= ~(1 << (nr & 0x1f));
}
59
 
60
/*
 * change_bit - atomically toggle bit @nr in the bitmap at @addr,
 * by masking interrupts around the read-modify-write.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        unsigned long flags;

        save_and_cli(flags);
        *word ^= bit;
        restore_flags(flags);
}
72
 
73
/*
 * __change_bit - non-atomic version of change_bit; caller must
 * guarantee exclusion.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);

        *word ^= 1 << (nr & 0x1f);
}
82
 
83
/*
 * test_and_set_bit - atomically set bit @nr and return its previous
 * value (non-zero if it was already set).  Interrupts are disabled
 * around the read-modify-write.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        int old;
        unsigned long flags;

        save_and_cli(flags);
        old = (*word & bit) != 0;
        *word |= bit;
        restore_flags(flags);

        return old;
}
98
 
99
/*
 * __test_and_set_bit - non-atomic version of test_and_set_bit;
 * caller must guarantee exclusion.  Returns the previous bit value.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        int old = (*word & bit) != 0;

        *word |= bit;

        return old;
}
111
 
112
/*
 * test_and_clear_bit - atomically clear bit @nr and return its
 * previous value.  Interrupts are disabled around the
 * read-modify-write.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        int old;
        unsigned long flags;

        save_and_cli(flags);
        old = (*word & bit) != 0;
        *word &= ~bit;
        restore_flags(flags);

        return old;
}
127
 
128
/*
 * __test_and_clear_bit - non-atomic version of test_and_clear_bit;
 * caller must guarantee exclusion.  Returns the previous bit value.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        int old = (*word & bit) != 0;

        *word &= ~bit;

        return old;
}
140
 
141
/*
 * test_and_change_bit - atomically toggle bit @nr and return its
 * previous value.  Interrupts are disabled around the
 * read-modify-write.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        int old;
        unsigned long flags;

        save_and_cli(flags);
        old = (*word & bit) != 0;
        *word ^= bit;
        restore_flags(flags);

        return old;
}
156
 
157
/*
 * __test_and_change_bit - non-atomic version of test_and_change_bit;
 * caller must guarantee exclusion.  Returns the previous bit value.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
        volatile unsigned int *word = (volatile unsigned int *) addr + (nr >> 5);
        int bit = 1 << (nr & 0x1f);
        int old = (*word & bit) != 0;

        *word ^= bit;

        return old;
}
169
 
170
/*
 * test_bit - return the value (0 or 1) of bit @nr in the bitmap at
 * @addr.  Pure read; no atomicity guarantees beyond a single word
 * access.
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
        const volatile unsigned int *word = (const volatile unsigned int *) addr;

        return (word[nr >> 5] >> (nr & 31)) & 1UL;
}
174
 
175
/*
 * ffz - find the index (0-based, from the LSB) of the first zero bit
 * in @word.
 *
 * SH implementation: result starts at ~0L (-1); each SHLR shifts the
 * low bit of @word into the T flag, and the BT/S loop keeps adding 1
 * to the result (the ADD sits in the branch delay slot) while the
 * shifted-out bits are 1.  ffz(0) == 0.
 * NOTE(review): for an all-ones word there is no zero bit to find;
 * callers are expected to guarantee one exists — confirm before
 * relying on the return value in that case.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long result;

        __asm__("1:\n\t"
                "shlr   %1\n\t"
                "bt/s   1b\n\t"
                " add   #1, %0"
                : "=r" (result), "=r" (word)
                : "0" (~0L), "1" (word)
                : "t");        /* clobbers the T (test/carry) flag */
        return result;
}
188
 
189
/*
 * find_next_zero_bit - find the first zero bit at or after @offset in
 * the bitmap at @addr, which is @size bits long.
 *
 * Returns the bit number of the first zero bit found, or a value
 * >= @size if the range contains no zero bit.  Scans one 32-bit word
 * at a time; bits below @offset in the first word are forced to 1 so
 * they can never be reported.
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;  /* bit index of word *p */
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;         /* bits remaining, counted from word *p */
        offset &= 31UL;         /* bit position within the first word */
        if (offset) {
                tmp = *(p++);
                /* set the low @offset bits so they are skipped */
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)       /* word has at least one zero bit */
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        /* scan whole 32-bit words */
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;               /* partial final word */

found_first:
        /* bits past the end of the bitmap count as set */
        tmp |= ~0UL << size;
found_middle:
        return result + ffz(tmp);
}
224
 
225
/* Find the first zero bit in a bitmap of @size bits. */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
244
 
245
#ifdef __LITTLE_ENDIAN__
246
/*
 * ext2 uses little-endian bit numbering over byte arrays.  On a
 * little-endian configuration that matches the native bit order, so
 * the generic bit operations above are used directly.
 */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
                find_next_zero_bit((addr), (size), (offset))
252
#else
253
/*
 * ext2_set_bit - big-endian build: set bit @nr in ext2 (byte-wise
 * little-endian) bit order and return its previous value.
 * Interrupts are disabled around the read-modify-write.
 */
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
        volatile unsigned char *byte = (unsigned char *) addr + (nr >> 3);
        int bit = 1 << (nr & 0x07);     /* bit within the byte */
        int old;
        unsigned long flags;

        save_and_cli(flags);
        old = (*byte & bit) != 0;
        *byte |= bit;
        restore_flags(flags);
        return old;
}
267
 
268
/*
 * ext2_clear_bit - big-endian build: clear bit @nr in ext2 (byte-wise
 * little-endian) bit order and return its previous value.
 * Interrupts are disabled around the read-modify-write.
 */
static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
        volatile unsigned char *byte = (unsigned char *) addr + (nr >> 3);
        int bit = 1 << (nr & 0x07);     /* bit within the byte */
        int old;
        unsigned long flags;

        save_and_cli(flags);
        old = (*byte & bit) != 0;
        *byte &= ~bit;
        restore_flags(flags);
        return old;
}
282
 
283
/*
 * ext2_test_bit - big-endian build: return the value (0 or 1) of bit
 * @nr in ext2 (byte-wise little-endian) bit order.  Pure read.
 */
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
        const volatile unsigned char *byte = (const unsigned char *) addr + (nr >> 3);

        return ((*byte >> (nr & 0x07)) & 1) != 0;
}
292
 
293
/* Find the first zero bit, in ext2 bit order, in a @size-bit bitmap. */
#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)
295
 
296
/*
 * ext2_find_next_zero_bit - big-endian build: find the first zero bit
 * (in ext2 little-endian bit order) at or after @offset in the
 * @size-bit bitmap at @addr.  Returns the ext2 bit number, or @size
 * if no zero bit exists in the range.  Words are read in native
 * (big-endian) order and byte-swapped only where the bit numbering
 * matters.
 */
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;  /* bit index of word *p */
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;         /* bits remaining, counted from word *p */
        offset &= 31UL;         /* bit position within the first word */
        if(offset) {
                /* We hold the little endian value in tmp, but then the
                 * shift is illegal. So we could keep a big endian value
                 * in tmp, like this:
                 *
                 * tmp = __swab32(*(p++));
                 * tmp |= ~0UL >> (32-offset);
                 *
                 * but this would decrease performance, so we change the
                 * shift instead: the skip mask is byte-swapped to match
                 * the native-order word.
                 */
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if(size < 32)
                        goto found_first;
                if(~tmp)        /* word has at least one zero bit */
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        /* scan whole 32-bit words; all-ones is endian-neutral */
        while(size & ~31UL) {
                if(~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if(!size)
                return result;
        tmp = *p;               /* partial final word */

found_first:
        /* tmp is little endian, so we would have to swab the shift,
         * see above. But then we have to swab tmp below for ffz, so
         * we might as well do this here.
         */
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}
345
#endif
346
 
347
/* Bitmap functions for the minix filesystem.  These map directly onto
 * the generic (native bit order) operations above. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
353
 
354
#endif /* __KERNEL__ */
355
 
356
#endif /* __ASM_SH_BITOPS_H */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.