OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-parisc/] [bitops.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

/* NOTE(review): SPIN_LOCK_IRQSAVE/ATOMIC_HASH used below presumably come
 * from these headers — verify against asm/atomic.h / linux/spinlock.h. */
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */

/* SHIFT_PER_LONG is log2(BITS_PER_LONG): it converts a bit index into a
 * word (unsigned long) index via a right shift. */
#ifdef __LP64__
#   define SHIFT_PER_LONG 6
#ifndef BITS_PER_LONG
#   define BITS_PER_LONG 64
#endif
#else
#   define SHIFT_PER_LONG 5
#ifndef BITS_PER_LONG
#   define BITS_PER_LONG 32
#endif
#endif

/* Reduce a bit index to the bit position within a single long. */
#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))


#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()

static __inline__ void set_bit(int nr, void * address)
34
{
35
        unsigned long mask;
36
        unsigned long *addr = (unsigned long *) address;
37
        unsigned long flags;
38
 
39
        addr += (nr >> SHIFT_PER_LONG);
40
        mask = 1L << CHOP_SHIFTCOUNT(nr);
41
        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
42
        *addr |= mask;
43
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
44
}
45
 
46
static __inline__ void __set_bit(int nr, void * address)
47
{
48
        unsigned long mask;
49
        unsigned long *addr = (unsigned long *) address;
50
 
51
        addr += (nr >> SHIFT_PER_LONG);
52
        mask = 1L << CHOP_SHIFTCOUNT(nr);
53
        *addr |= mask;
54
}
55
 
56
static __inline__ void clear_bit(int nr, void * address)
57
{
58
        unsigned long mask;
59
        unsigned long *addr = (unsigned long *) address;
60
        unsigned long flags;
61
 
62
        addr += (nr >> SHIFT_PER_LONG);
63
        mask = 1L << CHOP_SHIFTCOUNT(nr);
64
        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
65
        *addr &= ~mask;
66
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
67
}
68
 
69
static __inline__ void change_bit(int nr, void * address)
70
{
71
        unsigned long mask;
72
        unsigned long *addr = (unsigned long *) address;
73
        unsigned long flags;
74
 
75
        addr += (nr >> SHIFT_PER_LONG);
76
        mask = 1L << CHOP_SHIFTCOUNT(nr);
77
        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
78
        *addr ^= mask;
79
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
80
}
81
 
82
static __inline__ void __change_bit(int nr, void * address)
83
{
84
        unsigned long mask;
85
        unsigned long *addr = (unsigned long *) address;
86
 
87
        addr += (nr >> SHIFT_PER_LONG);
88
        mask = 1L << CHOP_SHIFTCOUNT(nr);
89
        *addr ^= mask;
90
}
91
 
92
static __inline__ int test_and_set_bit(int nr, void * address)
93
{
94
        unsigned long mask;
95
        unsigned long *addr = (unsigned long *) address;
96
        int oldbit;
97
        unsigned long flags;
98
 
99
        addr += (nr >> SHIFT_PER_LONG);
100
        mask = 1L << CHOP_SHIFTCOUNT(nr);
101
        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
102
        oldbit = (*addr & mask) ? 1 : 0;
103
        *addr |= mask;
104
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
105
 
106
        return oldbit;
107
}
108
 
109
static __inline__ int __test_and_set_bit(int nr, void * address)
110
{
111
        unsigned long mask;
112
        unsigned long *addr = (unsigned long *) address;
113
        int oldbit;
114
 
115
        addr += (nr >> SHIFT_PER_LONG);
116
        mask = 1L << CHOP_SHIFTCOUNT(nr);
117
        oldbit = (*addr & mask) ? 1 : 0;
118
        *addr |= mask;
119
 
120
        return oldbit;
121
}
122
 
123
static __inline__ int test_and_clear_bit(int nr, void * address)
124
{
125
        unsigned long mask;
126
        unsigned long *addr = (unsigned long *) address;
127
        int oldbit;
128
        unsigned long flags;
129
 
130
        addr += (nr >> SHIFT_PER_LONG);
131
        mask = 1L << CHOP_SHIFTCOUNT(nr);
132
        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
133
        oldbit = (*addr & mask) ? 1 : 0;
134
        *addr &= ~mask;
135
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
136
 
137
        return oldbit;
138
}
139
 
140
static __inline__ int __test_and_clear_bit(int nr, void * address)
141
{
142
        unsigned long mask;
143
        unsigned long *addr = (unsigned long *) address;
144
        int oldbit;
145
 
146
        addr += (nr >> SHIFT_PER_LONG);
147
        mask = 1L << CHOP_SHIFTCOUNT(nr);
148
        oldbit = (*addr & mask) ? 1 : 0;
149
        *addr &= ~mask;
150
 
151
        return oldbit;
152
}
153
 
154
static __inline__ int test_and_change_bit(int nr, void * address)
155
{
156
        unsigned long mask;
157
        unsigned long *addr = (unsigned long *) address;
158
        int oldbit;
159
        unsigned long flags;
160
 
161
        addr += (nr >> SHIFT_PER_LONG);
162
        mask = 1L << CHOP_SHIFTCOUNT(nr);
163
        SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
164
        oldbit = (*addr & mask) ? 1 : 0;
165
        *addr ^= mask;
166
        SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
167
 
168
        return oldbit;
169
}
170
 
171
static __inline__ int __test_and_change_bit(int nr, void * address)
172
{
173
        unsigned long mask;
174
        unsigned long *addr = (unsigned long *) address;
175
        int oldbit;
176
 
177
        addr += (nr >> SHIFT_PER_LONG);
178
        mask = 1L << CHOP_SHIFTCOUNT(nr);
179
        oldbit = (*addr & mask) ? 1 : 0;
180
        *addr ^= mask;
181
 
182
        return oldbit;
183
}
184
 
185
static __inline__ int test_bit(int nr, const void *address)
186
{
187
        unsigned long mask;
188
        unsigned long *addr = (unsigned long *) address;
189
 
190
        addr += (nr >> SHIFT_PER_LONG);
191
        mask = 1L << CHOP_SHIFTCOUNT(nr);
192
 
193
        return !!(*addr & mask);
194
}
195
 
196
/*
 * ffz - find the index of the first (least-significant) zero bit in
 * @word.  If @word is all ones the loop runs off the top and the result
 * is BITS_PER_LONG; callers mask the input so this does not happen.
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long pos;

        /* Walk up from bit 0 until a clear bit appears. */
        for (pos = 0; word & 1UL; word >>= 1)
                pos++;

        return pos;
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

/*
 * find_next_zero_bit - find the first zero bit at or after @offset in
 * a bitmap of @size bits starting at @addr.  Returns the bit number of
 * that zero bit, or @size if no zero bit is found in range.
 */
static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
        unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= (BITS_PER_LONG-1);
        if (offset) {
                /* Partial first word: pretend the bits below offset are ones
                 * so they can never be reported as "zero found". */
                tmp = *(p++);
                tmp |= ~0UL >> (BITS_PER_LONG-offset);
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        /* Scan whole words; ~tmp is nonzero iff tmp has a zero bit. */
        while (size & ~(BITS_PER_LONG -1)) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = *p;       /* partial last word */
found_first:
        tmp |= ~0UL << size;    /* mask off the bits beyond size as ones */
found_middle:
        return result + ffz(tmp);
}

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 *
 * NOTE(review): the test_and_* implementations above use
 * SPIN_LOCK_IRQSAVE, which does disable interrupts locally —
 * confirm whether this comment is stale.
 *
 * The XOR (0x38 on 64-bit, 0x18 on 32-bit) remaps ext2's little-endian
 * bit numbering onto this big-endian machine by reversing the byte
 * index within each long.
 */
#ifdef __LP64__
#define ext2_set_bit(nr, addr)          test_and_set_bit((nr) ^ 0x38, addr)
#define ext2_clear_bit(nr, addr)        test_and_clear_bit((nr) ^ 0x38, addr)
#else
#define ext2_set_bit(nr, addr)          test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr)        test_and_clear_bit((nr) ^ 0x18, addr)
#endif

#endif  /* __KERNEL__ */

/*
 * ext2_test_bit - test bit @nr in an ext2 bitmap at @addr, using ext2's
 * byte-based (little-endian) bit numbering.  Returns 1 or 0.
 */
static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
        __const__ unsigned char *bytes = (__const__ unsigned char *) addr;
        unsigned char byte = bytes[nr >> 3];    /* byte holding bit nr */

        return (byte >> (nr & 7)) & 1;
}

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

/*
 * ext2_find_next_zero_bit - find the first zero bit at or after @offset
 * in an ext2 (little-endian) bitmap of @size bits at @addr.  Each
 * 32-bit word is read through cpu_to_le32p() so the scan sees the
 * on-disk little-endian bit order on this big-endian machine.  Returns
 * the bit number found, or @size if none.
 */
extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                /* Partial first word: force bits below offset to one. */
                tmp = cpu_to_le32p(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        /* Scan whole 32-bit words. */
        while (size >= 32) {
                if ((tmp = cpu_to_le32p(p++)) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = cpu_to_le32p(p);  /* partial last word */
found_first:
        tmp |= ~0U << size;     /* mask off bits beyond size as ones */
found_middle:
        return result + ffz(tmp);
}

/* Bitmap functions for the minix filesystem.  */
/* Minix uses the same little-endian bitmap layout as ext2, so these
 * simply forward to the ext2_* operations above. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PARISC_BITOPS_H */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.