OpenCores Subversion repository: or1k
https://opencores.org/ocsvn/or1k/or1k/trunk

File: or1k/trunk/linux/linux-2.4/include/asm-mips64/bitops.h (rev 1774)

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994, 95, 96, 97, 98, 99, 2000  Ralf Baechle
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/types.h>
#include <asm/byteorder.h>              /* sigh ... */

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#endif

#ifndef __KERNEL__
#error "Don't do this, sucker ..."
#endif

#include <asm/system.h>
#include <asm/sgidefs.h>

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tlld\t%0, %1\t\t# set_bit\n\t"
                "or\t%0, %2\n\t"
                "scd\t%0, %1\n\t"
                "beqz\t%0, 1b"
                : "=&r" (temp), "=m" (*m)
                : "ir" (1UL << (nr & 0x3f)), "m" (*m)
                : "memory");
}
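
/*
 * Usage sketch (illustrative only): marking an event pending in a shared
 * flag word.  The `pending_events' bitmap and bit index 3 are hypothetical.
 *
 *      static unsigned long pending_events;
 *
 *      set_bit(3, &pending_events);    bit 3 is set atomically, even if
 *                                      another CPU updates the same word
 *                                      concurrently
 */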

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 6);

        *m |= 1UL << (nr & 0x3f);
}
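
/*
 * Usage sketch (illustrative only): __set_bit() is the cheaper choice when
 * no other CPU can observe the bitmap, e.g. while filling in a freshly
 * allocated, still-private table.  The `init_map' array is hypothetical.
 *
 *      unsigned long init_map[4] = { 0, };
 *
 *      __set_bit(0, init_map);
 *      __set_bit(65, init_map);        safe only because init_map is not
 *                                      yet visible to other CPUs
 */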

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tlld\t%0, %1\t\t# clear_bit\n\t"
                "and\t%0, %2\n\t"
                "scd\t%0, %1\n\t"
                "beqz\t%0, 1b\n\t"
                : "=&r" (temp), "=m" (*m)
                : "ir" (~(1UL << (nr & 0x3f))), "m" (*m));
}

#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()
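
/*
 * Usage sketch (illustrative only): when a bit serves as a lock, pair
 * clear_bit() with the barrier macros above so that stores made inside the
 * critical section are visible before the bit is seen clear.  The
 * `lock_word' and bit 0 are hypothetical.
 *
 *      ... critical section ...
 *      smp_mb__before_clear_bit();
 *      clear_bit(0, &lock_word);       release: earlier stores are ordered
 *                                      before the bit clears
 */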

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
        unsigned long temp;

        __asm__ __volatile__(
                "1:\tlld\t%0, %1\t\t# change_bit\n\t"
                "xor\t%0, %2\n\t"
                "scd\t%0, %1\n\t"
                "beqz\t%0, 1b"
                :"=&r" (temp), "=m" (*m)
                :"ir" (1UL << (nr & 0x3f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void * addr)
{
        unsigned long * m = ((unsigned long *) addr) + (nr >> 6);

        *m ^= 1UL << (nr & 0x3f);
}
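
/*
 * Usage sketch (illustrative only): change_bit() flips a bit atomically,
 * so a hypothetical `led_state' flag can be toggled from interrupt context
 * while other code updates neighbouring bits in the same word.
 *
 *      static unsigned long led_state;
 *
 *      change_bit(7, &led_state);      0 -> 1 or 1 -> 0 without losing
 *                                      concurrent updates to other bits
 */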

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline unsigned long test_and_set_bit(unsigned long nr,
        volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_set_bit\n"
                "1:\tlld\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "scd\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
                "sync\n\t"
#endif
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x3f)), "m" (*m)
                : "memory");

        return res != 0;
}
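
/*
 * Usage sketch (illustrative only): test_and_set_bit() lets exactly one of
 * several racing CPUs claim a resource.  `resource_map' and `nr' are
 * hypothetical.
 *
 *      if (!test_and_set_bit(nr, resource_map))
 *              ... this CPU now owns resource nr ...
 *      else
 *              ... another CPU got there first ...
 */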

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
        unsigned long mask, retval;
        unsigned long *a = (unsigned long *) addr;

        a += (nr >> 6);
        mask = 1UL << (nr & 0x3f);
        retval = ((mask & *a) != 0);
        *a |= mask;

        return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline unsigned long test_and_clear_bit(unsigned long nr,
        volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_clear_bit\n"
                "1:\tlld\t%0, %1\n\t"
                "or\t%2, %0, %3\n\t"
                "xor\t%2, %3\n\t"
                "scd\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
                "sync\n\t"
#endif
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x3f)), "m" (*m)
                : "memory");

        return res != 0;
}
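
/*
 * Usage sketch (illustrative only): test_and_clear_bit() consumes a pending
 * flag exactly once, even when it races with the code that sets the flag.
 * The `pending_events' word and handle_event() are hypothetical.
 *
 *      if (test_and_clear_bit(3, &pending_events))
 *              handle_event(3);        runs once per set_bit(3, ...)
 */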

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void * addr)
{
        unsigned long mask, retval;
        unsigned long *a = (unsigned long *) addr;

        a += (nr >> 6);
        mask = 1UL << (nr & 0x3f);
        retval = ((mask & *a) != 0);
        *a &= ~mask;

        return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline unsigned long test_and_change_bit(unsigned long nr,
        volatile void *addr)
{
        unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
        unsigned long temp, res;

        __asm__ __volatile__(
                ".set\tnoreorder\t\t# test_and_change_bit\n"
                "1:\tlld\t%0, %1\n\t"
                "xor\t%2, %0, %3\n\t"
                "scd\t%2, %1\n\t"
                "beqz\t%2, 1b\n\t"
                " and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
                "sync\n\t"
#endif
                ".set\treorder"
                : "=&r" (temp), "=m" (*m), "=&r" (res)
                : "r" (1UL << (nr & 0x3f)), "m" (*m)
                : "memory");

        return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
        unsigned long mask, retval;
        unsigned long *a = (unsigned long *) addr;

        a += (nr >> 6);
        mask = 1UL << (nr & 0x3f);
        retval = ((mask & *a) != 0);
        *a ^= mask;

        return retval;
}

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, volatile void * addr)
{
        return 1UL & (((const volatile unsigned long *) addr)[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}
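
/*
 * Usage sketch (illustrative only): test_bit() only reads, so it suits
 * polling or checks made under an external lock.  The `status' word is
 * hypothetical.
 *
 *      static unsigned long status;
 *
 *      if (test_bit(0, &status))
 *              ... bit 0 was set at the time of the read; it may have
 *                  changed again by the time this branch runs ...
 */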

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
        int b = 0, s;

        word = ~word;
        s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
        s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
        s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
        s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
        s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
        s =  1; if (word << 63 != 0) s = 0; b += s;

        return b;
}
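
/*
 * Worked example: ffz() inverts the word and then locates the lowest set
 * bit of the inverse by halving the search window (32, 16, 8, 4, 2, 1).
 * For instance, ffz(0xffffUL) == 16, since bits 0..15 are all ones and
 * bit 16 is the first zero; ffz(0UL) == 0.  As noted above, the result is
 * undefined for ~0UL.
 */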

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search (in bits)
 * @offset: The bit number to start searching at
 */
static inline unsigned long find_next_zero_bit(void *addr, unsigned long size,
                                               unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
        unsigned long result = offset & ~SZLONG_MASK;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= SZLONG_MASK;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (_MIPS_SZLONG-offset);
                if (size < _MIPS_SZLONG)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= _MIPS_SZLONG;
                result += _MIPS_SZLONG;
        }
        while (size & ~SZLONG_MASK) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += _MIPS_SZLONG;
                size -= _MIPS_SZLONG;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)                /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)
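
/*
 * Usage sketch (illustrative only): scanning an allocation bitmap for a
 * free slot and then claiming it atomically.  `alloc_map' and `max_slots'
 * are hypothetical.
 *
 *      unsigned long slot;
 *
 *      slot = find_first_zero_bit(alloc_map, max_slots);
 *      if (slot < max_slots && !test_and_set_bit(slot, alloc_map))
 *              ... slot is now ours ...
 */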

#ifdef __KERNEL__

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
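
/*
 * Worked examples: ffs() follows the libc convention, so it is 1-based and
 * returns 0 for a zero argument: ffs(0x10) == 5 and ffs(0) == 0, whereas
 * the ffz() above is 0-based.  For the Hamming weight macros,
 * hweight32(0xff) == 8, hweight16(0x0101) == 2 and hweight8(0x80) == 1.
 */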

static inline int __test_and_set_le_bit(unsigned long nr, void * addr)
{
        unsigned char   *ADDR = (unsigned char *) addr;
        int             mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;

        return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, void * addr)
{
        unsigned char   *ADDR = (unsigned char *) addr;
        int             mask, retval;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;

        return retval;
}

static inline int test_le_bit(unsigned long nr, const void * addr)
{
        const unsigned char     *ADDR = (const unsigned char *) addr;
        int                     mask;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);

        return ((mask & *ADDR) != 0);
}

static inline unsigned long ext2_ffz(unsigned int word)
{
        int b = 0, s;

        word = ~word;
        s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
        s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
        s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
        s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
        s =  1; if (word << 31 != 0) s = 0; b += s;

        return b;
}

static inline unsigned long find_next_zero_le_bit(void *addr,
        unsigned long size, unsigned long offset)
{
        unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31;
        unsigned int tmp;

        if (offset >= size)
                return size;

        size -= result;
        offset &= 31;
        if (offset) {
                tmp = cpu_to_le32p(p++);
                tmp |= ~0U >> (32-offset); /* bug or feature ? */
                if (size < 32)
                        goto found_first;
                if (tmp != ~0U)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = cpu_to_le32p(p++)) != ~0U)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;

        tmp = cpu_to_le32p(p);
found_first:
        tmp |= ~0U << size;
        if (tmp == ~0U)                 /* Are any bits zero? */
                return result + size;   /* Nope. */

found_middle:
        return result + ext2_ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
        find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit                    __test_and_set_le_bit
#define ext2_clear_bit                  __test_and_clear_le_bit
#define ext2_test_bit                   test_le_bit
#define ext2_find_first_zero_bit        find_first_zero_le_bit
#define ext2_find_next_zero_bit         find_next_zero_le_bit
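
/*
 * Usage sketch (illustrative only): the ext2_* wrappers operate on the
 * little-endian on-disk bitmaps regardless of CPU byte order.  A caller
 * holding the appropriate lock (ext2_set_bit maps to the non-atomic
 * __test_and_set_le_bit) might allocate from a bitmap block like this;
 * `bh', `bits' and `ino' are hypothetical.
 *
 *      ino = ext2_find_first_zero_bit(bh->b_data, bits);
 *      if (ino < bits && !ext2_set_bit(ino, bh->b_data))
 *              ... bit ino is now allocated in the on-disk bitmap ...
 */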

/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */
