OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

[/] [or1k_soc_on_altera_embedded_dev_kit/] [trunk/] [linux-2.6/] [linux-2.6.24/] [include/] [asm-or32/] [bitops.h] - rev 7

/* asm/bitops.h for Linux/or32
 *
 * __PHX__ TODO: asm versions
 *
 */

#ifdef __KERNEL__
#ifndef _OR32_BITOPS_H
#define _OR32_BITOPS_H

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/compiler.h>


/* Atomic on a uniprocessor only: the read-modify-write is protected by
 * masking interrupts, not by a hardware atomic instruction. Note that,
 * unlike the generic set_bit(), this one returns the bit's old value. */
static __inline__ int set_bit(int nr, volatile void * a)
{
        int     * addr = (void *)a;
        int     mask, retval;
        unsigned long flags;

        addr += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *addr) != 0;
        *addr |= mask;
        local_irq_restore(flags);
        return retval;
}
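
/* Illustrative usage only, not part of the original file: unlike the stock
 * Linux set_bit() (which returns void), this port's set_bit() also returns
 * the bit's previous value. 'example_flags' is a hypothetical state word
 * invented for the sketch; it is kept under #if 0 so the header compiles
 * unchanged. */
#if 0
static unsigned long example_flags;

static void example_set(void)
{
        /* Atomically (via IRQ masking) set bit 3 and learn its old state. */
        int was_set = set_bit(3, &example_flags);

        if (!was_set) {
                /* we are the caller that turned the bit on */
        }
}
#endif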

/*
 * non-atomic version
 */
static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p |= mask;
}

static __inline__ int clear_bit(int nr, volatile void * a)
{
        int     * addr = (void *)a;
        int     mask, retval;
        unsigned long flags;

        addr += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *addr) != 0;
        *addr &= ~mask;
        local_irq_restore(flags);
        return retval;
}

/*
 * non-atomic version
 */
static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p &= ~mask;
}

static __inline__ unsigned long change_bit(unsigned long nr, void *addr)
{
        int mask;
        unsigned long flags;
        unsigned long *ADDR = (unsigned long *) addr;
        unsigned long oldbit;

        ADDR += nr >> 5;
        mask = 1 << (nr & 31);
        local_irq_save(flags);
        oldbit = (mask & *ADDR);
        *ADDR ^= mask;
        local_irq_restore(flags);
        return oldbit != 0;
}

/*
 * non-atomic version
 */
static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

        *p ^= mask;
}

static __inline__ int test_bit(int nr, const void *a)
{
        unsigned int mask;
        unsigned int *adr = (unsigned int *)a;

        adr += nr >> 5;
        mask = 1 << (nr & 0x1f);
        return ((mask & *adr) != 0);
}
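
/* Illustrative only, not part of the original file: the indexing arithmetic
 * used throughout this header. Bit 'nr' lives in 32-bit word nr >> 5, at
 * position nr & 0x1f inside that word. A worked example under #if 0: */
#if 0
static void example_indexing(void)
{
        /* nr = 70: word index 70 >> 5 = 2, bit-in-word 70 & 0x1f = 6,
         * so test_bit(70, map) checks map[2] & (1 << 6). */
        unsigned int map[3] = { 0, 0, 1 << 6 };

        (void)test_bit(70, map); /* non-zero */
}
#endif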

static __inline__ int test_and_set_bit(int nr, void *addr)
{
        unsigned int mask, retval;
        unsigned long flags;
        unsigned int *adr = (unsigned int *)addr;

        adr += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *adr) != 0;
        *adr |= mask;
        local_irq_restore(flags);
        return retval;
}
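
/* Illustrative only, not part of the original file: test_and_set_bit() is
 * the classic building block for a one-word try-lock. A hedged sketch with
 * a hypothetical EXAMPLE_LOCK_BIT, under #if 0: */
#if 0
#define EXAMPLE_LOCK_BIT 0

static unsigned int example_lock_word;

static int example_trylock(void)
{
        /* 1 if we took the lock, i.e. the bit was previously clear */
        return !test_and_set_bit(EXAMPLE_LOCK_BIT, &example_lock_word);
}

static void example_unlock(void)
{
        clear_bit(EXAMPLE_LOCK_BIT, &example_lock_word);
}
#endif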

/*
 * non-atomic version
 */
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;
}


static __inline__ int test_and_clear_bit(int nr, void *addr)
{
        unsigned int mask, retval;
        unsigned long flags;
        unsigned int *adr = (unsigned int *)addr;

        adr += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *adr) != 0;
        *adr &= ~mask;
        local_irq_restore(flags);
        return retval;
}

/*
 * non-atomic version
 */
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old & ~mask;
        return (old & mask) != 0;
}

static __inline__ int test_and_change_bit(int nr, void *addr)
{
        unsigned int mask, retval;
        unsigned long flags;
        unsigned int *adr = (unsigned int *)addr;

        adr += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save(flags);
        retval = (mask & *adr) != 0;
        *adr ^= mask;
        local_irq_restore(flags);
        return retval;
}

/*
 * non-atomic version
 */
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = 1 << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
        unsigned long old = *p;

        *p = old ^ mask;
        return (old & mask) != 0;
}

/* This macro shadows the inline __change_bit() defined above for all
 * subsequent callers; both forms have the same effect on the bitmap. */
#define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr)

/*
 * Find-bit routines..
 */

/*
 * fls: find last bit set. The generic implementation is pulled in via
 * <asm-generic/bitops/fls.h> at the bottom of this file; defining a
 * function-like fls() macro here would break that inline definition.
 */

/* The easy/cheese version for now: a linear scan over the trailing
 * one bits. */
static __inline__ unsigned long ffz(unsigned long word)
{
        unsigned long result = 0;

        while(word & 1) {
                result++;
                word >>= 1;
        }
        return result;
}
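
/* Illustrative only, not part of the original file: ffz() counts the run of
 * one bits starting at bit 0, so the result is the index of the lowest zero
 * bit. A worked example under #if 0: */
#if 0
static void example_ffz(void)
{
        /* 0x17 = binary 10111: bits 0..2 set, bit 3 is the first zero,
         * so ffz(0x17) == 3. */
        (void)ffz(0x17);
}
#endif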

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs). The
 * implementation is supplied by <asm-generic/bitops/ffs.h>,
 * included at the bottom of this file; a function-like ffs()
 * macro here would break that inline definition.
 */

/*
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
 * The implementations come from <asm-generic/bitops/hweight.h>, included
 * at the bottom of this file.
 */
#if 0 /*RGD*/
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
#endif

/**
 * __ffs - find first set bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ int __ffs(unsigned long word)
{
        int num = 0;

        if ((word & 0xffff) == 0) {
                num += 16;
                word >>= 16;
        }
        if ((word & 0xff) == 0) {
                num += 8;
                word >>= 8;
        }
        if ((word & 0xf) == 0) {
                num += 4;
                word >>= 4;
        }
        if ((word & 0x3) == 0) {
                num += 2;
                word >>= 2;
        }
        if ((word & 0x1) == 0)
                num += 1;
        return num;
}
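
/* Illustrative only, not part of the original file: __ffs() homes in on the
 * lowest set bit by halving -- it tests 16, then 8, 4, 2 and 1 bits -- so it
 * finishes in five steps rather than up to 32. Worked example under #if 0: */
#if 0
static void example___ffs(void)
{
        /* word = 1 << 21:
         *   low 16 bits zero -> num = 16, word >>= 16 (word = 0x20)
         *   word & 0xff != 0 -> skip
         *   word & 0xf == 0  -> num = 20, word >>= 4  (word = 0x2)
         *   word & 0x3 != 0  -> skip
         *   word & 0x1 == 0  -> num = 21
         */
        (void)__ffs(1UL << 21); /* == 21 */
}
#endif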

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static __inline__ int sched_find_first_bit(unsigned long *b)
{

        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 32;
        if (unlikely(b[2]))
                return __ffs(b[2]) + 64;
        if (b[3])
                return __ffs(b[3]) + 96;
        return __ffs(b[4]) + 128;
}
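
/* Illustrative only, not part of the original file: the scheduler's
 * 140-entry priority bitmap spans five 32-bit words, and this helper simply
 * checks them in order. Worked example under #if 0: */
#if 0
static void example_sched_bitmap(void)
{
        unsigned long prio[5] = { 0, 0, 0, 0, 0 };

        /* Mark priority 100 runnable: word 100 >> 5 = 3, bit 100 & 31 = 4. */
        __set_bit(100, prio);

        /* b[0..2] are zero, __ffs(b[3]) == 4, so this returns 4 + 96 == 100. */
        (void)sched_find_first_bit(prio);
}
#endif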

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bitnumber to start searching at
 */
static __inline__ unsigned long find_next_bit(const unsigned long *addr,
                                              unsigned long size, unsigned long offset)
{
        const unsigned int *p = ((const unsigned int *) addr) + (offset >> 5);
        unsigned int result = offset & ~31UL;
        unsigned int tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *p++;
                tmp &= ~0UL << offset;
                if (size < 32)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size >= 32) {
                if ((tmp = *p++) != 0)
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp &= ~0UL >> (32 - size);
        if (tmp == 0UL)        /* Are any bits set? */
                return result + size; /* Nope. */
found_middle:
        return result + __ffs(tmp);
}
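
/* Illustrative only, not part of the original file: find_next_bit() is
 * typically used to walk every set bit in a bitmap (find_first_bit(),
 * defined further down, is shorthand for the first call). A minimal sketch
 * under #if 0, with a hypothetical NBITS: */
#if 0
#define NBITS 96

static void example_walk(const unsigned long *map)
{
        unsigned long bit;

        for (bit = find_next_bit(map, NBITS, 0);
             bit < NBITS;
             bit = find_next_bit(map, NBITS, bit + 1)) {
                /* 'bit' indexes a set bit, visited in increasing order */
        }
}
#endif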


/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 */

static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr,
                                                   unsigned long size,
                                                   unsigned long offset)
{
        const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (32-offset);
                if (size < 32)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while (size & ~31UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;    /* force the bits above 'size' to one */
        if (tmp == ~0UL)        /* Are any bits zero? */
                return result + size; /* Nope. */
found_middle:
        return result + ffz(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
        find_next_bit((addr), (size), 0)


/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#define find_first_zero_bit(addr, size) \
        find_next_zero_bit((addr), (size), 0)

/* Now for the ext2 filesystem bit operations and helper routines.
 * ext2 bitmaps live on disk in little-endian byte order, so these
 * helpers address the map byte-by-byte (nr >> 3, nr & 7), which is
 * endian-neutral. */

static __inline__ int ext2_set_bit(int nr, void * addr)
{
        int             mask, retval;
        unsigned long   flags;
        unsigned char   *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR |= mask;
        local_irq_restore(flags);
        return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
        int             mask, retval;
        unsigned long   flags;
        unsigned char   *ADDR = (unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        local_irq_save(flags);
        retval = (mask & *ADDR) != 0;
        *ADDR &= ~mask;
        local_irq_restore(flags);
        return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
        int                     mask;
        const unsigned char     *ADDR = (const unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        return ((mask & *ADDR) != 0);
}
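
/* Illustrative only, not part of the original file: because the ext2 helpers
 * address the bitmap byte-by-byte (nr >> 3, nr & 7), bit numbering follows
 * the on-disk little-endian layout regardless of CPU endianness. Worked
 * example under #if 0: */
#if 0
static void example_ext2_bits(void)
{
        unsigned char bitmap[2] = { 0, 0 };

        /* nr = 10: byte 10 >> 3 = 1, bit 10 & 7 = 2 -> bitmap[1] |= 0x04 */
        ext2_set_bit(10, bitmap);

        (void)ext2_test_bit(10, bitmap); /* non-zero */
}
#endif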

#define ext2_find_first_zero_bit(addr, size) \
        ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if(offset) {
                tmp = *(p++);
                /* byte-swapped mask, as in __ext2_find_next_zero_bit() below */
                tmp |= __swab32(~0UL >> (32-offset));
                if(size < 32)
                        goto found_first;
                if(~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while(size & ~31UL) {
                if(~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if(!size)
                return result;
        tmp = *p;

found_first:
        tmp |= __swab32(~0UL << size);  /* byte-swapped mask for the tail word */
found_middle:
        /* open-coded __swab32(): back to little-endian bit order for ffz() */
        tmp = ((tmp>>24) | ((tmp>>8)&0xff00) | ((tmp<<8)&0xff0000) | (tmp<<24));
        return result + ffz(tmp);
}
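
/* Illustrative only, not part of the original file: on this big-endian CPU a
 * 32-bit load of a little-endian ext2 bitmap scrambles the byte order, so
 * the masks above are byte-swapped into register order and the word is
 * swapped back before ffz(). Worked example under #if 0: */
#if 0
static void example_ext2_scan(void)
{
        /* Bits 0..9 in use: bytes 0xff, 0x03. A big-endian 32-bit load
         * gives 0xff030000; swapping restores 0x000003ff, whose first
         * zero bit is 10. */
        unsigned char bitmap[4] = { 0xff, 0x03, 0x00, 0x00 };

        (void)ext2_find_first_zero_bit(bitmap, 32); /* == 10 */
}
#endif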

#define __ext2_set_bit ext2_set_bit
#define __ext2_clear_bit ext2_clear_bit

static __inline__ int __ext2_test_bit(int nr, __const__ void * addr)
{
        int                     mask;
        __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

        ADDR += nr >> 3;
        mask = 1 << (nr & 0x07);
        return ((mask & *ADDR) != 0);
}

#define __ext2_find_first_zero_bit(addr, size) \
        __ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long __ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
        unsigned long result = offset & ~31UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 31UL;
        if(offset) {
                tmp = *(p++);
                tmp |= __swab32(~0UL >> (32-offset));
                if(size < 32)
                        goto found_first;
                if(~tmp)
                        goto found_middle;
                size -= 32;
                result += 32;
        }
        while(size & ~31UL) {
                if(~(tmp = *(p++)))
                        goto found_middle;
                result += 32;
                size -= 32;
        }
        if(!size)
                return result;
        tmp = *p;

found_first:
        return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
        return result + ffz(__swab32(tmp));
}

#define ext2_set_bit_atomic(lock, nr, addr)             \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
                spin_unlock(lock);                      \
                ret;                                    \
        })

#define ext2_clear_bit_atomic(lock, nr, addr)           \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
                ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
                spin_unlock(lock);                      \
                ret;                                    \
        })
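
/* Illustrative only, not part of the original file: these macros rely on the
 * GNU C statement-expression extension, ({ ... }), whose value is its last
 * expression ('ret'), so the bit's old value survives the lock/unlock pair.
 * A sketch with a hypothetical lock and bitmap, under #if 0 (assumes
 * <linux/spinlock.h> and the 2.6.24-era SPIN_LOCK_UNLOCKED initializer): */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static unsigned long example_bitmap[1];

static void example_atomic(void)
{
        /* Expands to: take the lock, set bit 5, drop the lock, and
         * evaluate to the bit's previous state. */
        int was_set = ext2_set_bit_atomic(&example_lock, 5, example_bitmap);

        (void)was_set;
}
#endif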

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */

#define smp_mb__before_clear_bit()      barrier()
#define smp_mb__after_clear_bit()       barrier()

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/lock.h>

#endif /* _OR32_BITOPS_H */
#endif /* __KERNEL__ */
