or1k/trunk/linux/linux-2.4/include/asm-sparc/atomic.h (rev 1275)

/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#ifndef CONFIG_SMP

#define ATOMIC_INIT(i)  { (i) }
#define atomic_read(v)          ((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *      ----------------------------------------
 *      | signed 24-bit counter value |  lock  |  atomic_t
 *      ----------------------------------------
 *       31                          8 7      0
 */
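
/*
 * For example, atomic_set(v, -5) stores (-5 << 8) == 0xfffffb00 with the
 * lock byte in bits 7:0 clear, and atomic_read() recovers -5 because the
 * arithmetic shift (0xfffffb00 >> 8) sign-extends back to 0xfffffffb.
 */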

#define ATOMIC_INIT(i)  { (i << 8) }

static __inline__ int atomic_read(atomic_t *v)
{
        int ret = v->counter;

        while(ret & 0xff)
                ret = v->counter;

        return ret >> 8;
}

#define atomic_set(v, i)        (((v)->counter) = ((i) << 8))
#endif

static inline int __atomic_add(int i, atomic_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g7");

        ptr = &v->counter;
        increment = i;

        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___atomic_add\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
        : "memory", "cc");

        return increment;
}

static inline int __atomic_sub(int i, atomic_t *v)
{
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
        register int tmp1 asm("g3");
        register int tmp2 asm("g4");
        register int tmp3 asm("g7");

        ptr = &v->counter;
        increment = i;

        __asm__ __volatile__(
        "mov    %%o7, %%g4\n\t"
        "call   ___atomic_sub\n\t"
        " add   %%o7, 8, %%o7\n"
        : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
        : "memory", "cc");

        return increment;
}
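
/*
 * Note on the two helpers above: the bulk of the work happens out of line
 * in ___atomic_add/___atomic_sub (arch/sparc/lib/atomic.S), and the
 * register pinning is the calling convention for those routines: the
 * pointer is passed in %g1 and the value in %g2, while %g3, %g4 and %g7
 * are listed as dummy outputs so GCC treats them as clobbered.  The inline
 * asm saves the caller's return address (%o7) in %g4 before the call
 * clobbers it, and the add in the delay slot biases the new %o7 by 8;
 * presumably the out-of-line routine jumps back through the adjusted %o7
 * and restores the original value from %g4 (see atomic.S for the details).
 */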

#define atomic_add(i, v) ((void)__atomic_add((i), (v)))
#define atomic_sub(i, v) ((void)__atomic_sub((i), (v)))

#define atomic_dec_return(v) __atomic_sub(1, (v))
#define atomic_inc_return(v) __atomic_add(1, (v))

#define atomic_sub_and_test(i, v) (__atomic_sub((i), (v)) == 0)
#define atomic_dec_and_test(v) (__atomic_sub(1, (v)) == 0)

#define atomic_inc(v) ((void)__atomic_add(1, (v)))
#define atomic_dec(v) ((void)__atomic_sub(1, (v)))

#define atomic_add_negative(i, v) (__atomic_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */
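
As a usage sketch, not part of the header above, the following shows the
reference-counting pattern these primitives are typically combined for in a
2.4-era driver; the struct and function names (pcount, pcount_alloc,
pcount_get, pcount_put) are illustrative only:

#include <linux/slab.h>
#include <asm/atomic.h>

/* Hypothetical refcounted object, for illustration only. */
struct pcount {
        atomic_t refcnt;
        /* ... payload ... */
};

static struct pcount *pcount_alloc(void)
{
        struct pcount *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (p)
                atomic_set(&p->refcnt, 1);      /* allocator holds one ref */
        return p;
}

static void pcount_get(struct pcount *p)
{
        atomic_inc(&p->refcnt);                 /* take another reference */
}

static void pcount_put(struct pcount *p)
{
        /* atomic_dec_and_test() is true only for whoever drops the last
         * reference, so exactly one caller frees the object. */
        if (atomic_dec_and_test(&p->refcnt))
                kfree(p);
}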
