OpenCores
URL: https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories: or1k

File: or1k/trunk/linux/linux-2.4/include/asm-ppc64/atomic.h (rev 1765)

/*
 * PowerPC64 atomic operations
 *
 * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_PPC64_ATOMIC_H_
#define _ASM_PPC64_ATOMIC_H_

#include <asm/memory.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))
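
/*
 * Added commentary: every operation below uses the PowerPC
 * load-and-reserve / store-conditional pair. lwarx loads the counter
 * and places a reservation on it; stwcx. stores the new value only if
 * the reservation is still intact, and bne- retries from label 1 if
 * another CPU modified the word in the meantime. That retry loop is
 * what makes each read-modify-write atomic.
 */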

static __inline__ void atomic_add(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_add\n\
        add     %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (a), "r" (&v->counter), "m" (v->counter)
        : "cc");
}
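
/*
 * Added commentary: in the asm above, "=&r" (t) is an early-clobber
 * output, so gcc will not assign t the same register as an input; the
 * paired "=m"/"m" constraints on v->counter tell the compiler that
 * the counter memory is both read and written, which avoids needing a
 * full "memory" clobber in the void variants.
 */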

static __inline__ int atomic_add_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_add_return\n\
        add     %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}
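
/*
 * Added commentary: the *_return variants differ from the void ones
 * in two ways. ISYNC_ON_SMP (expected to come from <asm/memory.h>,
 * included above) expands to an isync on SMP builds, which together
 * with the bne- after stwcx. acts as an acquire-style barrier, and
 * the "memory" clobber stops gcc from reordering other memory
 * accesses around the operation.
 */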

static __inline__ void atomic_sub(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%3         # atomic_sub\n\
        subf    %0,%2,%0\n\
        stwcx.  %0,0,%3\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (a), "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (a), "r" (&v->counter)
        : "cc", "memory");

        return t;
}

static __inline__ void atomic_inc(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_inc\n\
        addic   %0,%0,1\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (&v->counter), "m" (v->counter)
        : "cc");
}
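
/*
 * Added commentary: addic is used here rather than addi, presumably
 * because addi treats a source register of r0 as the constant 0,
 * whereas addic always reads the register, so the code stays correct
 * no matter which register gcc chooses for %0.
 */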

static __inline__ int atomic_inc_return(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_inc_return\n\
        addic   %0,%0,1\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_dec\n\
        addic   %0,%0,-1\n\
        stwcx.  %0,0,%2\n\
        bne-    1b"
        : "=&r" (t), "=m" (v->counter)
        : "r" (&v->counter), "m" (v->counter)
        : "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_dec_return\n\
        addic   %0,%0,-1\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}

#define atomic_sub_and_test(a, v)       (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_dec_return((v)) == 0)
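
/*
 * Illustrative usage (added; not part of the original header): a
 * minimal reference-counting sketch. The object and release function
 * are hypothetical.
 *
 *      static atomic_t refcount = ATOMIC_INIT(1);
 *
 *      atomic_inc(&refcount);                  // take a reference
 *      if (atomic_dec_and_test(&refcount))     // drop it; true at zero
 *              release_object();               // hypothetical cleanup
 */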

/*
 * Atomically test *v and decrement if it is greater than 0.
 * Returns the old value of *v minus 1, whether or not the
 * decrement was actually performed.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
        stwcx.  %0,0,%1\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"     : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}
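
/*
 * Illustrative usage (added, hypothetical): claiming one unit of a
 * counted resource that must never drop below zero. A negative return
 * means the counter was already 0 and nothing was decremented.
 *
 *      if (atomic_dec_if_positive(&available) < 0)
 *              return -EAGAIN;         // hypothetical failure path
 */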

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif /* _ASM_PPC64_ATOMIC_H_ */
