OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/linux/linux-2.4/include/asm-ia64/spinlock.h (rev 1774)

#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/kernel.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/atomic.h>

#undef NEW_LOCK

#ifdef NEW_LOCK

typedef struct {
        volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED                      (spinlock_t) { 0 }
#define spin_lock_init(x)                       ((x)->lock = 0)

/*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 * rather than a simple xchg to avoid writing the cache-line when
 * there is contention.
 */
#define spin_lock(x)                                                                    \
{                                                                                       \
        register char *addr __asm__ ("r31") = (char *) &(x)->lock;                     \
                                                                                        \
        __asm__ __volatile__ (                                                          \
                "mov r30=1\n"                                                           \
                "mov ar.ccv=r0\n"                                                       \
                ";;\n"                                                                  \
                "cmpxchg4.acq r30=[%0],r30,ar.ccv\n"                                    \
                ";;\n"                                                                  \
                "cmp.ne p15,p0=r30,r0\n"                                                \
                "(p15) br.call.spnt.few b7=ia64_spinlock_contention\n"                  \
                ";;\n"                                                                  \
                "1:\n"                          /* force a new bundle */                \
                :: "r"(addr)                                                            \
                : "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory");      \
}
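
/*
 * Note (added for clarity): this variant inlines only the uncontended
 * path.  cmpxchg4.acq tries to swap the lock word from 0 to 1 with
 * acquire semantics; if the word was already nonzero (r30 != 0 after
 * the exchange), control branches to the out-of-line routine
 * ia64_spinlock_contention, keeping the common case to one atomic op.
 */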

#define spin_trylock(x)                                                                 \
({                                                                                      \
        register long result;                                                           \
                                                                                        \
        __asm__ __volatile__ (                                                          \
                "mov ar.ccv=r0\n"                                                       \
                ";;\n"                                                                  \
                "cmpxchg4.acq %0=[%2],%1,ar.ccv\n"                                      \
                : "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory");         \
        (result == 0);                                                                  \
})
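
/*
 * Usage sketch (illustrative, not part of the original header): the
 * expression above evaluates to nonzero on success, so callers that
 * must not spin can back off.  "my_lock" is a hypothetical name:
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      if (spin_trylock(&my_lock)) {
 *              ... critical section ...
 *              spin_unlock(&my_lock);
 *      } else {
 *              ... lock is contended: take a slower path ...
 *      }
 */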

#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock(x)          do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
#define spin_unlock_wait(x)     do { barrier(); } while ((x)->lock)

#else /* !NEW_LOCK */

typedef struct {
        volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED                      (spinlock_t) { 0 }
#define spin_lock_init(x)                       ((x)->lock = 0)

#ifdef GAS_HAS_HINT_INSN
#define HINT_PAUSE      ";; (p7) hint @pause\n"
#else
#define HINT_PAUSE
#endif
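
/*
 * Note (added for clarity): when the assembler supports the hint
 * instruction, HINT_PAUSE inserts "(p7) hint @pause" into the spin
 * loops below, telling the processor that the code is busy-waiting so
 * it can yield shared resources until the lock word changes.
 * Otherwise the macro expands to nothing.
 */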

/*
 * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
 * rather than a simple xchg to avoid writing the cache-line when
 * there is contention.
 */
#define spin_lock(x) __asm__ __volatile__ (                     \
        "mov ar.ccv = r0\n"                                     \
        "mov r29 = 1\n"                                         \
        ";;\n"                                                  \
        "1:\n"                                                  \
        "ld4 r2 = [%0]\n"                                       \
        ";;\n"                                                  \
        "cmp4.eq p0,p7 = r0,r2\n"                               \
        HINT_PAUSE                                              \
        "(p7) br.cond.spnt.few 1b \n"                           \
        "cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"                 \
        ";;\n"                                                  \
        "cmp4.eq p0,p7 = r0, r2\n"                              \
        "(p7) br.cond.spnt.few 1b\n"                            \
        ";;\n"                                                  \
        :: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")

#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock(x)          do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define spin_trylock(x)         (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x)     do { barrier(); } while ((x)->lock)

#endif /* !NEW_LOCK */

typedef struct {
        volatile int read_counter:31;
        volatile int write_lock:1;
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
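
/*
 * Layout note (added for clarity): readers and the writer share one
 * 32-bit word.  Bits 0..30 count active readers; bit 31, the sign bit,
 * is the write lock.  A reader can therefore enter with a single
 * fetchadd4 and then test bit 31, while a writer acquires the lock by
 * cmpxchg'ing the whole word from 0 to 0x80000000.
 */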

#define read_lock(rw)                                                           \
do {                                                                            \
        int tmp = 0;                                                            \
        __asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n"                \
                              ";;\n"                                            \
                              "tbit.nz p7,p0 = %0, 31\n"                        \
                              "(p7) br.cond.sptk.few 2f\n"                      \
                              ".section .text.lock,\"ax\"\n"                    \
                              "2:\tfetchadd4.rel %0 = [%1], -1\n"               \
                              ";;\n"                                            \
                              "3:\tld4.acq %0 = [%1]\n"                         \
                              ";;\n"                                            \
                              "tbit.nz p7,p0 = %0, 31\n"                        \
                              HINT_PAUSE                                        \
                              "(p7) br.cond.sptk.few 3b\n"                      \
                              "br.cond.sptk.few 1b\n"                           \
                              ";;\n"                                            \
                              ".previous\n"                                     \
                              : "=&r" (tmp)                                     \
                              : "r" (rw) : "p7", "memory");                     \
} while(0)
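
/*
 * C-level sketch of read_lock() above (illustrative only, not part of
 * the original header; assumes GCC __sync builtins and treats the lock
 * word as a plain int):
 *
 *      static inline void read_lock_sketch(volatile int *rw)
 *      {
 *              // optimistically bump the reader count (acquire)
 *              while (__sync_fetch_and_add(rw, 1) < 0) {
 *                      // bit 31 was set: a writer holds the lock.
 *                      // Undo our increment, wait, and retry.
 *                      __sync_fetch_and_sub(rw, 1);
 *                      while (*rw < 0)
 *                              ;  // spin until the writer releases
 *              }
 *      }
 */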

#define read_unlock(rw)                                                         \
do {                                                                            \
        int tmp = 0;                                                            \
        __asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n"                   \
                              : "=r" (tmp)                                      \
                              : "r" (rw)                                        \
                              : "memory");                                      \
} while(0)

#define write_lock(rw)                                                          \
do {                                                                            \
        __asm__ __volatile__ (                                                  \
                "mov ar.ccv = r0\n"                                             \
                "dep r29 = -1, r0, 31, 1\n"                                     \
                ";;\n"                                                          \
                "1:\n"                                                          \
                "ld4 r2 = [%0]\n"                                               \
                ";;\n"                                                          \
                "cmp4.eq p0,p7 = r0,r2\n"                                       \
                HINT_PAUSE                                                      \
                "(p7) br.cond.spnt.few 1b \n"                                   \
                "cmpxchg4.acq r2 = [%0], r29, ar.ccv\n"                         \
                ";;\n"                                                          \
                "cmp4.eq p0,p7 = r0, r2\n"                                      \
                "(p7) br.cond.spnt.few 1b\n"                                    \
                ";;\n"                                                          \
                :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");            \
} while(0)
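
/*
 * Note (added for clarity): "dep r29 = -1, r0, 31, 1" deposits a
 * single 1 into bit 31 of zero, i.e. r29 = 0x80000000, the write-lock
 * bit.  The loop spins (read-only) until the whole word is 0 (no
 * readers, no writer) and then cmpxchg4.acq's 0 -> 0x80000000 with
 * acquire semantics, retrying if it loses the race.
 */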

#define write_unlock(x)                                                                 \
({                                                                                      \
        smp_mb__before_clear_bit();     /* need barrier before releasing lock... */     \
        clear_bit(31, (x));                                                             \
})
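
/*
 * Note (added for clarity): the write lock is released with an atomic
 * clear_bit(31, ...) rather than a plain store, because readers that
 * arrive while the writer holds the lock transiently bump bits 0..30
 * before backing off; clearing only bit 31 drops the write lock
 * without clobbering those in-flight reader counts.  The
 * smp_mb__before_clear_bit() keeps the critical section's stores from
 * leaking past the release.
 */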

#endif /*  _ASM_IA64_SPINLOCK_H */
