/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
                      unsigned int old, unsigned int new)
{
        asm volatile(
                "       cs      %0,%3,%1"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory" );
        return old;
}

#else /* __GNUC__ */

static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
                      unsigned int old, unsigned int new)
{
        asm volatile(
                "       cs      %0,%3,0(%4)"
                : "=d" (old), "=m" (*lock)
                : "0" (old), "d" (new), "a" (lock), "m" (*lock)
                : "cc", "memory" );
        return old;
}

#endif /* __GNUC__ */
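/*
 * Illustrative sketch, not part of the original header: "cs" is the s390
 * compare-and-swap instruction.  It atomically compares *lock with "old"
 * and, if they are equal, stores "new"; the previous contents of *lock
 * are returned either way, so the caller can tell whether its swap took
 * effect.  The hypothetical helper below shows the usual retry-loop
 * pattern built on top of such a primitive.
 */
static inline void example_atomic_store(volatile unsigned int *word,
                                        unsigned int value)
{
        unsigned int old;

        do {
                old = *word;
        } while (_raw_compare_and_swap(word, old, value) != old);
}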

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) \
                 _raw_spin_relax(lock); } while (0)

extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
extern void _raw_spin_relax(raw_spinlock_t *lock);

static inline void __raw_spin_lock(raw_spinlock_t *lp)
{
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
        int old;

        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0)) {
                lp->owner_pc = pc;
                return;
        }
        _raw_spin_lock_wait(lp, pc);
}
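/*
 * Descriptive note (not part of the original header): owner_cpu stores
 * the bitwise complement of the locking CPU's id, so the stored value is
 * non-zero even for CPU 0 and 0 always means "unlocked".  The return
 * address saved in owner_pc is or'ed with 1, presumably so that owner_pc
 * is never zero while the lock is held.
 */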

static inline int __raw_spin_trylock(raw_spinlock_t *lp)
{
        unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
        int old;

        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0)) {
                lp->owner_pc = pc;
                return 1;
        }
        return _raw_spin_trylock_retry(lp, pc);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lp)
{
        lp->owner_pc = 0;
        _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
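/*
 * Illustrative sketch, not part of the original header: how a caller
 * would typically pair these primitives.  The function name below is
 * made up for illustration only.
 */
static inline void example_spin_critical_section(raw_spinlock_t *lp)
{
        __raw_spin_lock(lp);            /* spin until owner_cpu is claimed */
        /* ... critical section ... */
        __raw_spin_unlock(lp);          /* reset owner_cpu back to 0 */
}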

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
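/*
 * Descriptive note (not part of the original header): the rwlock word
 * packs both sides into one 32-bit value.  Bit 31 (0x80000000) is set
 * while a writer holds the lock; the lower 31 bits count the readers.
 * Hence __raw_read_can_lock() checks that the value is non-negative
 * (no writer) and __raw_write_can_lock() checks that it is zero
 * (no writer and no readers).
 */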

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(raw_rwlock_t *lp);
extern int _raw_read_trylock_retry(raw_rwlock_t *lp);
extern void _raw_write_lock_wait(raw_rwlock_t *lp);
extern int _raw_write_trylock_retry(raw_rwlock_t *lp);

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
        if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
                _raw_read_lock_wait(rw);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        unsigned int old, cmp;

        old = rw->lock;
        do {
                cmp = old;
                old = _raw_compare_and_swap(&rw->lock, old, old - 1);
        } while (cmp != old);
}
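/*
 * Descriptive note (not part of the original header): __raw_read_unlock()
 * retries its compare-and-swap until the swap sees the value it expected,
 * i.e. until the reader-count decrement lands without a concurrent update
 * racing in between.
 */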

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait(rw);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
        unsigned int old;
        old = rw->lock & 0x7fffffffU;
        if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
                return 1;
        return _raw_read_trylock_retry(rw);
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
        if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                return 1;
        return _raw_write_trylock_retry(rw);
}
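/*
 * Illustrative sketch, not part of the original header: typical pairing
 * of the reader and writer primitives.  Both function names are made up
 * for illustration only.
 */
static inline void example_read_side(raw_rwlock_t *rw)
{
        __raw_read_lock(rw);            /* bump the reader count */
        /* ... read-side critical section ... */
        __raw_read_unlock(rw);          /* drop the reader count */
}

static inline void example_write_side(raw_rwlock_t *rw)
{
        __raw_write_lock(rw);           /* set the 0x80000000 writer bit */
        /* ... write-side critical section ... */
        __raw_write_unlock(rw);         /* clear the writer bit */
}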

#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */
