OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk

File: or1k/trunk/linux/linux-2.4/include/asm-x86_64/semaphore.h (rev 1765)
#ifndef _X86_64_SEMAPHORE_H
#define _X86_64_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __KERNEL__

/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                     the original code and to make semaphore waits
 *                     interruptible so that processes waiting on
 *                     semaphores can be killed.
 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
 *                     functions in asm/semaphore-helper.h while fixing a
 *                     potential and subtle race discovered by Ulrich Schmid
 *                     in down_interruptible(). Since I started to play here I
 *                     also implemented the `trylock' semaphore operation.
 *          1999-07-02 Artur Skawina <skawina@geocities.com>
 *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
 *                     do this). Changed calling sequences from push/jmp to
 *                     traditional call/ret.
 * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
 *                     Some hacks to ensure compatibility with recent
 *                     GCC snapshots, to avoid stack corruption when compiling
 *                     with -fomit-frame-pointer. It's not clear whether this
 *                     will be fixed in GCC, as our previous implementation
 *                     was a bit dubious.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 *
 */

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/stringify.h>

struct semaphore {
        atomic_t count;
        int sleepers;
        wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
        long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
                , (int)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
        __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
        __SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

static inline void sema_init (struct semaphore *sem, int val)
{
/*
 *      *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 *
 * I'd rather use the more flexible initialization above, but sadly
 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
 */
        atomic_set(&sem->count, val);
        sem->sleepers = 0;
        init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
        sem->__magic = (int)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}
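
/*
 * Illustrative sketch, not part of the original header: typical ways callers
 * set these semaphores up.  A file-scope semaphore is declared statically with
 * DECLARE_MUTEX()/DECLARE_MUTEX_LOCKED(); one embedded in another structure is
 * initialized at runtime with sema_init() or init_MUTEX().  The struct and
 * function names below are hypothetical, and the example is wrapped in #if 0
 * so it never becomes part of the header.
 */
#if 0
static DECLARE_MUTEX(example_static_lock);      /* static declaration, count = 1 */

struct example_device {
        struct semaphore lock;
};

static void example_device_init(struct example_device *dev)
{
        init_MUTEX(&dev->lock);                 /* same effect as sema_init(&dev->lock, 1) */
}
#endif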
 
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
asmlinkage int  __down_failed_trylock(void  /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/*
 * This is ugly, but we want the default case to fall through.
 * "__down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/x86_64/kernel/semaphore.c
 */
static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        __asm__ __volatile__(
                "# atomic down operation\n\t"
                LOCK "decl %0\n\t"     /* --sem->count */
                "js 2f\n"
                "1:\n"
                LOCK_SECTION_START("")
                "2:\tcall __down_failed\n\t"
                "jmp 1b\n"
                LOCK_SECTION_END
                :"=m" (sem->count)
                :"D" (sem)
                :"memory");
}

/*
 * Interruptible attempt to acquire a semaphore.  If we obtain
 * it, return zero.  If we are interrupted, return -EINTR.
 */
static inline int down_interruptible(struct semaphore * sem)
{
        int result;

#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        __asm__ __volatile__(
                "# atomic interruptible down operation\n\t"
                LOCK "decl %1\n\t"     /* --sem->count */
                "js 2f\n\t"
                "xorl %0,%0\n"
                "1:\n"
                LOCK_SECTION_START("")
                "2:\tcall __down_failed_interruptible\n\t"
                "jmp 1b\n"
                LOCK_SECTION_END
                :"=a" (result), "=m" (sem->count)
                :"D" (sem)
                :"memory");
        return result;
}

/*
 * Non-blocking attempt to down() a semaphore.
 * Returns zero if we acquired it.
 */
static inline int down_trylock(struct semaphore * sem)
{
        int result;

#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        __asm__ __volatile__(
                "# atomic trylock down operation\n\t"
                LOCK "decl %1\n\t"     /* --sem->count */
                "js 2f\n\t"
                "xorl %0,%0\n"
                "1:\n"
                LOCK_SECTION_START("")
                "2:\tcall __down_failed_trylock\n\t"
                "jmp 1b\n"
                LOCK_SECTION_END
                :"=a" (result), "=m" (sem->count)
                :"D" (sem)
                :"memory","cc");
        return result;
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
        __asm__ __volatile__(
                "# atomic up operation\n\t"
                LOCK "incl %0\n\t"     /* ++sem->count */
                "jle 2f\n"
                "1:\n"
                LOCK_SECTION_START("")
                "2:\tcall __up_wakeup\n\t"
                "jmp 1b\n"
                LOCK_SECTION_END
                :"=m" (sem->count)
                :"D" (sem)
                :"memory");
}

static inline int sem_getcount(struct semaphore *sem)
{
        return atomic_read(&sem->count);
}

#endif /* __KERNEL__ */
#endif
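
A minimal usage sketch, not part of the header above, showing how a caller might take and release one of these semaphores: down_interruptible() sleeps and returns -EINTR if a signal arrives, while down_trylock() never sleeps and returns non-zero when the semaphore is already held. The names example_lock, example_critical_section and example_try are hypothetical.

static DECLARE_MUTEX(example_lock);              /* binary semaphore, count starts at 1 */

static int example_critical_section(void)
{
        int ret;

        ret = down_interruptible(&example_lock); /* 0 on success, -EINTR if interrupted */
        if (ret)
                return ret;

        /* ... work protected by example_lock ... */

        up(&example_lock);                       /* wakes one sleeper if anyone is waiting */
        return 0;
}

static int example_try(void)
{
        if (down_trylock(&example_lock))         /* non-zero: semaphore was already held */
                return 1;                        /* busy; do not touch the protected data */

        /* ... brief work protected by example_lock ... */

        up(&example_lock);
        return 0;
}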
