or1k/trunk/linux/linux-2.4/arch/mips64/kernel/semaphore.c (rev 1765)

/*
 * Copyright (C) 1999, 2001, 02, 03 Ralf Baechle
 *
 * Heavily inspired by the Alpha implementation
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>

#ifndef CONFIG_CPU_HAS_LLDSCD
/*
 * On machines without lld/scd we need a spinlock to make the manipulation of
 * sem->count and sem->waking atomic.  Scalability isn't an issue because
 * this lock is used on UP only so it's just an empty variable.
 */
spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL(semaphore_lock);
#endif /* !CONFIG_CPU_HAS_LLDSCD */
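
/*
 * Note: on UP kernels the lock half of spin_lock_irqsave() compiles
 * away; it is the interrupt disable that keeps the read-modify-write
 * of sem->count and sem->waking in the fallback paths below atomic.
 */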
 
/*
 * Semaphores are implemented using a two-way counter: The "count" variable is
 * decremented for each process that tries to sleep, while the "waking" variable
 * is incremented when the "up()" code goes to wake up waiting processes.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently test if
 * they need to do any extra work (up needs to do something only if count was
 * negative before the increment operation).
 *
 * waking_non_zero() must execute atomically.
 *
 * When __up() is called, the count was negative before incrementing it, and we
 * need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to wake up and
 * exit.  ALL waiting processes actually wake up but only the one that gets to
 * the "waking" field first will gate through and acquire the semaphore.  The
 * others will go back to sleep.
 *
 * Note that these functions are only called when there is contention on the
 * lock, and as such all this is the "non-critical" part of the whole semaphore
 * business. The critical part is the inline stuff in <asm/semaphore.h> where
 * we want to avoid any extra jumps and calls.
 */
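
/*
 * A minimal sketch of the fast paths referred to above, assuming the
 * shape of the linux-2.4 MIPS <asm/semaphore.h> inlines (illustrative
 * only; wake_one_more() credits sem->waking as described in the
 * comments further down):
 *
 *     void down(struct semaphore *sem)
 *     {
 *             if (atomic_dec_return(&sem->count) < 0)
 *                     __down_failed(sem);          <- slow path, below
 *     }
 *
 *     void up(struct semaphore *sem)
 *     {
 *             if (atomic_inc_return(&sem->count) <= 0) {
 *                     wake_one_more(sem);
 *                     __up_wakeup(sem);            <- slow path, below
 *             }
 *     }
 */
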
void __up_wakeup(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

EXPORT_SYMBOL(__up_wakeup);

#ifdef CONFIG_CPU_HAS_LLSC

static inline int waking_non_zero(struct semaphore *sem)
{
        int ret, tmp;

        __asm__ __volatile__(
        "1:     ll      %1, %2                  # waking_non_zero       \n"
        "       blez    %1, 2f                                          \n"
        "       subu    %0, %1, 1                                       \n"
        "       sc      %0, %2                                          \n"
        "       beqz    %0, 1b                                          \n"
        "2:                                                             \n"
        : "=r" (ret), "=r" (tmp), "+m" (sem->waking)
        : "0" (0));

        return ret;
}
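
/*
 * A C rendering of the ll/sc loop above (illustrative only; the helpers
 * load_linked() and store_conditional() are hypothetical stand-ins for
 * the ll and sc instructions):
 *
 *     ret = 0;                                       <- "0" (0) constraint
 *     do {
 *             tmp = load_linked(&sem->waking);       <- ll
 *             if (tmp <= 0)
 *                     return 0;                      <- blez (ret still 0)
 *     } while (!store_conditional(&sem->waking, tmp - 1));  <- subu, sc, beqz
 *     return 1;                                      <- sc's success flag in ret
 */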

#else /* !CONFIG_CPU_HAS_LLSC */

static inline int waking_non_zero(struct semaphore *sem)
{
        unsigned long flags;
        int waking, ret = 0;

        spin_lock_irqsave(&semaphore_lock, flags);
        waking = atomic_read(&sem->waking);
        if (waking > 0) {
                atomic_set(&sem->waking, waking - 1);
                ret = 1;
        }
        spin_unlock_irqrestore(&semaphore_lock, flags);

        return ret;
}

#endif /* !CONFIG_CPU_HAS_LLSC */

/*
 * Perform the "down" function.  Return zero if the semaphore was acquired,
 * negative if the task was signalled out of the wait.
 *
 * If called from down, the return is ignored and the wait loop is not
 * interruptible.  This means that a task waiting on a semaphore using "down()"
 * cannot be killed until someone does an "up()" on the semaphore.
 *
 * If called from down_interruptible, the return value gets checked upon return.
 * If the return value is negative then the task continues with the negative
 * value in the return register (it can be tested by the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
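
/*
 * A caller-side sketch of the two forms (illustrative; my_sem and
 * do_work() are hypothetical):
 *
 *     down(&my_sem);                  <- sleeps uninterruptibly
 *     do_work();
 *     up(&my_sem);
 *
 *     ret = down_interruptible(&my_sem);
 *     if (ret)
 *             return ret;             <- the negative value (-EINTR)
 *     do_work();
 *     up(&my_sem);
 */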

void __down_failed(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        wait_queue_t wait;

        init_waitqueue_entry(&wait, tsk);
        __set_current_state(TASK_UNINTERRUPTIBLE);
        add_wait_queue_exclusive(&sem->wait, &wait);

        /*
         * Ok, we're set up.  sem->count is known to be less than zero
         * so we must wait.
         *
         * We can let go of the lock for purposes of waiting.
         * We re-acquire it after awaking so as to protect
         * all semaphore operations.
         *
         * If "up()" is called before we call waking_non_zero() then
         * we will catch it right away.  If it is called later then
         * we will have to go through a wakeup cycle to catch it.
         *
         * Multiple waiters contend for the semaphore lock to see
         * who gets to gate through and who has to wait some more.
         */
        for (;;) {
                if (waking_non_zero(sem))
                        break;
                schedule();
                __set_current_state(TASK_UNINTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&sem->wait, &wait);
}

EXPORT_SYMBOL(__down_failed);

#ifdef CONFIG_CPU_HAS_LLDSCD

/*
 * waking_non_zero_interruptible:
 *      1       got the lock
 *      0       go to sleep
 *      -EINTR  interrupted
 *
 * We must undo the sem->count down_interruptible decrement
 * simultaneously and atomically with the sem->waking adjustment,
 * otherwise we can race with wake_one_more.
 *
 * This is accomplished by doing a 64-bit lld/scd on the 2 32-bit words.
 *
 * This is crazy.  Normally it's strictly forbidden to use 64-bit operations
 * in the 32-bit MIPS kernel.  In this case it's however ok because if an
 * interrupt has destroyed the upper half of the registers the scd will fail.
 * Note also that this will not work for MIPS32 CPUs!
 *
 * Pseudocode:
 *
 * If(sem->waking > 0) {
 *      Decrement(sem->waking)
 *      Return(SUCCESS)
 * } else If(signal_pending(tsk)) {
 *      Increment(sem->count)
 *      Return(-EINTR)
 * } else {
 *      Return(SLEEP)
 * }
 */
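
/*
 * The layout the lld/scd trick relies on, as implied by the constants
 * and tests in the asm below: a single 64-bit lld pulls in both 32-bit
 * words, with sem->count in the high word of the loaded register and
 * sem->waking in the low word.
 *
 *      63            32 31             0
 *     +----------------+----------------+
 *     |   sem->count   |  sem->waking   |
 *     +----------------+----------------+
 *
 * Hence "dli $1, 0x0000000100000000; daddu" increments count, while
 * "sll $1, %1, 0" sign-extends the low word so that blez tests waking.
 */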

static inline int
waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
{
        long ret, tmp;

        __asm__ __volatile__(
        "       .set    push            # waking_non_zero_interruptible \n"
        "       .set    mips3                                           \n"
        "       .set    noat                                            \n"
        "0:     lld     %1, %2                                          \n"
        "       li      %0, 0                                           \n"
        "       sll     $1, %1, 0                                       \n"
        "       blez    $1, 1f                                          \n"
        "       daddiu  %1, %1, -1                                      \n"
        "       li      %0, 1                                           \n"
        "       b       2f                                              \n"
        "1:     beqz    %3, 2f                                          \n"
        "       li      %0, %4                                          \n"
        "       dli     $1, 0x0000000100000000                          \n"
        "       daddu   %1, %1, $1                                      \n"
        "2:     scd     %1, %2                                          \n"
        "       beqz    %1, 0b                                          \n"
        "       .set    pop                                             \n"
        : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
        : "r" (signal_pending(tsk)), "i" (-EINTR));

        return ret;
}
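
/*
 * Reading aid for the asm above: %0 is ret, %1 is tmp (the 64-bit
 * count/waking pair, which scd then reuses as its success flag, so
 * "beqz %1, 0b" retries a failed store), $1 is the assembler temporary
 * freed by ".set noat", and branch delay slots are left for gas to fill
 * since the block never switches to noreorder.
 */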

#else /* !CONFIG_CPU_HAS_LLDSCD */

static inline int waking_non_zero_interruptible(struct semaphore *sem,
                                                struct task_struct *tsk)
{
        int waking, pending, ret = 0;
        unsigned long flags;

        pending = signal_pending(tsk);

        spin_lock_irqsave(&semaphore_lock, flags);
        waking = atomic_read(&sem->waking);
        if (waking > 0) {
                atomic_set(&sem->waking, waking - 1);
                ret = 1;
        } else if (pending) {
                atomic_set(&sem->count, atomic_read(&sem->count) + 1);
                ret = -EINTR;
        }
        spin_unlock_irqrestore(&semaphore_lock, flags);

        return ret;
}

#endif /* !CONFIG_CPU_HAS_LLDSCD */

int __down_failed_interruptible(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        wait_queue_t wait;
        int ret = 0;

        init_waitqueue_entry(&wait, tsk);
        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue_exclusive(&sem->wait, &wait);

        /*
         * Ok, we're set up.  sem->count is known to be less than zero
         * so we must wait.
         *
         * We can let go of the lock for purposes of waiting.
         * We re-acquire it after awaking so as to protect
         * all semaphore operations.
         *
         * If "up()" is called before we call waking_non_zero_interruptible()
         * then we will catch it right away.  If it is called later then
         * we will have to go through a wakeup cycle to catch it.
         *
         * Multiple waiters contend for the semaphore lock to see
         * who gets to gate through and who has to wait some more.
         */
        for (;;) {
                ret = waking_non_zero_interruptible(sem, tsk);
                if (ret) {
                        if (ret == 1)
                                /* 1 means we got the semaphore; this
                                 * function returns nonzero only when
                                 * interrupted, so map it to 0.  -arca */
                                ret = 0;
                        break;
                }
                schedule();
                __set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&sem->wait, &wait);

        return ret;
}

EXPORT_SYMBOL(__down_failed_interruptible);
