OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [arch/] [x86_64/] [kernel/] [semaphore.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
 * x86_64 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@redhat.com>
 */
15
#include <linux/config.h>
16
#include <linux/sched.h>
17
 
18
#include <asm/semaphore.h>
19
 
20
/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up()
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
40
 
41
/*
 * Logic:
 *  - Only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - When we go from a non-negative count to a negative one, we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we are on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
50
 
51
/*
 * Slow path of up(): reached only when the inline up() saw a negative
 * count before its increment, i.e. at least one task may be sleeping
 * on this semaphore and must be woken.
 */
void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}
55
 
56
/* Protects "sleepers" and orders the sleeper/waker handshakes below. */
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
57
 
58
/*
 * Slow path of down(): the inline down() already decremented sem->count
 * and found it went negative, so we must queue ourselves and sleep until
 * the count says we own the semaphore.
 *
 * NOTE(review): the order below is deliberate and must not be changed —
 * we set TASK_UNINTERRUPTIBLE and join the wait queue *before* taking
 * semaphore_lock and re-checking the count, so a concurrent up() cannot
 * slip a wakeup in between the check and the sleep.
 */
void __down(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        /* Go (nominally) to sleep and queue up before synchronizing. */
        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock.
                 * (sleepers - 1: the inline down() already took
                 * our own -1 out of the count.)
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        /* Count is non-negative: the semaphore is ours. */
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                /* Woken up: re-arm the sleep state before re-checking. */
                tsk->state = TASK_UNINTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
        /* We got it; pass any surplus wakeup on to the next sleeper. */
        wake_up(&sem->wait);
}
90
 
91
/*
 * Slow path of down_interruptible(): like __down(), but the sleep is
 * TASK_INTERRUPTIBLE and a pending signal aborts the acquisition.
 *
 * Returns 0 when the semaphore was acquired, -EINTR when interrupted
 * by a signal (in which case the count has been corrected and the
 * caller does NOT own the semaphore).
 */
int __down_interruptible(struct semaphore * sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        /* Queue up before synchronizing, same ordering as __down(). */
        tsk->state = TASK_INTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers ++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * (give back our -1 plus everybody else's)
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock. The
                 * "-1" is because we're still hoping to get
                 * the lock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        /* Count is non-negative: the semaphore is ours. */
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                /* Woken up: re-arm the sleep state before re-checking. */
                tsk->state = TASK_INTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
        /* Whether we got it or bailed, let the next sleeper re-check. */
        wake_up(&sem->wait);
        return retval;
}
141
 
142
/*
143
 * Trylock failed - make sure we correct for
144
 * having decremented the count.
145
 *
146
 * We could have done the trylock with a
147
 * single "cmpxchg" without failure cases,
148
 * but then it wouldn't work on a 386.
149
 */
150
int __down_trylock(struct semaphore * sem)
151
{
152
        int sleepers;
153
        unsigned long flags;
154
 
155
        spin_lock_irqsave(&semaphore_lock, flags);
156
        sleepers = sem->sleepers + 1;
157
        sem->sleepers = 0;
158
 
159
        /*
160
         * Add "everybody else" and us into it. They aren't
161
         * playing, because we own the spinlock.
162
         */
163
        if (!atomic_add_negative(sleepers, &sem->count))
164
                wake_up(&sem->wait);
165
 
166
        spin_unlock_irqrestore(&semaphore_lock, flags);
167
        return 1;
168
}
169
 
170
 

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.