OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/linux/linux-2.4/arch/s390x/kernel/semaphore.c (rev 1275, author phoenix)

/*
 *  linux/arch/S390/kernel/semaphore.c
 *
 *  S390 version
 *    Copyright (C) 1998-2000 IBM Corporation
 *    Author(s): Martin Schwidefsky
 *
 *  Derived from "linux/arch/i386/kernel/semaphore.c"
 *    Copyright (C) 1999, Linus Torvalds
 *
 */
#include <linux/sched.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering are
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
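
/*
 * For reference, a simplified sketch of the fast path described above.
 * The real version lives in <asm/semaphore.h> (on s390 it is written
 * in assembler); the atomic_*_return() helpers below are stand-ins for
 * whatever atomic increment/decrement the architecture provides:
 *
 *	static inline void down(struct semaphore *sem)
 *	{
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down(sem);		// slow path: contention
 *	}
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);		// a sleeper may need waking
 *	}
 */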

/*
 * Logic:
 *  - Only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - When we go from a non-negative count to a negative one, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */
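
/*
 * A worked example of the boundary conditions (illustrative numbers,
 * with a semaphore initialized to 1): task A's down() takes count from
 * 1 to 0 and proceeds. Task B's down() takes count from 0 to -1 and
 * enters __down(): sleepers becomes 1, atomic_add_negative(1 - 1,
 * &count) leaves count at -1, so B sets sleepers back to 1 and sleeps.
 * A's up() takes count from -1 to 0 (negative to non-negative) and
 * calls __up(), waking B; B's next pass adds 1 - 1 = 0 to a count of
 * 0, which is non-negative, so B clears sleepers and owns the
 * semaphore.
 */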

void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void __down(struct semaphore * sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_UNINTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_UNINTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        remove_wait_queue(&sem->wait, &wait);
        tsk->state = TASK_RUNNING;
        wake_up(&sem->wait);
}
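
/*
 * Typical 2.4-era use of the uninterruptible path (a sketch; "my_sem"
 * is an illustrative name, not from this file):
 *
 *	static DECLARE_MUTEX(my_sem);	// semaphore initialized to 1
 *
 *	down(&my_sem);			// may sleep in __down() above
 *	... critical section ...
 *	up(&my_sem);			// may wake a sleeper via __up()
 */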

int __down_interruptible(struct semaphore * sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        tsk->state = TASK_INTERRUPTIBLE;
        add_wait_queue_exclusive(&sem->wait, &wait);

        spin_lock_irq(&semaphore_lock);
        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock. The
                 * "-1" is because we're still hoping to get
                 * the lock.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irq(&semaphore_lock);

                schedule();
                tsk->state = TASK_INTERRUPTIBLE;
                spin_lock_irq(&semaphore_lock);
        }
        spin_unlock_irq(&semaphore_lock);
        tsk->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
        wake_up(&sem->wait);
        return retval;
}
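
/*
 * The interruptible variant propagates -EINTR, so callers must check
 * the return value (a sketch; "my_sem" is an illustrative name):
 *
 *	if (down_interruptible(&my_sem))
 *		return -ERESTARTSYS;	// interrupted by a signal
 *	... critical section ...
 *	up(&my_sem);
 */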

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 */
int __down_trylock(struct semaphore * sem)
{
        unsigned long flags;
        int sleepers;

        spin_lock_irqsave(&semaphore_lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock.
         */
        if (!atomic_add_negative(sleepers, &sem->count))
                wake_up(&sem->wait);

        spin_unlock_irqrestore(&semaphore_lock, flags);
        return 1;
}
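
/*
 * The inline down_trylock() only calls __down_trylock() after its
 * decrement has already made the count negative, so the "return 1"
 * above reports failure once the count has been repaired (a sketch;
 * "my_sem" is an illustrative name):
 *
 *	if (down_trylock(&my_sem))
 *		return -EBUSY;		// contended; we did not block
 *	... critical section ...
 *	up(&my_sem);
 */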
