OpenCores — Subversion repository c0or1k
URL: https://opencores.org/ocsvn/c0or1k/c0or1k/trunk
File: src/lib/wait.c (rev 7)
/*
 * Implementation of wakeup/wait for processes.
 *
 * Copyright (C) 2007, 2008 Bahadir Balban
 */
#include <l4/generic/scheduler.h>
#include <l4/lib/wait.h>
#include <l4/lib/spinlock.h>
#include <l4/api/errno.h>

/*
 * Records the wait details of a task so that arbitrary
 * wakers can know where the task is sleeping.
 */
void task_set_wqh(struct ktcb *task, struct waitqueue_head *wqh,
                  struct waitqueue *wq)
{
        unsigned long irqflags;

        spin_lock_irq(&task->waitlock, &irqflags);
        task->waiting_on = wqh;
        task->wq = wq;
        spin_unlock_irq(&task->waitlock, irqflags);
}

/*
 * Clears all wait details of a task. Used as the task is
 * removed from its waitqueue and is about to wake up.
 */
void task_unset_wqh(struct ktcb *task)
{
        unsigned long irqflags;

        spin_lock_irq(&task->waitlock, &irqflags);
        task->waiting_on = 0;
        task->wq = 0;
        spin_unlock_irq(&task->waitlock, irqflags);
}

/*
 * Initiate the wait for the current task, which has already
 * been placed in a waitqueue.
 *
 * NOTE: This enables preemption; wait_on_prepare()
 * should be called first.
 */
int wait_on_prepared_wait(void)
{
        /* Now safe to sleep by preemption */
        preempt_enable();

        /* Sleep voluntarily to initiate the wait */
        schedule();

        /* Did we wake up normally or get interrupted? */
        if (current->flags & TASK_INTERRUPTED) {
                current->flags &= ~TASK_INTERRUPTED;
                return -EINTR;
        }

        /* No errors */
        return 0;
}

/*
 * Do all preparations to sleep but return without sleeping.
 * This is useful if the task needs to get into the waitqueue
 * before it releases a lock.
 *
 * NOTE: This disables preemption; it should be re-enabled by a
 * call to wait_on_prepared_wait() - the other function of the pair.
 */
int wait_on_prepare(struct waitqueue_head *wqh, struct waitqueue *wq)
{
        unsigned long irqflags;

        /* Disable preemption so that we cannot sleep via preemption yet */
        preempt_disable();

        spin_lock_irq(&wqh->slock, &irqflags);
        wqh->sleepers++;
        list_insert_tail(&wq->task_list, &wqh->task_list);
        task_set_wqh(current, wqh, wq);
        sched_prepare_sleep();
        //printk("(%d) waiting on wqh at: 0x%p\n",
        //       current->tid, wqh);
        spin_unlock_irq(&wqh->slock, irqflags);

        return 0;
}
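
/*
 * Illustrative sketch (not part of this file): the intended call
 * pattern for the prepare/wait pair, assuming a hypothetical object
 * with a spinlock-protected condition and an embedded waitqueue
 * head. The task enters the waitqueue while still holding the lock,
 * so a waker cannot slip in between the condition check and the
 * sleep. All example_* names and fields are assumptions.
 */
#if 0
static int example_wait_for_condition(struct example_object *obj)
{
        unsigned long irqflags;

        CREATE_WAITQUEUE_ON_STACK(wq, current);

        spin_lock_irq(&obj->lock, &irqflags);
        if (obj->condition) {
                /* Nothing to wait for */
                spin_unlock_irq(&obj->lock, irqflags);
                return 0;
        }

        /* Queue ourselves while the lock is still held */
        wait_on_prepare(&obj->wqh, &wq);
        spin_unlock_irq(&obj->lock, irqflags);

        /* Re-enables preemption and sleeps; -EINTR if interrupted */
        return wait_on_prepared_wait();
}
#endif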

/* Sleep without any condition */
int wait_on(struct waitqueue_head *wqh)
{
        unsigned long irqsave;

        CREATE_WAITQUEUE_ON_STACK(wq, current);
        spin_lock_irq(&wqh->slock, &irqsave);
        wqh->sleepers++;
        list_insert_tail(&wq.task_list, &wqh->task_list);
        task_set_wqh(current, wqh, &wq);
        sched_prepare_sleep();
        //printk("(%d) waiting on wqh at: 0x%p\n",
        //       current->tid, wqh);
        spin_unlock_irq(&wqh->slock, irqsave);
        schedule();

        /* Did we wake up normally or get interrupted? */
        if (current->flags & TASK_INTERRUPTED) {
                current->flags &= ~TASK_INTERRUPTED;
                return -EINTR;
        }

        return 0;
}
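
/*
 * Illustrative sketch (not part of this file): a caller that blocks
 * unconditionally on a waitqueue head and propagates -EINTR if the
 * sleep was interrupted. The function name and the waitqueue head
 * parameter are hypothetical.
 */
#if 0
static int example_block_for_request(struct waitqueue_head *request_wqh)
{
        int err;

        /* Returns 0 on a normal wakeup, -EINTR if interrupted */
        if ((err = wait_on(request_wqh)) < 0)
                return err;

        return 0;
}
#endif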

/* Wake up all sleepers in the queue */
void wake_up_all(struct waitqueue_head *wqh, unsigned int flags)
{
        unsigned long irqsave;

        spin_lock_irq(&wqh->slock, &irqsave);
        BUG_ON(wqh->sleepers < 0);
        while (wqh->sleepers > 0) {
                struct waitqueue *wq = link_to_struct(wqh->task_list.next,
                                                      struct waitqueue,
                                                      task_list);
                struct ktcb *sleeper = wq->task;
                task_unset_wqh(sleeper);
                BUG_ON(list_empty(&wqh->task_list));
                list_remove_init(&wq->task_list);
                wqh->sleepers--;
                if (flags & WAKEUP_INTERRUPT)
                        sleeper->flags |= TASK_INTERRUPTED;
                // printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
                spin_unlock_irq(&wqh->slock, irqsave);

                if (flags & WAKEUP_SYNC)
                        sched_resume_sync(sleeper);
                else
                        sched_resume_async(sleeper);

                spin_lock_irq(&wqh->slock, &irqsave);
        }
        spin_unlock_irq(&wqh->slock, irqsave);
}

/* Wake up a single waiter */
void wake_up(struct waitqueue_head *wqh, unsigned int flags)
{
        unsigned long irqflags;

        BUG_ON(wqh->sleepers < 0);

        spin_lock_irq(&wqh->slock, &irqflags);

        if (wqh->sleepers > 0) {
                struct waitqueue *wq = link_to_struct(wqh->task_list.next,
                                                      struct waitqueue,
                                                      task_list);
                struct ktcb *sleeper = wq->task;
                BUG_ON(list_empty(&wqh->task_list));
                list_remove_init(&wq->task_list);
                wqh->sleepers--;
                task_unset_wqh(sleeper);
                if (flags & WAKEUP_INTERRUPT)
                        sleeper->flags |= TASK_INTERRUPTED;
                // printk("(%d) Waking up (%d)\n", current->tid, sleeper->tid);
                spin_unlock_irq(&wqh->slock, irqflags);

                if (flags & WAKEUP_SYNC)
                        sched_resume_sync(sleeper);
                else
                        sched_resume_async(sleeper);
                return;
        }
        spin_unlock_irq(&wqh->slock, irqflags);
}
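
/*
 * Illustrative sketch (not part of this file): the waker side,
 * showing the flags accepted by the wakeup calls. WAKEUP_INTERRUPT
 * marks each woken task TASK_INTERRUPTED so its wait returns -EINTR;
 * WAKEUP_SYNC resumes the sleeper synchronously instead of leaving
 * it to the scheduler. The example_* function names are assumptions.
 */
#if 0
static void example_signal_one_sleeper(struct waitqueue_head *wqh)
{
        /* Wake a single waiter and switch to it immediately */
        wake_up(wqh, WAKEUP_SYNC);
}

static void example_abort_all_sleepers(struct waitqueue_head *wqh)
{
        /* Force every sleeper out of its wait with -EINTR */
        wake_up_all(wqh, WAKEUP_INTERRUPT);
}
#endif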

/*
 * Wakes up a single task. If the task is not waiting, or was woken
 * up by someone else while we were peeking at it, returns -1.
 * WAKEUP_SYNC makes us yield to the woken task immediately;
 * otherwise resumption is left to the scheduler's discretion.
 */
int wake_up_task(struct ktcb *task, unsigned int flags)
{
        unsigned long irqflags[2];
        struct waitqueue_head *wqh;
        struct waitqueue *wq;

        spin_lock_irq(&task->waitlock, &irqflags[0]);
        if (!task->waiting_on) {
                spin_unlock_irq(&task->waitlock, irqflags[0]);
                return -1;
        }
        wqh = task->waiting_on;
        wq = task->wq;

        /*
         * We have found the waitqueue head.
         *
         * That needs to be locked first to conform with the
         * lock order and avoid deadlocks. Release the task's
         * waitlock and take the wqh's lock.
         */
        spin_unlock_irq(&task->waitlock, irqflags[0]);

        /*
         * The task can be woken up by someone else here.
         */

        spin_lock_irq(&wqh->slock, &irqflags[0]);

        /*
         * Now check whether the task is still waiting and in the
         * same queue. The task's waitlock is taken nested inside
         * the wqh lock, with its own saved irq flags.
         */
        spin_lock_irq(&task->waitlock, &irqflags[1]);
        if (task->waiting_on != wqh) {
                /* No, the task has been woken by someone else */
                spin_unlock_irq(&wqh->slock, irqflags[0]);
                spin_unlock_irq(&task->waitlock, irqflags[1]);
                return -1;
        }

        /* Now we can remove the task from its waitqueue */
        list_remove_init(&wq->task_list);
        wqh->sleepers--;
        task->waiting_on = 0;
        task->wq = 0;
        if (flags & WAKEUP_INTERRUPT)
                task->flags |= TASK_INTERRUPTED;
        spin_unlock_irq(&wqh->slock, irqflags[0]);
        spin_unlock_irq(&task->waitlock, irqflags[1]);

        /*
         * The task is removed from its waitqueue. Now we can
         * safely resume it without locks, as this is the only
         * code path that can resume the task.
         */
        if (flags & WAKEUP_SYNC)
                sched_resume_sync(task);
        else
                sched_resume_async(task);

        return 0;
}
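
/*
 * Illustrative sketch (not part of this file): waking a specific
 * task rather than a waitqueue. A -1 return simply means the task
 * was not sleeping, or had already been woken by someone else,
 * which a caller can usually ignore. The example_* function name
 * is an assumption.
 */
#if 0
static void example_cancel_task_wait(struct ktcb *task)
{
        /* Interrupt the task's wait, if any, and switch to it now */
        if (wake_up_task(task, WAKEUP_INTERRUPT | WAKEUP_SYNC) < 0) {
                /* Task was not waiting; nothing to do */
        }
}
#endif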