OpenCores
URL https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

Subversion Repositories c0or1k

[/] [c0or1k/] [trunk/] [src/] [lib/] [mutex.c] - Blame information for rev 2

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 drasko
/*
2
 * Mutex/Semaphore implementations.
3
 *
4
 * Copyright (c) 2007 Bahadir Balban
5
 */
6
#include <l4/lib/mutex.h>
7
#include <l4/generic/scheduler.h>
8
#include <l4/generic/tcb.h>
9
#include <l4/api/errno.h>
10
 
11
/*
12
 * Semaphore usage:
13
 *
14
 * Producer locks/produces/unlocks data.
15
 * Producer does semaphore up.
16
 * --
17
 * Consumer does semaphore down.
18
 * Consumer locks/consumes/unlocks data.
19
 */
20
 
21
#if 0
22
/* Update it */
23
/*
24
 * Semaphore *up* for multiple producers. If any consumer is waiting, wake them
25
 * up, otherwise, sleep. Effectively producers and consumers use the same
26
 * waitqueue and there's only one kind in the queue at any one time.
27
 */
28
void sem_up(struct mutex *mutex)
29
{
30
        int cnt;
31
 
32
        spin_lock(&mutex->slock);
33
        if ((cnt = mutex_inc(&mutex->lock)) <= 0) {
34
                struct waitqueue *wq;
35
                struct ktcb *sleeper;
36
 
37
                /* Each producer wakes one consumer in queue. */
38
                mutex->sleepers--;
39
                BUG_ON(list_empty(&mutex->wq.task_list));
40
                list_foreach_struct(wq, &mutex->wq.task_list, task_list) {
41
                        list_remove_init(&wq->task_list);
42
                        spin_unlock(&mutex->slock);
43
                        sleeper = wq->task;
44
                        printk("(%d) Waking up consumer (%d)\n", current->tid,
45
                               sleeper->tid);
46
                        sched_resume_task(sleeper);
47
                        return; /* Don't iterate, wake only one task. */
48
                }
49
        } else if (cnt > 0) {
50
                DECLARE_WAITQUEUE(wq, current);
51
                link_init(&wq.task_list);
52
                list_insert_tail(&wq.task_list, &mutex->wq.task_list);
53
                mutex->sleepers++;
54
                sched_prepare_sleep();
55
                printk("(%d) produced, now sleeping...\n", current->tid);
56
                spin_unlock(&mutex->slock);
57
                schedule();
58
        }
59
}
60
 
61
/*
62
 * Semaphore *down* for multiple consumers. If any producer is sleeping, wake them
63
 * up, otherwise, sleep. Effectively producers and consumers use the same
64
 * waitqueue and there's only one kind in the queue at any one time.
65
 */
66
void sem_down(struct mutex *mutex)
67
{
68
        int cnt;
69
 
70
        spin_lock(&mutex->slock);
71
        if ((cnt = mutex_dec(&mutex->lock)) >= 0) {
72
                struct waitqueue *wq;
73
                struct ktcb *sleeper;
74
 
75
                /* Each consumer wakes one producer in queue. */
76
                mutex->sleepers--;
77
                BUG_ON(list_empty(&mutex->wq.task_list));
78
                list_foreach_struct(wq, &mutex->wq.task_list, task_list) {
79
                        list_remove_init(&wq->task_list);
80
                        spin_unlock(&mutex->slock);
81
                        sleeper = wq->task;
82
                        printk("(%d) Waking up producer (%d)\n", current->tid,
83
                               sleeper->tid);
84
                        sched_resume_task(sleeper);
85
                        return; /* Don't iterate, wake only one task. */
86
                }
87
        } else if (cnt < 0) {
88
                DECLARE_WAITQUEUE(wq, current);
89
                link_init(&wq.task_list);
90
                list_insert_tail(&wq.task_list, &mutex->wq.task_list);
91
                mutex->sleepers++;
92
                sched_prepare_sleep();
93
                printk("(%d) Waiting to consume, now sleeping...\n", current->tid);
94
                spin_unlock(&mutex->slock);
95
                schedule();
96
        }
97
}
98
#endif
99
 
100
/* Non-blocking attempt to lock mutex */
101
int mutex_trylock(struct mutex *mutex)
102
{
103
        int success;
104
 
105
        spin_lock(&mutex->wqh.slock);
106
        if ((success = __mutex_lock(&mutex->lock)))
107
                current->nlocks++;
108
        spin_unlock(&mutex->wqh.slock);
109
 
110
        return success;
111
}
112
 
113
/*
 * Acquire the mutex, sleeping until it is available.
 * Returns 0 on success, -EINTR if the sleep was interrupted
 * (TASK_INTERRUPTED is cleared before returning).
 */
int mutex_lock(struct mutex *mutex)
{
        /* NOTE:
         * Every time we're woken up we retry acquiring the mutex. It is
         * non-deterministic how many retries will result in success.
         * We may need to add priority-based locking.
         */
        for (;;) {
                spin_lock(&mutex->wqh.slock);
                if (!__mutex_lock(&mutex->lock)) { /* Could not lock, sleep. */
                        /* Queue ourselves on the mutex waitqueue while still
                         * holding wqh.slock, so a concurrent unlock cannot
                         * miss us. Sleep is prepared before the unlock and
                         * schedule() for the same reason. */
                        CREATE_WAITQUEUE_ON_STACK(wq, current);
                        task_set_wqh(current, &mutex->wqh, &wq);
                        list_insert_tail(&wq.task_list, &mutex->wqh.task_list);
                        mutex->wqh.sleepers++;
                        sched_prepare_sleep();
                        spin_unlock(&mutex->wqh.slock);
                        // printk("(%d) sleeping...\n", current->tid);
                        schedule();

                        /* Did we wake up normally or get interrupted */
                        if (current->flags & TASK_INTERRUPTED) {
                                current->flags &= ~TASK_INTERRUPTED;
                                return -EINTR;
                        }
                } else {
                        /* Acquired: account the lock; wqh.slock is still
                         * held here and released after the loop. */
                        current->nlocks++;
                        break;
                }
        }
        spin_unlock(&mutex->wqh.slock);
        return 0;
}
145
 
146
static inline void mutex_unlock_common(struct mutex *mutex, int sync)
147
{
148
        struct ktcb *c = current; if (c);
149
        spin_lock(&mutex->wqh.slock);
150
        __mutex_unlock(&mutex->lock);
151
        current->nlocks--;
152
        BUG_ON(current->nlocks < 0);
153
        BUG_ON(mutex->wqh.sleepers < 0);
154
        if (mutex->wqh.sleepers > 0) {
155
                struct waitqueue *wq = link_to_struct(mutex->wqh.task_list.next,
156
                                                      struct waitqueue,
157
                                                      task_list);
158
                struct ktcb *sleeper = wq->task;
159
 
160
                task_unset_wqh(sleeper);
161
                BUG_ON(list_empty(&mutex->wqh.task_list));
162
                list_remove_init(&wq->task_list);
163
                mutex->wqh.sleepers--;
164
                spin_unlock(&mutex->wqh.slock);
165
 
166
                /*
167
                 * TODO:
168
                 * Here someone could grab the mutex, this is fine
169
                 * but it may potentially starve the sleeper causing
170
                 * non-determinism. We may consider priorities here.
171
                 */
172
                if (sync)
173
                        sched_resume_sync(sleeper);
174
                else
175
                        sched_resume_async(sleeper);
176
 
177
                /* Don't iterate, wake only one task. */
178
                return;
179
        }
180
        spin_unlock(&mutex->wqh.slock);
181
}
182
 
183
/* Release the mutex, waking one sleeper synchronously (sched_resume_sync). */
void mutex_unlock(struct mutex *mutex)
{
        mutex_unlock_common(mutex, 1);
}
187
 
188
/* Release the mutex, waking one sleeper asynchronously (sched_resume_async). */
void mutex_unlock_async(struct mutex *mutex)
{
        mutex_unlock_common(mutex, 0);
}
192
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.