/*
 * Userspace mutex implementation
 *
 * Copyright (C) 2009 Bahadir Bilgehan Balban
 */
#include <l4/lib/wait.h>
#include <l4/lib/mutex.h>
#include <l4/lib/printk.h>
#include <l4/generic/scheduler.h>
#include <l4/generic/container.h>
#include <l4/generic/tcb.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include <l4/api/mutex.h>
#include INC_API(syscall.h)
#include INC_ARCH(exception.h)
#include INC_GLUE(memory.h)
#include INC_GLUE(mapping.h)

void init_mutex_queue_head(struct mutex_queue_head *mqhead)
{
        memset(mqhead, 0, sizeof(*mqhead));
        link_init(&mqhead->list);
        mutex_init(&mqhead->mutex_control_mutex);
}

void mutex_queue_head_lock(struct mutex_queue_head *mqhead)
{
        mutex_lock(&mqhead->mutex_control_mutex);
}

void mutex_queue_head_unlock(struct mutex_queue_head *mqhead)
{
        /* Async unlock because in some cases preemption may be disabled here */
        mutex_unlock_async(&mqhead->mutex_control_mutex);
}

void mutex_queue_init(struct mutex_queue *mq, unsigned long physical)
{
        /* This is the unique key that describes this mutex */
        mq->physical = physical;

        link_init(&mq->list);
        waitqueue_head_init(&mq->wqh_holders);
        waitqueue_head_init(&mq->wqh_contenders);
}

void mutex_control_add(struct mutex_queue_head *mqhead, struct mutex_queue *mq)
{
        BUG_ON(!list_empty(&mq->list));

        list_insert(&mq->list, &mqhead->list);
        mqhead->count++;
}

void mutex_control_remove(struct mutex_queue_head *mqhead,
                          struct mutex_queue *mq)
{
        list_remove_init(&mq->list);
        mqhead->count--;
}

/* Note, this has ptr/zero returns rather than ptr/negative-error */
struct mutex_queue *mutex_control_find(struct mutex_queue_head *mqhead,
                                       unsigned long mutex_physical)
{
        struct mutex_queue *mutex_queue;

        /* Find the mutex queue with this key */
        list_foreach_struct(mutex_queue, &mqhead->list, list)
                if (mutex_queue->physical == mutex_physical)
                        return mutex_queue;

        return 0;
}

struct mutex_queue *mutex_control_create(unsigned long mutex_physical)
{
        struct mutex_queue *mutex_queue;

        /* Allocate the mutex queue structure */
        if (!(mutex_queue = alloc_user_mutex()))
                return 0;

        /* Init and return */
        mutex_queue_init(mutex_queue, mutex_physical);

        return mutex_queue;
}

void mutex_control_delete(struct mutex_queue *mq)
{
        BUG_ON(!list_empty(&mq->list));

        /* Sanity-check the waitqueue internals: no sleepers may remain */
        BUG_ON(mq->wqh_contenders.sleepers);
        BUG_ON(mq->wqh_holders.sleepers);
        BUG_ON(!list_empty(&mq->wqh_contenders.task_list));
        BUG_ON(!list_empty(&mq->wqh_holders.task_list));

        free_user_mutex(mq);
}

/*
 * Here's how this whole mutex implementation works:
 *
 * A thread that locked a user mutex learns how many
 * contentions were on it as it unlocks it. It is then obliged
 * to go to the kernel and wake that many threads up.
 *
 * Each contender sleeps in the kernel, but the unlocker and
 * the contenders arrive in the kernel asynchronously.
 *
 * Mutex queue scenarios at any one time:
 *
 * 1) There may be multiple contenders waiting for
 * an earlier lock holder:
 *
 * Lock holders waitqueue: Empty
 * Contenders waitqueue:   C - C - C - C
 * Contenders to wake up:  0
 *
 * The lock holder wakes up as many contenders as it counted
 * earlier in userspace as it released the lock.
 *
 * 2) There may be one lock holder waiting for contenders to arrive:
 *
 * Lock holders waitqueue: LH
 * Contenders waitqueue:   Empty
 * Contenders to wake up:  5
 *
 * As each contender comes in, the contenders value is reduced, and
 * when it becomes zero, the lock holder is woken up and the mutex
 * queue deleted.
 *
 * 3) Occasionally multiple lock holders who just released the lock
 * make it to the kernel before any contenders:
 *
 * Contenders waitqueue:   Empty
 * Lock holders waitqueue: LH
 * Contenders to wake up:  5
 *
 * -> A new lock holder arrives.
 *
 * As soon as this occurs, the new LH wakes up the waiting one,
 * adds its own contender count to the total, and starts
 * waiting. The scenario transitions to scenario (2).
 *
 * The asynchronous arrival of contenders and lock holders makes
 * for many possibilities, but what matters is that the number of
 * wake-ups equals the number of contended waits.
 */
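
/*
 * To make the scheme above concrete, here is a minimal sketch of the
 * userspace side that this kernel path assumes. It is illustrative
 * only and not part of this file: the struct layout, the atomic
 * helpers, the l4_mutex_control() wrapper and the
 * mutex_flags_contenders() encoder are assumed names for this
 * sketch, not the actual library API.
 *
 *	struct user_mutex {
 *		int lock;	// 0 = free, 1 = held
 *		int contended;	// contentions seen while held
 *	};
 *
 *	void user_mutex_lock(struct user_mutex *m)
 *	{
 *		// Fast path: an uncontended 0 -> 1 transition
 *		while (atomic_swap(&m->lock, 1) != 0) {
 *			// Contended: record it, then sleep in the
 *			// kernel on this mutex's contenders queue
 *			atomic_inc(&m->contended);
 *			l4_mutex_control((unsigned long)m,
 *					 MUTEX_CONTROL_LOCK);
 *		}
 *	}
 *
 *	void user_mutex_unlock(struct user_mutex *m)
 *	{
 *		// Learn how many contentions occurred while the
 *		// lock was held, then release the lock
 *		int contenders = atomic_swap(&m->contended, 0);
 *
 *		atomic_set(&m->lock, 0);
 *
 *		// Obliged to wake up exactly that many contenders;
 *		// the count travels in the flags word that
 *		// mutex_contenders() decodes in sys_mutex_control()
 *		if (contenders)
 *			l4_mutex_control((unsigned long)m,
 *					 MUTEX_CONTROL_UNLOCK |
 *					 mutex_flags_contenders(contenders));
 *	}
 */
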
int mutex_control_lock(struct mutex_queue_head *mqhead,
                       unsigned long mutex_address)
{
        struct mutex_queue *mutex_queue;

        mutex_queue_head_lock(mqhead);

        /* Search for the mutex queue */
        if (!(mutex_queue = mutex_control_find(mqhead, mutex_address))) {
                /* Create a new one */
                if (!(mutex_queue = mutex_control_create(mutex_address))) {
                        mutex_queue_head_unlock(mqhead);
                        return -ENOMEM;
                }
                /* Add the queue to the mutex queue list */
                mutex_control_add(mqhead, mutex_queue);

        } else if (mutex_queue->wqh_holders.sleepers) {
                /*
                 * There's a lock holder, so we can consume from the
                 * number of contenders since we are one of them.
                 */
                mutex_queue->contenders--;

                /* No contenders left as far as the current holder is concerned */
                if (mutex_queue->contenders == 0) {
                        /* Wake up the current holder */
                        wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC);

                        /* No contenders remain, so delete the mutex queue */
                        mutex_control_remove(mqhead, mutex_queue);
                        mutex_control_delete(mutex_queue);
                }

                /* Release lock and return */
                mutex_queue_head_unlock(mqhead);
                return 0;
        }

        /* Prepare to wait on the contenders queue */
        CREATE_WAITQUEUE_ON_STACK(wq, current);

        wait_on_prepare(&mutex_queue->wqh_contenders, &wq);

        /* Release lock */
        mutex_queue_head_unlock(mqhead);

        /* Initiate prepared wait */
        return wait_on_prepared_wait();
}

int mutex_control_unlock(struct mutex_queue_head *mqhead,
                         unsigned long mutex_address, int contenders)
{
        struct mutex_queue *mutex_queue;

        mutex_queue_head_lock(mqhead);

        /* Search for the mutex queue */
        if (!(mutex_queue = mutex_control_find(mqhead, mutex_address))) {

                /* No such mutex, create one and sleep on it */
                if (!(mutex_queue = mutex_control_create(mutex_address))) {
                        mutex_queue_head_unlock(mqhead);
                        return -ENOMEM;
                }

                /* Set the initial contenders value */
                mutex_queue->contenders = contenders;

                /* Add the queue to the mutex queue list */
                mutex_control_add(mqhead, mutex_queue);

                /* Prepare to wait on the lock holders queue */
                CREATE_WAITQUEUE_ON_STACK(wq, current);

                /* Prepare to wait */
                wait_on_prepare(&mutex_queue->wqh_holders, &wq);

                /* Release lock first */
                mutex_queue_head_unlock(mqhead);

                /* Initiate prepared wait */
                return wait_on_prepared_wait();
        }

        /* Add our contenders to the total */
        mutex_queue->contenders += contenders;

        /* Wake up holders if any, and take over the wake-up responsibility */
        if (mutex_queue->wqh_holders.sleepers)
                wake_up(&mutex_queue->wqh_holders, WAKEUP_ASYNC);

        /*
         * Now wake up as many contenders as possible; otherwise
         * go to sleep on the holders queue
         */
        while (mutex_queue->contenders &&
               mutex_queue->wqh_contenders.sleepers) {
                /* Reduce total contenders to be woken up */
                mutex_queue->contenders--;

                /* Wake up a contender who made it to the kernel */
                wake_up(&mutex_queue->wqh_contenders, WAKEUP_ASYNC);
        }

        /*
         * Done with all of them? Leave.
         *
         * Not enough contenders? Go to sleep and wait for a new
         * contender rendezvous.
         */
        if (mutex_queue->contenders == 0) {
                /* Delete only if no contenders are left sleeping */
                if (mutex_queue->wqh_contenders.sleepers == 0) {
                        /* Since no one is left, delete the mutex queue */
                        mutex_control_remove(mqhead, mutex_queue);
                        mutex_control_delete(mutex_queue);
                }

                /* Release lock and return */
                mutex_queue_head_unlock(mqhead);
        } else {
                /* Prepare to wait on the lock holders queue */
                CREATE_WAITQUEUE_ON_STACK(wq, current);

                /* Prepare to wait */
                wait_on_prepare(&mutex_queue->wqh_holders, &wq);

                /* Release lock first */
                mutex_queue_head_unlock(mqhead);

                /* Initiate prepared wait */
                return wait_on_prepared_wait();
        }

        return 0;
}

int sys_mutex_control(unsigned long mutex_address, int mutex_flags)
{
        unsigned long mutex_physical;
        int mutex_op = mutex_operation(mutex_flags);
        int contenders = mutex_contenders(mutex_flags);
        int ret;

        //printk("%s: Thread %d enters.\n", __FUNCTION__, current->tid);

        /* Check valid user virtual address */
        if (KERN_ADDR(mutex_address)) {
                printk("Invalid args to %s.\n", __FUNCTION__);
                return -EINVAL;
        }

        if (mutex_op != MUTEX_CONTROL_LOCK &&
            mutex_op != MUTEX_CONTROL_UNLOCK)
                return -EPERM;

        if ((ret = cap_mutex_check(mutex_address, mutex_op)) < 0)
                return ret;

        /*
         * Find and check physical address for virtual mutex address
         *
         * NOTE: This is a shortcut to capability checking on memory
         * capabilities of current task.
         */
        if (!(mutex_physical =
              virt_to_phys_by_pgd(TASK_PGD(current), mutex_address)))
                return -EINVAL;

        switch (mutex_op) {
        case MUTEX_CONTROL_LOCK:
                ret = mutex_control_lock(&curcont->mutex_queue_head,
                                         mutex_physical);
                break;
        case MUTEX_CONTROL_UNLOCK:
                ret = mutex_control_unlock(&curcont->mutex_queue_head,
                                           mutex_physical, contenders);
                break;
        }

        return ret;
}