/*
 * Userspace mutex implementation
 *
 * Copyright (C) 2009 Bahadir Bilgehan Balban
 */
#include <l4lib/mutex.h>
#include <l4lib/types.h>
#include L4LIB_INC_ARCH(syscalls.h)
#include L4LIB_INC_ARCH(syslib.h)

/*
 * NOTES:
 *
 * The design is kept as simple as possible.
 *
 * l4_mutex_lock() locks an initialized mutex.
 * If it contends, it calls the mutex syscall.
 *
 * l4_mutex_unlock() releases an acquired mutex.
 * If there was contention, mutex syscall is called
 * to resolve by the kernel.
 *
 * Internals:
 *
 * (1) The kernel creates a waitqueue for every unique
 * mutex in the system, i.e. every unique physical
 * address that is contended as a mutex. In that respect
 * virtual mutex addresses are translated to physical
 * and checked for match.
 *
 * (2) If a mutex is contended, kernel is called by both the
 * locker and the unlocker (i.e. the lock holder). The syscall
 * results in a rendezvous and both tasks quit the syscall
 * synchronised. A rendezvous is necessary because it is not possible
 * to check lock status and send a WAIT or WAKEUP request to the
 * kernel atomically from userspace. In other words, a WAKEUP call
 * would be lost if it arrived before the unsuccessful lock attempt
 * resulted in a WAIT.
 *
 * (3) The unlocker releases the lock after it returns from the syscall.
 * (4) The locker continuously tries to acquire the lock.
 *
 * Issues:
 * - The kernel action is to merely wake up sleepers. If
 * a new thread acquires the lock meanwhile, all those woken
 * up threads would have to sleep again.
 * - All sleepers are woken up (aka thundering herd). This
 * must be done because if a single task is woken up, there
 * is no guarantee that that would in turn wake up others.
 * It might even quit attempting to take the lock.
 * - Whether this is the best design - time will tell.
 */

extern int __l4_mutex_lock(void *word);
extern int __l4_mutex_unlock(void *word);

void l4_mutex_init(struct l4_mutex *m)
|
| 58 |
|
|
{
|
| 59 |
|
|
m->lock = L4_MUTEX_UNLOCKED;
|
| 60 |
|
|
}
int l4_mutex_lock(struct l4_mutex *m)
|
| 63 |
|
|
{
|
| 64 |
|
|
int err;
|
| 65 |
|
|
|
| 66 |
|
|
while(__l4_mutex_lock(&m->lock) != L4_MUTEX_SUCCESS) {
|
| 67 |
|
|
if ((err = l4_mutex_control(&m->lock, L4_MUTEX_LOCK)) < 0) {
|
| 68 |
|
|
printf("%s: Error: %d\n", __FUNCTION__, err);
|
| 69 |
|
|
return err;
|
| 70 |
|
|
}
|
| 71 |
|
|
}
|
| 72 |
|
|
return 0;
|
| 73 |
|
|
}
int l4_mutex_unlock(struct l4_mutex *m)
|
| 76 |
|
|
{
|
| 77 |
|
|
int err, contended;
|
| 78 |
|
|
|
| 79 |
|
|
if ((contended = __l4_mutex_unlock(m))) {
|
| 80 |
|
|
if ((err = l4_mutex_control(&m->lock,
|
| 81 |
|
|
contended | L4_MUTEX_UNLOCK)) < 0) {
|
| 82 |
|
|
printf("%s: Error: %d\n", __FUNCTION__, err);
|
| 83 |
|
|
return err;
|
| 84 |
|
|
}
|
| 85 |
|
|
}
|
| 86 |
|
|
return 0;
|
| 87 |
|
|
}