URL
https://opencores.org/ocsvn/or1k/or1k/trunk
Subversion Repositories or1k
Compare Revisions
- This comparison shows the changes necessary to convert path
/or1k/tags/LINUX_2_4_26_OR32/linux/linux-2.4/ipc
- from Rev 1279 to Rev 1765
- ↔ Reverse comparison
Rev 1279 → Rev 1765
/util.h
0,0 → 1,107
/* |
* linux/ipc/util.h |
* Copyright (C) 1999 Christoph Rohland |
* |
* ipc helper functions (c) 1999 Manfred Spraul <manfreds@colorfullife.com> |
*/ |
|
/* Largest value of an unsigned short; used as the seq wrap limit. */
#define USHRT_MAX 0xffff
/* User-visible ipc ids are encoded as seq*SEQ_MULTIPLIER + slot index. */
#define SEQ_MULTIPLIER	(IPCMNI)

void sem_init (void);
void msg_init (void);
void shm_init (void);

/* Per-subsystem (sem/msg/shm) table of IPC objects. */
struct ipc_ids {
	int size;		/* capacity of the entries array */
	int in_use;		/* number of occupied slots */
	int max_id;		/* highest occupied slot index, -1 if empty */
	unsigned short seq;	/* sequence counter mixed into returned ids */
	unsigned short seq_max;	/* seq wraps back to 0 past this value */
	struct semaphore sem;	/* serializes create/remove/grow operations */
	spinlock_t ary;		/* protects the entries array contents */
	struct ipc_id* entries;
};

/* One slot of an ipc_ids table; a NULL p means the slot is free. */
struct ipc_id {
	struct kern_ipc_perm* p;
};


void __init ipc_init_ids(struct ipc_ids* ids, int size);

/* must be called with ids->sem acquired.*/
int ipc_findkey(struct ipc_ids* ids, key_t key);
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);

/* must be called with both locks acquired. */
struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id);

/* Check the requested access bits in flg against ipcp's mode/owner. */
int ipcperms (struct kern_ipc_perm *ipcp, short flg);

/* for rare, potentially huge allocations.
 * both function can sleep
 */
void* ipc_alloc(int size);
void ipc_free(void* ptr, int size);
|
/* Take the array spinlock, blocking all per-id lookups in this table. */
extern inline void ipc_lockall(struct ipc_ids* ids)
{
	spin_lock(&ids->ary);
}
|
extern inline struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) |
{ |
struct kern_ipc_perm* out; |
int lid = id % SEQ_MULTIPLIER; |
if(lid >= ids->size) |
return NULL; |
|
out = ids->entries[lid].p; |
return out; |
} |
|
/* Release the array spinlock taken by ipc_lockall(). */
extern inline void ipc_unlockall(struct ipc_ids* ids)
{
	spin_unlock(&ids->ary);
}
extern inline struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id) |
{ |
struct kern_ipc_perm* out; |
int lid = id % SEQ_MULTIPLIER; |
if(lid >= ids->size) |
return NULL; |
|
spin_lock(&ids->ary); |
out = ids->entries[lid].p; |
if(out==NULL) |
spin_unlock(&ids->ary); |
return out; |
} |
|
/* Release the array lock taken by a successful ipc_lock(); the id
 * argument is unused but kept for symmetry with ipc_lock(). */
extern inline void ipc_unlock(struct ipc_ids* ids, int id)
{
	spin_unlock(&ids->ary);
}
|
extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq) |
{ |
return SEQ_MULTIPLIER*seq + id; |
} |
|
extern inline int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid) |
{ |
if(uid/SEQ_MULTIPLIER != ipcp->seq) |
return 1; |
return 0; |
} |
|
/* Convert between the kernel-internal perm structure and the two
 * userspace ABI layouts (64-bit and legacy). */
void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);

#if defined(__ia64__) || defined(__hppa__)
  /* On IA-64 and PA-RISC, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd)	IPC_64
#else
int ipc_parse_version (int *cmd);
#endif
/sem.c
0,0 → 1,1125
/* |
* linux/ipc/sem.c |
* Copyright (C) 1992 Krishna Balasubramanian |
* Copyright (C) 1995 Eric Schenk, Bruno Haible |
* |
* IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995): |
* This code underwent a massive rewrite in order to solve some problems |
* with the original code. In particular the original code failed to |
* wake up processes that were waiting for semval to go to 0 if the |
* value went to 0 and was then incremented rapidly enough. In solving |
* this problem I have also modified the implementation so that it |
* processes pending operations in a FIFO manner, thus give a guarantee |
* that processes waiting for a lock on the semaphore won't starve |
* unless another locking process fails to unlock. |
* In addition the following two changes in behavior have been introduced: |
* - The original implementation of semop returned the value |
* last semaphore element examined on success. This does not |
* match the manual page specifications, and effectively |
* allows the user to read the semaphore even if they do not |
* have read permissions. The implementation now returns 0 |
* on success as stated in the manual page. |
* - There is some confusion over whether the set of undo adjustments |
* to be performed at exit should be done in an atomic manner. |
* That is, if we are attempting to decrement the semval should we queue |
* up and wait until we can do so legally? |
* The original implementation attempted to do this. |
* The current implementation does not do so. This is because I don't |
* think it is the right thing (TM) to do, and because I couldn't |
* see a clean way to get the old behavior with the new design. |
* The POSIX standard and SVID should be consulted to determine |
* what behavior is mandated. |
* |
* Further notes on refinement (Christoph Rohland, December 1998): |
* - The POSIX standard says, that the undo adjustments simply should |
* redo. So the current implementation is o.K. |
* - The previous code had two flaws: |
* 1) It actively gave the semaphore to the next waiting process |
* sleeping on the semaphore. Since this process did not have the |
* cpu this led to many unnecessary context switches and bad |
* performance. Now we only check which process should be able to |
* get the semaphore and if this process wants to reduce some |
* semaphore value we simply wake it up without doing the |
* operation. So it has to try to get it later. Thus e.g. the |
* running process may reacquire the semaphore during the current |
* time slice. If it only waits for zero or increases the semaphore, |
* we do the operation in advance and wake it up. |
* 2) It did not wake up all zero waiting processes. We try to do |
* better but only get the semops right which only wait for zero or |
* increase. If there are decrement operations in the operations |
* array we do the same as before. |
* |
* /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com> |
* |
* SMP-threaded, sysctl's added |
* (c) 1999 Manfred Spraul <manfreds@colorfullife.com> |
* Enforced range limit on SEM_UNDO |
* (c) 2001 Red Hat Inc <alan@redhat.com> |
*/ |
|
#include <linux/config.h> |
#include <linux/slab.h> |
#include <linux/spinlock.h> |
#include <linux/init.h> |
#include <linux/proc_fs.h> |
#include <linux/time.h> |
#include <asm/uaccess.h> |
#include "util.h" |
|
|
/* Typed wrappers around the generic ipc_* helpers for the semaphore table. */
#define sem_lock(id)	((struct sem_array*)ipc_lock(&sem_ids,id))
#define sem_unlock(id)	ipc_unlock(&sem_ids,id)
#define sem_rmid(id)	((struct sem_array*)ipc_rmid(&sem_ids,id))
#define sem_checkid(sma, semid)	\
	ipc_checkid(&sem_ids,&sma->sem_perm,semid)
#define sem_buildid(id, seq) \
	ipc_buildid(&sem_ids, id, seq)
static struct ipc_ids sem_ids;

static int newary (key_t, int, int);
static void freeary (int id);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif

/* Cut-offs below which the fixed on-stack buffers are used instead of
 * allocating (see semctl_main() and sys_semtimedop()). */
#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 */

/* Runtime tunables, exported via sysctl; defaults from <linux/sem.h>. */
int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI};
#define sc_semmsl	(sem_ctls[0])	/* max semaphores per set */
#define sc_semmns	(sem_ctls[1])	/* max semaphores system-wide */
#define sc_semopm	(sem_ctls[2])	/* max ops per semop call */
#define sc_semmni	(sem_ctls[3])	/* max number of sets */

/* Total semaphores in all existing sets; bounded by sc_semmns. */
static int used_sems;
|
/* Boot-time initialization of the semaphore id table and, when procfs
 * support is configured, the /proc/sysvipc/sem entry. */
void __init sem_init (void)
{
	used_sems = 0;
	ipc_init_ids(&sem_ids,sc_semmni);

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/sem", 0, 0, sysvipc_sem_read_proc, NULL);
#endif
}
|
/*
 * Allocate a new semaphore set of nsems semaphores and register it in
 * the id table.  Caller must hold sem_ids.sem.
 * Returns the user-visible semaphore id, or a negative errno.
 */
static int newary (key_t key, int nsems, int semflg)
{
	int id;
	struct sem_array *sma;
	int size;

	if (!nsems)
		return -EINVAL;
	if (used_sems + nsems > sc_semmns)
		return -ENOSPC;

	/* the struct sem array lives immediately after the header */
	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = (struct sem_array *) ipc_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);
	/* on success ipc_addid() returns with the array lock held */
	id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
	if(id == -1) {
		ipc_free(sma, size);
		return -ENOSPC;
	}
	used_sems += nsems;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_base = (struct sem *) &sma[1];
	/* sma->sem_pending = NULL; */
	sma->sem_pending_last = &sma->sem_pending;
	/* sma->undo = NULL; */
	sma->sem_nsems = nsems;
	sma->sem_ctime = CURRENT_TIME;
	sem_unlock(id);

	return sem_buildid(id, sma->sem_perm.seq);
}
|
/*
 * semget(2): find or create a semaphore set for the given key.
 * Returns the user-visible id or a negative errno.
 */
asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	int id, err = -EINVAL;
	struct sem_array *sma;

	if (nsems < 0 || nsems > sc_semmsl)
		return -EINVAL;
	/* serialize against concurrent create/remove */
	down(&sem_ids.sem);

	if (key == IPC_PRIVATE) {
		err = newary(key, nsems, semflg);
	} else if ((id = ipc_findkey(&sem_ids, key)) == -1) {  /* key not used */
		if (!(semflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newary(key, nsems, semflg);
	} else if (semflg & IPC_CREAT && semflg & IPC_EXCL) {
		err = -EEXIST;
	} else {
		/* existing set: check size and permissions, return its id */
		sma = sem_lock(id);
		if(sma==NULL)
			BUG();	/* can't vanish while we hold sem_ids.sem */
		if (nsems > sma->sem_nsems)
			err = -EINVAL;
		else if (ipcperms(&sma->sem_perm, semflg))
			err = -EACCES;
		else
			err = sem_buildid(id, sma->sem_perm.seq);
		sem_unlock(id);
	}

	up(&sem_ids.sem);
	return err;
}
|
/* doesn't acquire the sem_lock on error! */
/*
 * Re-take the array lock after it was dropped (e.g. around an
 * allocation) and verify the set is still the same one: same pointer,
 * same sequence number, same size, and the caller still has the
 * requested permission (flg).  Returns 0 with the lock held, or a
 * negative errno with the lock released.
 */
static int sem_revalidate(int semid, struct sem_array* sma, int nsems, short flg)
{
	struct sem_array* smanew;

	smanew = sem_lock(semid);
	if(smanew==NULL)
		return -EIDRM;
	if(smanew != sma || sem_checkid(sma,semid) || sma->sem_nsems != nsems) {
		sem_unlock(semid);
		return -EIDRM;
	}

	if (ipcperms(&sma->sem_perm, flg)) {
		sem_unlock(semid);
		return -EACCES;
	}
	return 0;
}
/* Manage the doubly linked list sma->sem_pending as a FIFO:
 * insert new queue elements at the tail sma->sem_pending_last.
 * Note: q->prev points at the *next field of the previous element
 * (or at sma->sem_pending itself for the head), not at the element.
 */
static inline void append_to_queue (struct sem_array * sma,
				    struct sem_queue * q)
{
	*(q->prev = sma->sem_pending_last) = q;
	*(sma->sem_pending_last = &q->next) = NULL;
}
|
/* Insert q at the head of sma->sem_pending, fixing up the neighbour's
 * back-pointer (or the tail pointer if the list was empty). */
static inline void prepend_to_queue (struct sem_array * sma,
				     struct sem_queue * q)
{
	q->next = sma->sem_pending;
	*(q->prev = &sma->sem_pending) = q;
	if (q->next)
		q->next->prev = &q->next;
	else /* sma->sem_pending_last == &sma->sem_pending */
		sma->sem_pending_last = &q->next;
}
|
/* Unlink q from sma->sem_pending; q->prev = NULL afterwards marks the
 * element as no longer queued (tested by the wakeup paths). */
static inline void remove_from_queue (struct sem_array * sma,
				      struct sem_queue * q)
{
	*(q->prev) = q->next;
	if (q->next)
		q->next->prev = q->prev;
	else /* sma->sem_pending_last == &q->next */
		sma->sem_pending_last = q->prev;
	q->prev = NULL; /* mark as removed */
}
|
/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
 *
 * The operations are applied as they are checked; if any of them would
 * block or go out of range, everything applied so far is rolled back
 * (the "undo" path below).  With do_undo set the caller only wants the
 * feasibility check, so a successful pass is rolled back too.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid,
			     int do_undo)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		/* sem_op == 0 means "wait for zero" */
		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
	 		 *	Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	if (do_undo)
	{
		result = 0;
		goto undo;
	}
	/* commit: record pids and undo adjustments, newest-first */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}
	sma->sem_otime = CURRENT_TIME;
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* roll back every semval change made before the failing op */
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
|
/* Go through the pending queue for the indicated semaphore
 * looking for tasks that can be completed.
 * Caller holds the array lock.
 */
static void update_queue (struct sem_array * sma)
{
	int error;
	struct sem_queue * q;

	for (q = sma->sem_pending; q; q = q->next) {

		if (q->status == 1)
			continue;	/* this one was woken up before */

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid, q->alter);

		/* Does q->sleeper still need to sleep? */
		if (error <= 0) {
				/* Found one, wake it up */
			wake_up_process(q->sleeper);
			if (error == 0 && q->alter) {
				/* if q-> alter let it self try */
				q->status = 1;
				return;
			}
			q->status = error;
			remove_from_queue(sma,q);
		}
	}
}
|
/* The following counts are associated to each semaphore: |
* semncnt number of tasks waiting on semval being nonzero |
* semzcnt number of tasks waiting on semval being zero |
* This model assumes that a task waits on exactly one semaphore. |
* Since semaphore operations are to be performed atomically, tasks actually |
* wait on a whole sequence of semaphores simultaneously. |
* The counts we return here are a rough approximation, but still |
* warrant that semncnt+semzcnt>0 if the task is on the pending queue. |
*/ |
/*
 * semncnt for GETNCNT: how many queued tasks would block while trying
 * to decrease semaphore semnum (rough approximation for multi-op
 * requests; a task is counted once per matching decrement op).
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	struct sem_queue * walk;
	int count = 0;

	for (walk = sma->sem_pending; walk; walk = walk->next) {
		struct sembuf * ops = walk->sops;
		int total = walk->nsops;
		int idx;

		for (idx = 0; idx < total; idx++) {
			if (ops[idx].sem_num != semnum)
				continue;
			if (ops[idx].sem_op >= 0)
				continue;
			if (ops[idx].sem_flg & IPC_NOWAIT)
				continue;
			count++;
		}
	}
	return count;
}
/*
 * semzcnt for GETZCNT: how many queued tasks are waiting for semaphore
 * semnum to become zero (rough approximation for multi-op requests;
 * a task is counted once per matching wait-for-zero op).
 */
static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	struct sem_queue * walk;
	int count = 0;

	for (walk = sma->sem_pending; walk; walk = walk->next) {
		struct sembuf * ops = walk->sops;
		int total = walk->nsops;
		int idx;

		for (idx = 0; idx < total; idx++) {
			if (ops[idx].sem_num != semnum)
				continue;
			if (ops[idx].sem_op != 0)
				continue;
			if (ops[idx].sem_flg & IPC_NOWAIT)
				continue;
			count++;
		}
	}
	return count;
}
|
/* Free a semaphore set.
 * Caller holds sem_ids.sem and the array lock (sem_rmid() requires
 * both); the array lock is released here, after the wakeups.
 */
static void freeary (int id)
{
	struct sem_array *sma;
	struct sem_undo *un;
	struct sem_queue *q;
	int size;

	sma = sem_rmid(id);

	/* Invalidate the existing undo structures for this semaphore set.
	 * (They will be freed without any further action in sem_exit()
	 * or during the next semop.)
	 */
	for (un = sma->undo; un; un = un->id_next)
		un->semid = -1;

	/* Wake up all pending processes and let them fail with EIDRM. */
	for (q = sma->sem_pending; q; q = q->next) {
		q->status = -EIDRM;
		q->prev = NULL;
		wake_up_process(q->sleeper); /* doesn't sleep */
	}
	sem_unlock(id);

	used_sems -= sma->sem_nsems;
	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
	ipc_free(sma, size);
}
|
static unsigned long copy_semid_to_user(void *buf, struct semid64_ds *in, int version) |
{ |
switch(version) { |
case IPC_64: |
return copy_to_user(buf, in, sizeof(*in)); |
case IPC_OLD: |
{ |
struct semid_ds out; |
|
ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); |
|
out.sem_otime = in->sem_otime; |
out.sem_ctime = in->sem_ctime; |
out.sem_nsems = in->sem_nsems; |
|
return copy_to_user(buf, &out, sizeof(out)); |
} |
default: |
return -EINVAL; |
} |
} |
|
/*
 * semctl() commands that do not need the target set locked up front:
 * IPC_INFO/SEM_INFO (system limits/usage) and SEM_STAT (stat by slot
 * index rather than by id).
 */
static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg)
{
	int err = -EINVAL;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = sc_semmni;
		seminfo.semmns = sc_semmns;
		seminfo.semmsl = sc_semmsl;
		seminfo.semopm = sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down(&sem_ids.sem);
		if (cmd == SEM_INFO) {
			/* SEM_INFO reports actual usage in these two fields */
			seminfo.semusz = sem_ids.in_use;
			seminfo.semaem = used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = sem_ids.max_id;
		up(&sem_ids.sem);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		/* return value doubles as the highest slot index in use */
		return (max_id < 0) ? 0: max_id;
	}
	case SEM_STAT:
	{
		struct sem_array *sma;
		struct semid64_ds tbuf;
		int id;

		/* here semid is a raw slot index, not a full ipc id */
		if(semid >= sem_ids.size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		sma = sem_lock(semid);
		if(sma == NULL)
			return -EINVAL;

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;
		id = sem_buildid(semid, sma->sem_perm.seq);

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(semid);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		/* SEM_STAT returns the reconstructed full id */
		return id;
	}
	default:
		return -EINVAL;
	}
	return err;
out_unlock:
	sem_unlock(semid);
	return err;
}
|
/*
 * semctl() commands that operate on an existing set: GETALL/SETALL,
 * IPC_STAT, and the single-semaphore GETVAL/GETPID/GETNCNT/GETZCNT/
 * SETVAL.  For sets larger than SEMMSL_FAST the array lock has to be
 * dropped around the allocation, hence the sem_revalidate() calls.
 */
static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];	/* avoids ipc_alloc for small sets */
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	nsems = sma->sem_nsems;

	err=-EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock;

	/* SETVAL/SETALL need write permission, everything else read */
	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	switch (cmd) {
	case GETALL:
	{
		ushort *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			/* must not allocate while holding the spinlock */
			sem_unlock(semid);
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
			err = sem_revalidate(semid, sma, nsems, S_IRUGO);
			if(err)
				goto out_free;
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(semid);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		/* drop the lock for the allocation and the usercopy */
		sem_unlock(semid);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL)
				return -ENOMEM;
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				err = -ERANGE;
				goto out_free;
			}
		}
		err = sem_revalidate(semid, sma, nsems, S_IWUGO);
		if(err)
			goto out_free;

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];
		/* setting values invalidates all recorded undo adjustments */
		for (un = sma->undo; un; un = un->id_next)
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		sma->sem_ctime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	case IPC_STAT:
	{
		struct semid64_ds tbuf;
		memset(&tbuf,0,sizeof(tbuf));
		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime  = sma->sem_otime;
		tbuf.sem_ctime  = sma->sem_ctime;
		tbuf.sem_nsems  = sma->sem_nsems;
		sem_unlock(semid);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return 0;
	}
	/* GETVAL, GETPID, GETNCTN, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;
		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		/* setting the value invalidates recorded undo adjustments */
		for (un = sma->undo; un; un = un->id_next)
			un->semadj[semnum] = 0;
		curr->semval = val;
		curr->sempid = current->pid;
		sma->sem_ctime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(semid);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}
|
/* The subset of semid_ds that IPC_SET is allowed to change. */
struct sem_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};
|
static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void *buf, int version) |
{ |
switch(version) { |
case IPC_64: |
{ |
struct semid64_ds tbuf; |
|
if(copy_from_user(&tbuf, buf, sizeof(tbuf))) |
return -EFAULT; |
|
out->uid = tbuf.sem_perm.uid; |
out->gid = tbuf.sem_perm.gid; |
out->mode = tbuf.sem_perm.mode; |
|
return 0; |
} |
case IPC_OLD: |
{ |
struct semid_ds tbuf_old; |
|
if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) |
return -EFAULT; |
|
out->uid = tbuf_old.sem_perm.uid; |
out->gid = tbuf_old.sem_perm.gid; |
out->mode = tbuf_old.sem_perm.mode; |
|
return 0; |
} |
default: |
return -EINVAL; |
} |
} |
|
/*
 * semctl() commands that change or destroy the set: IPC_RMID and
 * IPC_SET.  Caller holds sem_ids.sem (see sys_semctl()).  Only the
 * creator, owner, or a CAP_SYS_ADMIN task may use these.
 */
static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct sem_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		/* fetch the user's payload before taking the spinlock */
		if(copy_semid_from_user (&setbuf, arg.buf, version))
			return -EFAULT;
	}
	sma = sem_lock(semid);
	if(sma==NULL)
		return -EINVAL;

	if (sem_checkid(sma,semid)) {
		err=-EIDRM;
		goto out_unlock;
	}
	ipcp = &sma->sem_perm;

	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
	    	err=-EPERM;
		goto out_unlock;
	}

	switch(cmd){
	case IPC_RMID:
		/* freeary() drops the array lock itself */
		freeary(semid);
		err = 0;
		break;
	case IPC_SET:
		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		/* only the permission bits may change, not the type bits */
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (setbuf.mode & S_IRWXUGO);
		sma->sem_ctime = CURRENT_TIME;
		sem_unlock(semid);
		err = 0;
		break;
	default:
		sem_unlock(semid);
		err = -EINVAL;
		break;
	}
	return err;

out_unlock:
	sem_unlock(semid);
	return err;
}
|
asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) |
{ |
int err = -EINVAL; |
int version; |
|
if (semid < 0) |
return -EINVAL; |
|
version = ipc_parse_version(&cmd); |
|
switch(cmd) { |
case IPC_INFO: |
case SEM_INFO: |
case SEM_STAT: |
err = semctl_nolock(semid,semnum,cmd,version,arg); |
return err; |
case GETALL: |
case GETVAL: |
case GETPID: |
case GETNCNT: |
case GETZCNT: |
case IPC_STAT: |
case SETVAL: |
case SETALL: |
err = semctl_main(semid,semnum,cmd,version,arg); |
return err; |
case IPC_RMID: |
case IPC_SET: |
down(&sem_ids.sem); |
err = semctl_down(semid,semnum,cmd,version,arg); |
up(&sem_ids.sem); |
return err; |
default: |
return -EINVAL; |
} |
} |
|
/*
 * Remove the stale undo structure un (its set was deleted) from the
 * current task's undo list and free it.  Returns the next list entry
 * so callers can keep walking.  Only "current" touches its own list,
 * so no locking is needed here.
 */
static struct sem_undo* freeundos(struct sem_array *sma, struct sem_undo* un)
{
	struct sem_undo* u;
	struct sem_undo** up;

	for(up = &current->semundo;(u=*up);up=&u->proc_next) {
		if(un==u) {
			un=u->proc_next;
			*up=un;
			kfree(u);
			return un;
		}
	}
	/* un was not on current's list: internal inconsistency */
	printk ("freeundos undo list error id=%d\n", un->semid);
	return un->proc_next;
}
|
/* returns without sem_lock on error! */
/*
 * Allocate and link a new undo structure for the current task and this
 * semaphore set.  The array lock is dropped around the kmalloc and
 * re-validated afterwards; on success *unp is set and the lock is held
 * again.
 */
static int alloc_undo(struct sem_array *sma, struct sem_undo** unp, int semid, int alter)
{
	int size, nsems, error;
	struct sem_undo *un;

	nsems = sma->sem_nsems;
	/* the semadj array lives immediately after the header */
	size = sizeof(struct sem_undo) + sizeof(short)*nsems;
	sem_unlock(semid);

	un = (struct sem_undo *) kmalloc(size, GFP_KERNEL);
	if (!un)
		return -ENOMEM;

	memset(un, 0, size);
	error = sem_revalidate(semid, sma, nsems, alter ? S_IWUGO : S_IRUGO);
	if(error) {
		kfree(un);
		return error;
	}

	un->semadj = (short *) &un[1];
	un->semid = semid;
	/* link into both the per-task and the per-set undo lists */
	un->proc_next = current->semundo;
	current->semundo = un;
	un->id_next = sma->undo;
	sma->undo = un;
	*unp = un;
	return 0;
}
|
/* semop(2) is just semtimedop(2) without a timeout. */
asmlinkage long sys_semop (int semid, struct sembuf *tsops, unsigned nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}
|
/*
 * semtimedop(2): perform nsops semaphore operations atomically,
 * sleeping (optionally with a timeout) if they cannot all complete
 * immediately.  Decrementing waiters are queued FIFO at the tail;
 * wait-for-zero/increase waiters go to the head so update_queue()
 * can serve them in advance.
 */
asmlinkage long sys_semtimedop (int semid, struct sembuf *tsops,
			unsigned nsops, const struct timespec *timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];	/* avoids kmalloc for few ops */
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, decrease = 0, alter = 0;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
		    _timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	sma = sem_lock(semid);
	error=-EINVAL;
	if(sma==NULL)
		goto out_free;
	error = -EIDRM;
	if (sem_checkid(sma,semid))
		goto out_unlock_free;
	/* classify the request: undo use, decrements, any alteration */
	error = -EFBIG;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= sma->sem_nsems)
			goto out_unlock_free;
		if (sop->sem_flg & SEM_UNDO)
			undos++;
		if (sop->sem_op < 0)
			decrease = 1;
		if (sop->sem_op > 0)
			alter = 1;
	}
	alter |= decrease;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;
	if (undos) {
		/* Make sure we have an undo structure
		 * for this process and this semaphore set.
		 */
		un=current->semundo;
		while(un != NULL) {
			if(un->semid==semid)
				break;
			if(un->semid==-1)
				un=freeundos(sma,un);	/* reap stale entries */
			else
				un=un->proc_next;
		}
		if (!un) {
			error = alloc_undo(sma,&un,semid,alter);
			if(error)
				goto out_free;
		}
	} else
		un = NULL;

	error = try_atomic_semop (sma, sops, nsops, un, current->pid, 0);
	if (error <= 0)
		goto update;

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */
		
	queue.sma = sma;
	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = current->pid;
	queue.alter = decrease;
	queue.id = semid;
	if (alter)
		append_to_queue(sma ,&queue);
	else
		prepend_to_queue(sma ,&queue);
	current->semsleeping = &queue;

	for (;;) {
		struct sem_array* tmp;
		queue.status = -EINTR;
		queue.sleeper = current;
		current->state = TASK_INTERRUPTIBLE;
		sem_unlock(semid);

		if (timeout)
			jiffies_left = schedule_timeout(jiffies_left);
		else
			schedule();

		tmp = sem_lock(semid);
		if(tmp==NULL) {
			/* set was deleted; freeary() cleared queue.prev */
			if(queue.prev != NULL)
				BUG();
			current->semsleeping = NULL;
			error = -EIDRM;
			goto out_free;
		}
		/*
		 * If queue.status == 1 we where woken up and
		 * have to retry else we simply return.
		 * If an interrupt occurred we have to clean up the
		 * queue
		 *
		 */
		if (queue.status == 1)
		{
			error = try_atomic_semop (sma, sops, nsops, un,
						  current->pid,0);
			if (error <= 0) 
				break;
		} else {
			error = queue.status;
			if (error == -EINTR && timeout && jiffies_left == 0)
				error = -EAGAIN;	/* timer expired */
			if (queue.prev) /* got Interrupt */
				break;
			/* Everything done by update_queue */
			current->semsleeping = NULL;
			goto out_unlock_free;
		}
	}
	current->semsleeping = NULL;
	remove_from_queue(sma,&queue);
update:
	if (alter)
		update_queue (sma);
out_unlock_free:
	sem_unlock(semid);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
|
/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 *
 * Called at process exit; only "current" walks its own undo list, so
 * the per-set array lock is all that is taken here.
 */
void sem_exit (void)
{
	struct sem_queue *q;
	struct sem_undo *u, *un = NULL, **up, **unp;
	struct sem_array *sma;
	int nsems, i;

	/* If the current process was sleeping for a semaphore,
	 * remove it from the queue.
	 */
	if ((q = current->semsleeping)) {
		int semid = q->id;
		sma = sem_lock(semid);
		current->semsleeping = NULL;

		if (q->prev) {
			/* still queued: the set must still exist */
			if(sma==NULL)
				BUG();
			remove_from_queue(q->sma,q);
		}
		if(sma!=NULL)
			sem_unlock(semid);
	}

	/* walk the undo list, freeing each entry after processing it */
	for (up = &current->semundo; (u = *up); *up = u->proc_next, kfree(u)) {
		int semid = u->semid;
		if(semid == -1)
			continue;	/* set already deleted */
		sma = sem_lock(semid);
		if (sma == NULL)
			continue;

		/* re-check under the lock: the set may just have died */
		if (u->semid == -1)
			goto next_entry;

		if (sem_checkid(sma,u->semid))
			goto next_entry;

		/* remove u from the sma->undo list */
		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
			if (u == un)
				goto found;
		}
		printk ("sem_exit undo list error id=%d\n", u->semid);
		goto next_entry;
found:
		*unp = un->id_next;
		/* perform adjustments registered in u */
		nsems = sma->sem_nsems;
		for (i = 0; i < nsems; i++) {
			struct sem * sem = &sma->sem_base[i];
			sem->semval += u->semadj[i];
			if (sem->semval < 0)
				sem->semval = 0; /* shouldn't happen */
			sem->sempid = current->pid;
		}
		sma->sem_otime = CURRENT_TIME;
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma);
next_entry:
		sem_unlock(semid);
	}
	current->semundo = NULL;
}
|
#ifdef CONFIG_PROC_FS |
/*
 * /proc/sysvipc/sem read_proc handler: one formatted line per
 * existing semaphore set, paged via the standard start/offset/length
 * procfs contract.
 *
 * Fix: the original advanced the running position with "pos += len",
 * but len is cumulative since "begin" (it is only reset once pos
 * passes offset), so each iteration re-added already-counted bytes
 * and pos drifted ahead of the real output position.  The canonical
 * idiom is "pos = begin + len".
 */
static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	/* NOTE(review): column spacing restored per the usual 2.4 layout —
	 * confirm against the original file, the extraction collapsed it. */
	len += sprintf(buffer, "       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n");
	down(&sem_ids.sem);

	for(i = 0; i <= sem_ids.max_id; i++) {
		struct sem_array *sma;
		sma = sem_lock(i);
		if(sma) {
			len += sprintf(buffer + len, "%10d %10d  %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
				sma->sem_perm.key,
				sem_buildid(i,sma->sem_perm.seq),
				sma->sem_perm.mode,
				sma->sem_nsems,
				sma->sem_perm.uid,
				sma->sem_perm.gid,
				sma->sem_perm.cuid,
				sma->sem_perm.cgid,
				sma->sem_otime,
				sma->sem_ctime);
			sem_unlock(i);

			pos = begin + len;	/* was: pos += len (double-counted) */
			if(pos < offset) {
				/* everything so far is before the window */
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	*eof = 1;	/* walked the whole table: no more data */
done:
	up(&sem_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif |
/msg.c
0,0 → 1,905
/* |
* linux/ipc/msg.c |
* Copyright (C) 1992 Krishna Balasubramanian |
* |
* Removed all the remaining kerneld mess |
* Catch the -EFAULT stuff properly |
* Use GFP_KERNEL for messages as in 1.2 |
* Fixed up the unchecked user space derefs |
* Copyright (C) 1998 Alan Cox & Andi Kleen |
* |
* /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com> |
* |
* mostly rewritten, threaded and wake-one semantics added |
* MSGMAX limit removed, sysctl's added |
* (c) 1999 Manfred Spraul <manfreds@colorfullife.com> |
*/ |
|
#include <linux/config.h> |
#include <linux/slab.h> |
#include <linux/msg.h> |
#include <linux/spinlock.h> |
#include <linux/init.h> |
#include <linux/proc_fs.h> |
#include <linux/list.h> |
#include <asm/uaccess.h> |
#include "util.h" |
|
/* sysctl: */ |
int msg_ctlmax = MSGMAX; |
int msg_ctlmnb = MSGMNB; |
int msg_ctlmni = MSGMNI; |
|
/* one msg_receiver structure for each sleeping receiver */ |
struct msg_receiver { |
struct list_head r_list; |
struct task_struct* r_tsk; |
|
int r_mode; |
long r_msgtype; |
long r_maxsize; |
|
struct msg_msg* volatile r_msg; |
}; |
|
/* one msg_sender for each sleeping sender */ |
struct msg_sender { |
struct list_head list; |
struct task_struct* tsk; |
}; |
|
struct msg_msgseg { |
struct msg_msgseg* next; |
/* the next part of the message follows immediately */ |
}; |
/* one msg_msg structure for each message */ |
struct msg_msg { |
struct list_head m_list; |
long m_type; |
int m_ts; /* message text size */ |
struct msg_msgseg* next; |
/* the actual message follows immediately */ |
}; |
|
#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg)) |
#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg)) |
|
/* one msq_queue structure for each present queue on the system */ |
struct msg_queue {
	/* must be first: msg_lock() casts the kern_ipc_perm pointer
	 * returned by ipc_lock() to a struct msg_queue pointer */
	struct kern_ipc_perm q_perm;
	time_t q_stime;			/* last msgsnd time */
	time_t q_rtime;			/* last msgrcv time */
	time_t q_ctime;			/* last change time */
	unsigned long q_cbytes;		/* current number of bytes on queue */
	unsigned long q_qnum;		/* number of messages in queue */
	unsigned long q_qbytes;		/* max number of bytes on queue */
	pid_t q_lspid;			/* pid of last msgsnd */
	pid_t q_lrpid;			/* last receive pid */

	struct list_head q_messages;	/* queued struct msg_msg */
	struct list_head q_receivers;	/* sleeping struct msg_receiver */
	struct list_head q_senders;	/* sleeping struct msg_sender */
};
|
#define SEARCH_ANY 1 |
#define SEARCH_EQUAL 2 |
#define SEARCH_NOTEQUAL 3 |
#define SEARCH_LESSEQUAL 4 |
|
static atomic_t msg_bytes = ATOMIC_INIT(0); |
static atomic_t msg_hdrs = ATOMIC_INIT(0); |
|
static struct ipc_ids msg_ids; |
|
#define msg_lock(id) ((struct msg_queue*)ipc_lock(&msg_ids,id)) |
#define msg_unlock(id) ipc_unlock(&msg_ids,id) |
#define msg_rmid(id) ((struct msg_queue*)ipc_rmid(&msg_ids,id)) |
#define msg_checkid(msq, msgid) \ |
ipc_checkid(&msg_ids,&msq->q_perm,msgid) |
#define msg_buildid(id, seq) \ |
ipc_buildid(&msg_ids, id, seq) |
|
static void freeque (int id); |
static int newque (key_t key, int msgflg); |
#ifdef CONFIG_PROC_FS |
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data); |
#endif |
|
/*
 * msg_init - boot-time initialization of the message queue subsystem.
 * Sets up the id allocator sized by the msgmni sysctl and, when
 * procfs is configured, publishes /proc/sysvipc/msg.
 */
void __init msg_init (void)
{
	ipc_init_ids(&msg_ids,msg_ctlmni);

#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/msg", 0, 0, sysvipc_msg_read_proc, NULL);
#endif
}
|
/*
 * newque - allocate and register a new message queue
 * @key:    the IPC key supplied by the caller
 * @msgflg: flag word from msgget(); only the permission bits are kept
 *
 * Called with msg_ids.sem held.  Returns the external queue id on
 * success, -ENOMEM or -ENOSPC on failure.
 *
 * NOTE(review): the msg_unlock() below with no preceding msg_lock()
 * implies ipc_addid() returns with the ipc spinlock held, so the
 * fields are initialized under the lock — confirm against ipc/util.c.
 */
static int newque (key_t key, int msgflg)
{
	int id;
	struct msg_queue *msq;

	msq = (struct msg_queue *) kmalloc (sizeof (*msq), GFP_KERNEL);
	if (!msq)
		return -ENOMEM;
	id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
	if(id == -1) {
		kfree(msq);
		return -ENOSPC;
	}
	/* only the lower nine permission bits of msgflg are stored */
	msq->q_perm.mode = (msgflg & S_IRWXUGO);
	msq->q_perm.key = key;

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = CURRENT_TIME;
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(id);

	return msg_buildid(id,msq->q_perm.seq);
}
|
static void free_msg(struct msg_msg* msg) |
{ |
struct msg_msgseg* seg; |
seg = msg->next; |
kfree(msg); |
while(seg != NULL) { |
struct msg_msgseg* tmp = seg->next; |
kfree(seg); |
seg = tmp; |
} |
} |
|
/*
 * load_msg - copy a message text from user space into kernel memory
 * @src: user pointer to the message text
 * @len: number of bytes to copy
 *
 * The first DATALEN_MSG bytes live directly behind the struct msg_msg
 * header; any overflow goes into a chain of msg_msgseg pages linked
 * through ->next.  May sleep (GFP_KERNEL allocation, copy_from_user).
 * Returns the message or an ERR_PTR() value (-ENOMEM/-EFAULT).
 */
static struct msg_msg* load_msg(void* src, int len)
{
	struct msg_msg* msg;
	struct msg_msgseg** pseg;
	int err;
	int alen;

	alen = len;
	if(alen > DATALEN_MSG)
		alen = DATALEN_MSG;

	msg = (struct msg_msg *) kmalloc (sizeof(*msg) + alen, GFP_KERNEL);
	if(msg==NULL)
		return ERR_PTR(-ENOMEM);

	msg->next = NULL;

	if (copy_from_user(msg+1, src, alen)) {
		err = -EFAULT;
		goto out_err;
	}

	len -= alen;
	src = ((char*)src)+alen;
	pseg = &msg->next;
	while(len > 0) {
		struct msg_msgseg* seg;
		alen = len;
		if(alen > DATALEN_SEG)
			alen = DATALEN_SEG;
		seg = (struct msg_msgseg *) kmalloc (sizeof(*seg) + alen, GFP_KERNEL);
		if(seg==NULL) {
			err=-ENOMEM;
			goto out_err;
		}
		/* link the segment in before copying into it, so the
		 * out_err path can free everything allocated so far */
		*pseg = seg;
		seg->next = NULL;
		if(copy_from_user (seg+1, src, alen)) {
			err = -EFAULT;
			goto out_err;
		}
		pseg = &seg->next;
		len -= alen;
		src = ((char*)src)+alen;
	}
	return msg;

out_err:
	free_msg(msg);
	return ERR_PTR(err);
}
|
static int store_msg(void* dest, struct msg_msg* msg, int len) |
{ |
int alen; |
struct msg_msgseg *seg; |
|
alen = len; |
if(alen > DATALEN_MSG) |
alen = DATALEN_MSG; |
if(copy_to_user (dest, msg+1, alen)) |
return -1; |
|
len -= alen; |
dest = ((char*)dest)+alen; |
seg = msg->next; |
while(len > 0) { |
alen = len; |
if(alen > DATALEN_SEG) |
alen = DATALEN_SEG; |
if(copy_to_user (dest, seg+1, alen)) |
return -1; |
len -= alen; |
dest = ((char*)dest)+alen; |
seg=seg->next; |
} |
return 0; |
} |
|
/*
 * Register the current task as a sleeping sender on msq.  The task
 * state is set to TASK_INTERRUPTIBLE here, while the caller still
 * holds the queue lock and before it calls schedule() (see
 * sys_msgsnd), so a wake_up_process() cannot be lost in between.
 */
static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss)
{
	mss->tsk=current;
	current->state=TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list,&msq->q_senders);
}
|
/*
 * Unlink a sender from the wait list — unless ss_wakeup(..., kill=1)
 * already detached it, which it signals by poisoning list.next to NULL.
 */
static inline void ss_del(struct msg_sender* mss)
{
	if(mss->list.next != NULL)
		list_del(&mss->list);
}
|
/*
 * Wake every sender sleeping on list h.  With kill != 0 each entry is
 * also marked detached (list.next set to NULL so ss_del() skips it);
 * this is used by freeque() when the whole queue goes away.
 * Called with the queue lock held.
 */
static void ss_wakeup(struct list_head* h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender* mss;

		mss = list_entry(tmp,struct msg_sender,list);
		/* advance first: the woken task may free mss */
		tmp = tmp->next;
		if(kill)
			mss->list.next=NULL;
		wake_up_process(mss->tsk);
	}
}
|
/*
 * Wake all sleeping receivers of msq and hand them the error `res`
 * through r_msg (-EIDRM when the queue is removed, -EAGAIN when
 * IPC_SET may have changed the permissions).  Called with the queue
 * lock held.
 */
static void expunge_all(struct msg_queue* msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver* msr;

		msr = list_entry(tmp,struct msg_receiver,r_list);
		/* advance first: the woken task may free msr */
		tmp = tmp->next;
		msr->r_msg = ERR_PTR(res);
		wake_up_process(msr->r_tsk);
	}
}
|
/*
 * freeque - destroy message queue `id`
 *
 * Called with msg_ids.sem and the queue lock held (see sys_msgctl's
 * IPC_RMID path); the id is unhooked and the lock dropped in here.
 * Sleeping receivers get -EIDRM, sleeping senders are woken and
 * detached, then the message bodies are freed outside the spinlock.
 */
static void freeque (int id)
{
	struct msg_queue *msq;
	struct list_head *tmp;

	msq = msg_rmid(id);

	expunge_all(msq,-EIDRM);
	ss_wakeup(&msq->q_senders,1);
	msg_unlock(id);

	/* the queue is unreachable now; free the messages unlocked */
	tmp = msq->q_messages.next;
	while(tmp != &msq->q_messages) {
		struct msg_msg* msg = list_entry(tmp,struct msg_msg,m_list);
		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	kfree(msq);
}
|
/*
 * sys_msgget - find or create the message queue for `key`
 *
 * IPC_PRIVATE always creates a fresh queue.  Otherwise the key is
 * looked up under msg_ids.sem: a miss creates one if IPC_CREAT is
 * set (else -ENOENT); a hit with IPC_CREAT|IPC_EXCL yields -EEXIST,
 * otherwise permissions are checked and the external id returned.
 */
asmlinkage long sys_msgget (key_t key, int msgflg)
{
	int id, ret = -EPERM;
	struct msg_queue *msq;

	down(&msg_ids.sem);
	if (key == IPC_PRIVATE)
		ret = newque(key, msgflg);
	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		ret = -EEXIST;
	} else {
		/* the id cannot vanish while we hold msg_ids.sem,
		 * so a failed lock here is a kernel bug */
		msq = msg_lock(id);
		if(msq==NULL)
			BUG();
		if (ipcperms(&msq->q_perm, msgflg))
			ret = -EACCES;
		else
			ret = msg_buildid(id, msq->q_perm.seq);
		msg_unlock(id);
	}
	up(&msg_ids.sem);
	return ret;
}
|
/*
 * copy_msqid_to_user - write queue status out in the caller's ABI
 *
 * IPC_64 copies the native msqid64_ds; IPC_OLD converts to the legacy
 * msqid_ds, clamping the 16-bit counters to USHRT_MAX while also
 * storing the full values in the msg_l* fields.  Returns nonzero on
 * copy fault (callers map any nonzero result to -EFAULT).
 */
static inline unsigned long copy_msqid_to_user(void *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user (buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct msqid_ds out;

		/* zero first so padding never leaks kernel stack data */
		memset(&out,0,sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if(in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if(in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if(in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user (buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
|
/* values extracted from the user's msqid_ds/msqid64_ds for IPC_SET */
struct msq_setbuf {
	unsigned long qbytes;	/* new queue byte limit */
	uid_t uid;		/* new owner uid */
	gid_t gid;		/* new owner gid */
	mode_t mode;		/* new permission bits */
};
|
/*
 * copy_msqid_from_user - read the IPC_SET arguments in either ABI
 *
 * Fills *out from the user's msqid64_ds (IPC_64) or legacy msqid_ds
 * (IPC_OLD).  The old ABI keeps qbytes in a 16-bit field; a zero
 * there means the value lives in msg_lqbytes instead.
 * Returns 0, -EFAULT on copy fault, -EINVAL on unknown version.
 */
static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct msqid64_ds tbuf;

		if (copy_from_user (&tbuf, buf, sizeof (tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct msqid_ds tbuf_old;

		if (copy_from_user (&tbuf_old, buf, sizeof (tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		/* a zero 16-bit qbytes means "use the long field" */
		if(tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
|
/*
 * sys_msgctl - message queue control operations
 *
 * IPC_INFO/MSG_INFO: copy out global limits (or current usage).
 * MSG_STAT/IPC_STAT: copy out one queue's status; MSG_STAT takes a
 *                    raw index and returns the external id.
 * IPC_SET:           change qbytes, owner and permission bits.
 * IPC_RMID:          destroy the queue.
 * IPC_SET and IPC_RMID require ownership or CAP_SYS_ADMIN and run
 * with msg_ids.sem and the queue lock held.
 */
asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
{
	int err, version;
	struct msg_queue *msq;
	struct msq_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	/* strips the IPC_64 flag out of cmd and reports which ABI */
	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;
		if (!buf)
			return -EFAULT;
		/* We must not return kernel stack data.
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		memset(&msginfo,0,sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down(&msg_ids.sem);
		if (cmd == MSG_INFO) {
			/* MSG_INFO reports live usage instead of limits */
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		up(&msg_ids.sem);
		if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0: max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;
		if (!buf)
			return -EFAULT;
		/* MSG_STAT's argument is an index, not an external id */
		if(cmd == MSG_STAT && msqid >= msg_ids.size)
			return -EINVAL;

		memset(&tbuf,0,sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if(cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq,msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms (&msq->q_perm, S_IRUGO))
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msqid);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		/* read the user's arguments before taking any locks */
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user (&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	/* IPC_SET / IPC_RMID: need the id semaphore and the queue lock */
	down(&msg_ids.sem);
	msq = msg_lock(msqid);
	err=-EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;
	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
	    /* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		/* raising qbytes past the sysctl needs CAP_SYS_RESOURCE */
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;
		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			(S_IRWXUGO & setbuf.mode);
		msq->q_ctime = CURRENT_TIME;
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq,-EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders,0);
		msg_unlock(msqid);
		break;
	}
	case IPC_RMID:
		/* freeque drops the queue lock itself */
		freeque (msqid);
		break;
	}
	err = 0;
out_up:
	up(&msg_ids.sem);
	return err;
out_unlock_up:
	msg_unlock(msqid);
	goto out_up;
out_unlock:
	msg_unlock(msqid);
	return err;
}
|
static int testmsg(struct msg_msg* msg,long type,int mode) |
{ |
switch(mode) |
{ |
case SEARCH_ANY: |
return 1; |
case SEARCH_LESSEQUAL: |
if(msg->m_type <=type) |
return 1; |
break; |
case SEARCH_EQUAL: |
if(msg->m_type == type) |
return 1; |
break; |
case SEARCH_NOTEQUAL: |
if(msg->m_type != type) |
return 1; |
break; |
} |
return 0; |
} |
|
/*
 * pipelined_send - try to hand msg directly to a sleeping receiver
 *
 * Walks msq->q_receivers under the queue lock.  The first receiver
 * whose type filter matches is unlinked; if the message exceeds the
 * receiver's size limit that receiver gets -E2BIG and the scan goes
 * on, otherwise r_msg is set and the task woken.  Returns 1 if the
 * message was consumed, 0 if the caller must enqueue it.
 */
static int inline pipelined_send(struct msg_queue* msq, struct msg_msg* msg)
{
	struct list_head* tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver* msr;
		msr = list_entry(tmp,struct msg_receiver,r_list);
		tmp = tmp->next;
		if(testmsg(msg,msr->r_msgtype,msr->r_mode)) {
			list_del(&msr->r_list);
			if(msr->r_maxsize < msg->m_ts) {
				/* too big for this receiver: fail it,
				 * keep looking for another taker */
				msr->r_msg = ERR_PTR(-E2BIG);
				wake_up_process(msr->r_tsk);
			} else {
				msr->r_msg = msg;
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = CURRENT_TIME;
				wake_up_process(msr->r_tsk);
				return 1;
			}
		}
	}
	return 0;
}
|
/*
 * sys_msgsnd - append a message to queue msqid
 *
 * The message text is copied from user space before the queue lock
 * is taken.  If the queue is full the task sleeps as a msg_sender
 * (unless IPC_NOWAIT) and, after waking, re-validates everything
 * from the `retry:` label since the queue may have changed or been
 * removed.  Delivery is first attempted directly to a sleeping
 * receiver (pipelined_send); otherwise the message is enqueued.
 */
asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;

	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(msgp->mtext, msgsz);
	if(IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err=-EINVAL;
	if(msq==NULL)
		goto out_free;
retry:
	err= -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock_free;

	err=-EACCES;
	if (ipcperms(&msq->q_perm, S_IWUGO))
		goto out_unlock_free;

	if(msgsz + msq->q_cbytes > msq->q_qbytes ||
		1 + msq->q_qnum > msq->q_qbytes) {
		struct msg_sender s;

		/* queue full: sleep unless the caller said not to */
		if(msgflg&IPC_NOWAIT) {
			err=-EAGAIN;
			goto out_unlock_free;
		}
		/* ss_add sets TASK_INTERRUPTIBLE before we unlock,
		 * so a wakeup between unlock and schedule is not lost */
		ss_add(msq, &s);
		msg_unlock(msqid);
		schedule();
		current->state= TASK_RUNNING;

		msq = msg_lock(msqid);
		err = -EIDRM;
		if(msq==NULL)
			goto out_free;
		ss_del(&s);

		if (signal_pending(current)) {
			err=-EINTR;
			goto out_unlock_free;
		}
		goto retry;
	}

	msq->q_lspid = current->pid;
	msq->q_stime = CURRENT_TIME;

	if(!pipelined_send(msq,msg)) {
		/* noone is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list,&msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz,&msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	/* ownership of msg has passed to the queue or a receiver */
	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msqid);
out_free:
	if(msg!=NULL)
		free_msg(msg);
	return err;
}
|
static int inline convert_mode(long* msgtyp, int msgflg) |
{ |
/* |
* find message of correct type. |
* msgtyp = 0 => get first. |
* msgtyp > 0 => get first message of matching type. |
* msgtyp < 0 => get message with least type must be < abs(msgtype). |
*/ |
if(*msgtyp==0) |
return SEARCH_ANY; |
if(*msgtyp<0) { |
*msgtyp=-(*msgtyp); |
return SEARCH_LESSEQUAL; |
} |
if(msgflg & MSG_EXCEPT) |
return SEARCH_NOTEQUAL; |
return SEARCH_EQUAL; |
} |
|
/*
 * sys_msgrcv - receive a message from queue msqid
 *
 * Scans the queue for a message matching msgtyp under the queue lock.
 * If none matches the task registers itself as a msg_receiver and
 * sleeps; a sender can then hand a message over directly through
 * msr_d.r_msg.  After waking, the lock is re-taken before r_msg is
 * inspected (the commented-out lockless fast path below explains why),
 * and an -EAGAIN in r_msg means "woken without delivery": re-scan.
 */
asmlinkage long sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz,
			    long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_receiver msr_d;
	struct list_head* tmp;
	struct msg_msg* msg, *found_msg;
	int err;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp,msgflg);

	msq = msg_lock(msqid);
	if(msq==NULL)
		return -EINVAL;
retry:
	err = -EIDRM;
	if (msg_checkid(msq,msqid))
		goto out_unlock;

	err=-EACCES;
	if (ipcperms (&msq->q_perm, S_IRUGO))
		goto out_unlock;

	tmp = msq->q_messages.next;
	found_msg=NULL;
	while (tmp != &msq->q_messages) {
		msg = list_entry(tmp,struct msg_msg,m_list);
		if(testmsg(msg,msgtyp,mode)) {
			found_msg = msg;
			/* SEARCH_LESSEQUAL keeps scanning for the
			 * smallest type; msgtyp shrinks as we find
			 * better candidates (type 1 cannot be beaten) */
			if(mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				found_msg=msg;
				msgtyp=msg->m_type-1;
			} else {
				found_msg=msg;
				break;
			}
		}
		tmp = tmp->next;
	}
	if(found_msg) {
		msg=found_msg;
		if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
			err=-E2BIG;
			goto out_unlock;
		}
		list_del(&msg->m_list);
		msq->q_qnum--;
		msq->q_rtime = CURRENT_TIME;
		msq->q_lrpid = current->pid;
		msq->q_cbytes -= msg->m_ts;
		atomic_sub(msg->m_ts,&msg_bytes);
		atomic_dec(&msg_hdrs);
		/* freed space may unblock senders */
		ss_wakeup(&msq->q_senders,0);
		msg_unlock(msqid);
out_success:
		msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
		if (put_user (msg->m_type, &msgp->mtype) ||
		    store_msg(msgp->mtext, msg, msgsz)) {
			msgsz = -EFAULT;
		}
		free_msg(msg);
		return msgsz;
	} else
	{
		struct msg_queue *t;
		/* no message waiting. Prepare for pipelined
		 * receive.
		 */
		if (msgflg & IPC_NOWAIT) {
			err=-ENOMSG;
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list,&msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if(msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		/* state set before unlocking so a wakeup is not lost */
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msqid);

		schedule();
		current->state = TASK_RUNNING;

		/* This introduces a race so we must always take
		   the slow path
		msg = (struct msg_msg*) msr_d.r_msg;
		if(!IS_ERR(msg))
			goto out_success;
		*/
		t = msg_lock(msqid);
		/* msqid==-1 below means "queue gone, lock not held" */
		if(t==NULL)
			msqid=-1;
		msg = (struct msg_msg*)msr_d.r_msg;
		if(!IS_ERR(msg)) {
			/* our message arived while we waited for
			 * the spinlock. Process it.
			 */
			if(msqid!=-1)
				msg_unlock(msqid);
			goto out_success;
		}
		err = PTR_ERR(msg);
		if(err == -EAGAIN) {
			/* woken without delivery: we are still on the
			 * receiver list, so the queue must still exist */
			if(msqid==-1)
				BUG();
			list_del(&msr_d.r_list);
			if (signal_pending(current))
				err=-EINTR;
			else
				goto retry;
		}
	}
out_unlock:
	if(msqid!=-1)
		msg_unlock(msqid);
	return err;
}
|
#ifdef CONFIG_PROC_FS
/*
 * sysvipc_msg_read_proc - render /proc/sysvipc/msg
 *
 * Classic read_proc offset/begin protocol: `len` counts the bytes
 * currently in `buffer`, `begin` is the file offset of buffer[0],
 * and `pos` must be the total number of bytes generated so far.
 */
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	down(&msg_ids.sem);
	len += sprintf(buffer, " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n");

	for(i = 0; i <= msg_ids.max_id; i++) {
		struct msg_queue * msq;
		msq = msg_lock(i);
		if(msq != NULL) {
			len += sprintf(buffer + len, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
				msq->q_perm.key,
				msg_buildid(i,msq->q_perm.seq),
				msq->q_perm.mode,
				msq->q_cbytes,
				msq->q_qnum,
				msq->q_lspid,
				msq->q_lrpid,
				msq->q_perm.uid,
				msq->q_perm.gid,
				msq->q_perm.cuid,
				msq->q_perm.cgid,
				msq->q_stime,
				msq->q_rtime,
				msq->q_ctime);
			msg_unlock(i);

			/* `len` is cumulative, so the former `pos += len`
			 * double-counted earlier lines and corrupted the
			 * offset bookkeeping on multi-read accesses. */
			pos = begin + len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}

	}
	*eof = 1;
done:
	up(&msg_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif
/shm.c
0,0 → 1,751
/* |
* linux/ipc/shm.c |
* Copyright (C) 1992, 1993 Krishna Balasubramanian |
* Many improvements/fixes by Bruno Haible. |
* Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. |
* Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. |
* |
* /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> |
* BIGMEM support, Andrea Arcangeli <andrea@suse.de> |
* SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> |
* HIGHMEM support, Ingo Molnar <mingo@redhat.com> |
* Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> |
* Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> |
* Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> |
* |
*/ |
|
#include <linux/config.h> |
#include <linux/slab.h> |
#include <linux/shm.h> |
#include <linux/init.h> |
#include <linux/file.h> |
#include <linux/mman.h> |
#include <linux/proc_fs.h> |
#include <asm/uaccess.h> |
|
#include "util.h" |
|
struct shmid_kernel /* private to the kernel */
{
	/* must be first: shm_lock()/shm_get() cast the kern_ipc_perm
	 * pointer returned by the ipc helpers to shmid_kernel */
	struct kern_ipc_perm shm_perm;
	struct file * shm_file;		/* shmem file backing the segment */
	int id;				/* external id; also the file's i_ino */
	unsigned long shm_nattch;	/* number of current attaches */
	unsigned long shm_segsz;	/* requested size in bytes */
	time_t shm_atim;		/* last attach time */
	time_t shm_dtim;		/* last detach time */
	time_t shm_ctim;		/* last change time */
	pid_t shm_cprid;		/* pid of creator */
	pid_t shm_lprid;		/* pid of last attach/detach */
};
|
#define shm_flags shm_perm.mode |
|
static struct file_operations shm_file_operations; |
static struct vm_operations_struct shm_vm_ops; |
|
static struct ipc_ids shm_ids; |
|
#define shm_lock(id) ((struct shmid_kernel*)ipc_lock(&shm_ids,id)) |
#define shm_unlock(id) ipc_unlock(&shm_ids,id) |
#define shm_lockall() ipc_lockall(&shm_ids) |
#define shm_unlockall() ipc_unlockall(&shm_ids) |
#define shm_get(id) ((struct shmid_kernel*)ipc_get(&shm_ids,id)) |
#define shm_buildid(id, seq) \ |
ipc_buildid(&shm_ids, id, seq) |
|
static int newseg (key_t key, int shmflg, size_t size); |
static void shm_open (struct vm_area_struct *shmd); |
static void shm_close (struct vm_area_struct *shmd); |
#ifdef CONFIG_PROC_FS |
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data); |
#endif |
|
size_t shm_ctlmax = SHMMAX; |
size_t shm_ctlall = SHMALL; |
int shm_ctlmni = SHMMNI; |
|
static int shm_tot; /* total number of shared memory pages */ |
|
/*
 * shm_init - boot-time initialization of the shared memory subsystem.
 * The id array is seeded with a single slot (shm_addid() passes
 * shm_ctlmni+1 as the limit on every insert); /proc/sysvipc/shm is
 * published when procfs is configured.
 */
void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc, NULL);
#endif
}
|
static inline int shm_checkid(struct shmid_kernel *s, int id) |
{ |
if (ipc_checkid(&shm_ids,&s->shm_perm,id)) |
return -EIDRM; |
return 0; |
} |
|
/* Unhook the segment from the id array.  Per ipc/util.h, ipc_rmid()
 * must be called with both shm_ids.sem and the segment lock held. */
static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
}
|
/* Register a new segment, capping the id space at shm_ctlmni+1 slots.
 * Returns the slot index, or -1 when the table is full. */
static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni+1);
}
|
|
|
/* Account one more attach on segment `id`, stamping atime and lprid.
 * Callers derive id from a live mapping, so the lock cannot fail. */
static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = CURRENT_TIME;
	shp->shm_lprid = current->pid;
	shp->shm_nattch++;
	shm_unlock(id);
}
|
/* This is called by fork, once for every shm attach. */ |
/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	/* the external shm id is stored in the backing inode's i_ino
	 * (set up in newseg) */
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}
|
/* |
* shm_destroy - free the struct shmid_kernel |
* |
* @shp: struct to free |
* |
* It has to be called with shp and shm_ids.sem locked, |
* but returns with shp unlocked and freed. |
*/ |
static void shm_destroy (struct shmid_kernel *shp)
{
	/* give the pages back to the global accounting */
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp->id);
	/* drop any SHM_LOCK pin before releasing the backing file */
	shmem_lock(shp->shm_file, 0);
	fput (shp->shm_file);
	kfree (shp);
}
|
/* |
* remove the attach descriptor shmd. |
* free memory for segment if it is marked destroyed. |
* The descriptor has already been removed from the current->mm->mmap list |
* and will later be kfree()d. |
*/ |
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	/* the external shm id lives in the backing inode's i_ino */
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	down (&shm_ids.sem);
	/* remove from the list of attaches of the shm segment */
	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->pid;
	shp->shm_dtim = CURRENT_TIME;
	shp->shm_nattch--;
	/* last detach of a removed (SHM_DEST) segment frees it */
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(id);
	up (&shm_ids.sem);
}
|
/* mmap callback for a shm file: install the shm vm_ops (so attach
 * counting continues across fork/unmap) and count this attach. */
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	UPDATE_ATIME(file->f_dentry->d_inode);
	vma->vm_ops = &shm_vm_ops;
	shm_inc(file->f_dentry->d_inode->i_ino);
	return 0;
}
|
/* mmap is the only file operation a shm segment needs */
static struct file_operations shm_file_operations = {
	mmap: shm_mmap
};

static struct vm_operations_struct shm_vm_ops = {
	open: shm_open, /* callback for a new vm-area open */
	close: shm_close, /* callback for when the vm-area is released */
	nopage: shmem_nopage,
};
|
/*
 * newseg - allocate and register a new shared memory segment
 * @key:    the IPC key supplied by the caller
 * @shmflg: flag word; only the permission bits are stored
 * @size:   requested segment size in bytes
 *
 * Called with shm_ids.sem held.  Backing storage is a shmem file;
 * the external id is stashed in the file's inode number so the vm
 * callbacks can find the segment again.  Returns the external id
 * or a negative error code.
 *
 * NOTE(review): the shm_unlock() with no preceding shm_lock()
 * implies ipc_addid() returns with the ipc spinlock held — confirm
 * against ipc/util.c.
 */
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	/* enforce the global page limit (shmall sysctl) */
	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_USER);
	if (!shp)
		return -ENOMEM;
	sprintf (name, "SYSV%08x", key);
	file = shmem_file_setup(name, size);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if(id == -1)
		goto no_id;
	shp->shm_perm.key = key;
	shp->shm_flags = (shmflg & S_IRWXUGO);
	shp->shm_cprid = current->pid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = CURRENT_TIME;
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id,shp->shm_perm.seq);
	shp->shm_file = file;
	/* the vm callbacks recover the id from i_ino */
	file->f_dentry->d_inode->i_ino = shp->id;
	file->f_op = &shm_file_operations;
	shm_tot += numpages;
	shm_unlock (id);
	return shp->id;

no_id:
	fput(file);
no_file:
	kfree(shp);
	return error;
}
|
/*
 * sys_shmget - find or create the shared memory segment for `key`
 *
 * IPC_PRIVATE always creates.  Otherwise a key miss creates with
 * IPC_CREAT (else -ENOENT); a hit with IPC_CREAT|IPC_EXCL fails
 * with -EEXIST, otherwise size and permissions are checked and the
 * external id returned.
 */
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	down(&shm_ids.sem);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		/* found: cannot vanish while we hold shm_ids.sem */
		shp = shm_lock(id);
		if(shp==NULL)
			BUG();
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else
			err = shm_buildid(id, shp->shm_perm.seq);
		shm_unlock(id);
	}
	up(&shm_ids.sem);
	return err;
}
|
static inline unsigned long copy_shmid_to_user(void *buf, struct shmid64_ds *in, int version) |
{ |
switch(version) { |
case IPC_64: |
return copy_to_user(buf, in, sizeof(*in)); |
case IPC_OLD: |
{ |
struct shmid_ds out; |
|
ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); |
out.shm_segsz = in->shm_segsz; |
out.shm_atime = in->shm_atime; |
out.shm_dtime = in->shm_dtime; |
out.shm_ctime = in->shm_ctime; |
out.shm_cpid = in->shm_cpid; |
out.shm_lpid = in->shm_lpid; |
out.shm_nattch = in->shm_nattch; |
|
return copy_to_user(buf, &out, sizeof(out)); |
} |
default: |
return -EINVAL; |
} |
} |
|
/* values extracted from the user's shmid_ds/shmid64_ds for IPC_SET */
struct shm_setbuf {
	uid_t uid;	/* new owner uid */
	gid_t gid;	/* new owner gid */
	mode_t mode;	/* new permission bits */
};
|
/*
 * copy_shmid_from_user - read the IPC_SET arguments in either ABI
 *
 * Note: `tbuf.shm_flags` below expands to `tbuf.shm_perm.mode` via
 * the `#define shm_flags shm_perm.mode` near the top of this file.
 * Returns 0, -EFAULT on copy fault, -EINVAL on unknown version.
 */
static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid = tbuf.shm_perm.uid;
		out->gid = tbuf.shm_perm.gid;
		out->mode = tbuf.shm_flags;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.shm_perm.uid;
		out->gid = tbuf_old.shm_perm.gid;
		out->mode = tbuf_old.shm_flags;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
|
/*
 * copy_shminfo_to_user - write the IPC_INFO limits in the caller's ABI
 *
 * The legacy shminfo carries shmmax as an int, so the 64-bit value
 * is clamped to INT_MAX for IPC_OLD callers.  Returns nonzero on
 * copy fault, -EINVAL on unknown version.
 */
static inline unsigned long copy_shminfo_to_user(void *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
|
/*
 * shm_get_stat - sum resident and swapped pages over all segments
 * @rss: out: total pages resident in the page cache
 * @swp: out: total pages currently on swap
 *
 * Called from SHM_INFO with shm_ids.sem held and shm_lockall()
 * taken, which is why the lock-free shm_get() is safe here.
 */
static void shm_get_stat (unsigned long *rss, unsigned long *swp)
{
	struct shmem_inode_info *info;
	int i;

	*rss = 0;
	*swp = 0;

	for(i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel* shp;
		struct inode * inode;

		shp = shm_get(i);
		if(shp == NULL)
			continue;
		inode = shp->shm_file->f_dentry->d_inode;
		info = SHMEM_I(inode);
		spin_lock (&info->lock);
		*rss += inode->i_mapping->nrpages;
		*swp += info->swapped;
		spin_unlock (&info->lock);
	}
}
|
/*
 * sys_shmctl - shared memory control operations
 *
 * IPC_INFO:          copy out the configured limits.
 * SHM_INFO:          copy out live usage statistics.
 * SHM_STAT/IPC_STAT: copy out one segment's status; SHM_STAT takes
 *                    a raw index and returns the external id.
 * SHM_LOCK/UNLOCK:   pin/unpin the segment's pages (CAP_IPC_LOCK).
 * IPC_RMID:          mark the segment destroyed (freed at last detach).
 * IPC_SET:           change owner and permission bits.
 * IPC_RMID and IPC_SET require ownership or CAP_SYS_ADMIN and run
 * with shm_ids.sem and the segment lock held.
 */
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	/* strips the IPC_64 flag out of cmd and reports which ABI */
	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err= shm_ids.max_id;
		if(err<0)
			err = 0;
		return err;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info,0,sizeof(shm_info));
		down(&shm_ids.sem);
		/* shm_get_stat() needs all segments pinned */
		shm_lockall();
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		shm_unlockall();
		up(&shm_ids.sem);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
			return -EFAULT;

		return err < 0 ? 0 : err;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if(shp==NULL)
			return -EINVAL;
		if(cmd==SHM_STAT) {
			/* SHM_STAT takes an index, returns the id */
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err=-EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shmid);
		if(copy_shmid_to_user (buf, &tbuf, version))
			return -EFAULT;
		return result;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
		if (!capable(CAP_IPC_LOCK))
			return -EPERM;

		shp = shm_lock(shmid);
		if(shp==NULL)
			return -EINVAL;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock;
		if(cmd==SHM_LOCK) {
			shmem_lock(shp->shm_file, 1);
			shp->shm_flags |= SHM_LOCKED;
		} else {
			shmem_lock(shp->shm_file, 0);
			shp->shm_flags &= ~SHM_LOCKED;
		}
		shm_unlock(shmid);
		return err;
	}
	case IPC_RMID:
	{
		/*
		 *	We cannot simply remove the file. The SVID states
		 *	that the block remains until the last person
		 *	detaches from it, then is deleted. A shmat() on
		 *	an RMID segment is legal in older Linux and if 
		 *	we change it apps break...
		 *
		 *	Instead we set a destroyed flag, and then blow
		 *	the name away when the usage hits zero.
		 */
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if(err)
			goto out_unlock_up;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err=-EPERM;
			goto out_unlock_up;
		}
		if (shp->shm_nattch){
			shp->shm_flags |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shmid);
		} else
			shm_destroy (shp);
		up(&shm_ids.sem);
		return err;
	}

	case IPC_SET:
	{
		if(copy_shmid_from_user (&setbuf, buf, version))
			return -EFAULT;
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err=-EINVAL;
		if(shp==NULL)
			goto out_up;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock_up;
		err=-EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = CURRENT_TIME;
		break;
	}

	default:
		return -EINVAL;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shmid);
out_up:
	up(&shm_ids.sem);
	return err;
out_unlock:
	shm_unlock(shmid);
	return err;
}
|
/* |
* Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. |
*/ |
/*
 * sys_shmat - attach a shared memory segment
 * @shmid:   external segment id
 * @shmaddr: requested address, or NULL to let the kernel choose
 * @shmflg:  SHM_RND / SHM_REMAP / SHM_RDONLY
 * @raddr:   out: the address actually mapped
 *
 * The attach count is raised before the segment lock is dropped so
 * the segment cannot be freed while do_mmap() runs; afterwards the
 * temporary reference is dropped again (the mapping's own shm_inc()
 * keeps the real count), destroying the segment if it is SHM_DEST
 * with no attaches left.
 */
asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0)
		return -EINVAL;

	if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
				return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		/* SHM_REMAP is meaningless without a fixed address */
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if(shp == NULL)
		return -EINVAL;
	err = shm_checkid(shp,shmid);
	if (err) {
		shm_unlock(shmid);
		return err;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shmid);
		return -EACCES;
	}
	file = shp->shm_file;
	size = file->f_dentry->d_inode->i_size;
	/* temporary reference: keeps the segment alive across do_mmap */
	shp->shm_nattch++;
	shm_unlock(shmid);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	/* drop the temporary reference taken above */
	down (&shm_ids.sem);
	if(!(shp = shm_lock(shmid)))
		BUG();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shmid);
	up (&shm_ids.sem);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
	return err;

}
|
/* |
* detach and kill segment if marked destroyed. |
* The work is done in shm_close. |
*/ |
asmlinkage long sys_shmdt (char *shmaddr) |
{ |
struct mm_struct *mm = current->mm; |
struct vm_area_struct *shmd, *shmdnext; |
int retval = -EINVAL; |
|
down_write(&mm->mmap_sem); |
for (shmd = mm->mmap; shmd; shmd = shmdnext) { |
shmdnext = shmd->vm_next; |
if (shmd->vm_ops == &shm_vm_ops |
&& shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) { |
do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start); |
retval = 0; |
} |
} |
up_write(&mm->mmap_sem); |
return retval; |
} |
|
#ifdef CONFIG_PROC_FS |
/*
 * /proc/sysvipc/shm read handler: emits a header line followed by one
 * formatted line per live shared memory segment.
 *
 * Classic 2.4 read_proc bookkeeping: 'pos' tracks how far into the
 * virtual file we are, 'begin' is the file offset at which 'buffer'
 * currently starts, 'len' the bytes accumulated since 'begin'.
 * NOTE(review): 'pos += len' adds the *cumulative* len on every
 * iteration rather than just that entry's bytes, so pos over-counts
 * once past 'offset'; this mirrors the common 2.4 proc pattern --
 * confirm before "fixing".
 */
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	down(&shm_ids.sem);
	/* Header line; only visible to the reader when offset == 0. */
	len += sprintf(buffer, "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n");

	for(i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel* shp;

		shp = shm_lock(i);
		if(shp!=NULL) {
/* Two formats: shm_segsz is a size_t, which needs a wider field on
 * 64-bit architectures. */
#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
			char *format;

			if (sizeof(size_t) <= sizeof(int))
				format = SMALL_STRING;
			else
				format = BIG_STRING;
			len += sprintf(buffer + len, format,
				shp->shm_perm.key,
				shm_buildid(i, shp->shm_perm.seq),
				shp->shm_flags,
				shp->shm_segsz,
				shp->shm_cprid,
				shp->shm_lprid,
				shp->shm_nattch,
				shp->shm_perm.uid,
				shp->shm_perm.gid,
				shp->shm_perm.cuid,
				shp->shm_perm.cgid,
				shp->shm_atim,
				shp->shm_dtim,
				shp->shm_ctim);
			shm_unlock(i);

			pos += len;
			/* Still before the requested window: discard what we
			 * have and slide 'begin' forward. */
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	/* Walked every id without filling the window: end of file. */
	*eof = 1;
done:
	up(&shm_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif |
/Makefile
0,0 → 1,16
#
# Makefile for the linux ipc.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...

O_TARGET := ipc.o

# util.o is always built: it supplies the real helpers when SysV IPC
# is enabled and the -ENOSYS syscall stubs when it is not.
obj-y := util.o

# The actual message-queue, semaphore and shared-memory code only
# when CONFIG_SYSVIPC=y.
obj-$(CONFIG_SYSVIPC) += msg.o sem.o shm.o

include $(TOPDIR)/Rules.make
/util.c
0,0 → 1,414
/* |
* linux/ipc/util.c |
* Copyright (C) 1992 Krishna Balasubramanian |
* |
* Sep 1997 - Call suser() last after "normal" permission checks so we |
* get BSD style process accounting right. |
* Occurs in several places in the IPC code. |
* Chris Evans, <chris@ferret.lmh.ox.ac.uk> |
* Nov 1999 - ipc helper functions, unified SMP locking |
* Manfred Spraul <manfreds@colorfullife.com> |
*/ |
|
#include <linux/config.h> |
#include <linux/mm.h> |
#include <linux/shm.h> |
#include <linux/init.h> |
#include <linux/msg.h> |
#include <linux/smp_lock.h> |
#include <linux/vmalloc.h> |
#include <linux/slab.h> |
#include <linux/highuid.h> |
|
#if defined(CONFIG_SYSVIPC) |
|
#include "util.h" |
|
/** |
* ipc_init - initialise IPC subsystem |
* |
* The various system5 IPC resources (semaphores, messages and shared |
* memory are initialised |
*/ |
|
void __init ipc_init (void)
{
	/* Bring up each System V IPC subsystem in turn. */
	sem_init();
	msg_init();
	shm_init();
}
|
/** |
* ipc_init_ids - initialise IPC identifiers |
* @ids: Identifier set |
* @size: Number of identifiers |
* |
* Given a size for the ipc identifier range (limited below IPCMNI) |
* set up the sequence range to use then allocate and initialise the |
* array itself. |
*/ |
|
void __init ipc_init_ids(struct ipc_ids* ids, int size)
{
	int seq_limit;
	int i;

	sema_init(&ids->sem, 1);

	/* Hard upper bound on the identifier range. */
	if (size > IPCMNI)
		size = IPCMNI;

	ids->size = size;
	ids->in_use = 0;
	ids->max_id = -1;
	ids->seq = 0;

	/* Cap seq so that id = seq*SEQ_MULTIPLIER + lid stays a
	 * representable int. */
	seq_limit = INT_MAX/SEQ_MULTIPLIER;
	if (seq_limit > USHRT_MAX)
		ids->seq_max = USHRT_MAX;
	else
		ids->seq_max = seq_limit;

	ids->entries = ipc_alloc(sizeof(struct ipc_id)*size);
	if (ids->entries == NULL) {
		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
		ids->size = 0;	/* empty set; the loop below is a no-op */
	}
	ids->ary = SPIN_LOCK_UNLOCKED;
	for (i = 0; i < ids->size; i++)
		ids->entries[i].p = NULL;
}
|
/** |
* ipc_findkey - find a key in an ipc identifier set |
* @ids: Identifier set |
* @key: The key to find |
* |
* Returns the identifier if found or -1 if not. |
*/ |
|
int ipc_findkey(struct ipc_ids* ids, key_t key)
{
	int id;

	/* Linear scan over the in-use prefix of the id array. */
	for (id = 0; id <= ids->max_id; id++) {
		struct kern_ipc_perm* p = ids->entries[id].p;

		if (p != NULL && p->key == key)
			return id;
	}
	return -1;
}
|
/*
 * grow_ary - enlarge an identifier set's entries array.
 * @ids:     identifier set to grow
 * @newsize: requested slot count (clamped to IPCMNI)
 *
 * Returns the resulting array size; unchanged on shrink requests or
 * allocation failure.  The new array is built *outside* ids->ary and
 * only the pointer/size swap happens under the spinlock, so lock
 * holders always see a consistent array; the old array is freed after
 * the lock is dropped because ipc_free() may sleep.  Callers (see
 * ipc_addid) hold ids->sem, which serialises concurrent growers.
 */
static int grow_ary(struct ipc_ids* ids, int newsize)
{
	struct ipc_id* new;
	struct ipc_id* old;
	int i;

	if(newsize > IPCMNI)
		newsize = IPCMNI;
	if(newsize <= ids->size)
		return newsize;

	new = ipc_alloc(sizeof(struct ipc_id)*newsize);
	if(new == NULL)
		return ids->size;
	/* Copy existing entries, NULL-init the new tail. */
	memcpy(new, ids->entries, sizeof(struct ipc_id)*ids->size);
	for(i=ids->size;i<newsize;i++) {
		new[i].p = NULL;
	}
	spin_lock(&ids->ary);

	old = ids->entries;
	ids->entries = new;
	i = ids->size;
	ids->size = newsize;
	spin_unlock(&ids->ary);
	ipc_free(old, sizeof(struct ipc_id)*i);
	return ids->size;
}
|
/** |
* ipc_addid - add an IPC identifier |
* @ids: IPC identifier set |
* @new: new IPC permission set |
* @size: new size limit for the id array |
* |
* Add an entry 'new' to the IPC arrays. The permissions object is |
* initialised and the first free entry is set up and the id assigned |
* is returned. The list is returned in a locked state on success. |
* On failure the list is not locked and -1 is returned. |
*/ |
|
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
	int id;

	/* Ensure capacity for 'size' slots, then grab the first free one. */
	size = grow_ary(ids,size);
	for (id = 0; id < size; id++) {
		if(ids->entries[id].p == NULL)
			goto found;
	}
	return -1;
found:
	ids->in_use++;
	if (id > ids->max_id)
		ids->max_id = id;

	/* Creator and owner both start as the caller's effective ids. */
	new->cuid = new->uid = current->euid;
	new->gid = new->cgid = current->egid;

	/* Per-allocation sequence number lets stale ids be detected. */
	new->seq = ids->seq++;
	if(ids->seq > ids->seq_max)
		ids->seq = 0;

	/* Publish under the array lock; on success the lock is left
	 * held for the caller to release. */
	spin_lock(&ids->ary);
	ids->entries[id].p = new;
	return id;
}
|
/** |
* ipc_rmid - remove an IPC identifier |
* @ids: identifier set |
* @id: Identifier to remove |
* |
* The identifier must be valid, and in use. The kernel will panic if |
* fed an invalid identifier. The entry is removed and internal |
* variables recomputed. The object associated with the identifier |
* is returned. |
*/ |
|
struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) |
{ |
struct kern_ipc_perm* p; |
int lid = id % SEQ_MULTIPLIER; |
if(lid >= ids->size) |
BUG(); |
p = ids->entries[lid].p; |
ids->entries[lid].p = NULL; |
if(p==NULL) |
BUG(); |
ids->in_use--; |
|
if (lid == ids->max_id) { |
do { |
lid--; |
if(lid == -1) |
break; |
} while (ids->entries[lid].p == NULL); |
ids->max_id = lid; |
} |
return p; |
} |
|
/** |
* ipc_alloc - allocate ipc space |
* @size: size desired |
* |
* Allocate memory from the appropriate pools and return a pointer to it. |
* NULL is returned if the allocation fails |
*/ |
|
void* ipc_alloc(int size) |
{ |
void* out; |
if(size > PAGE_SIZE) |
out = vmalloc(size); |
else |
out = kmalloc(size, GFP_KERNEL); |
return out; |
} |
|
/** |
* ipc_free - free ipc space |
* @ptr: pointer returned by ipc_alloc |
* @size: size of block |
* |
* Free a block created with ipc_alloc. The caller must know the size |
* used in the allocation call. |
*/ |
|
void ipc_free(void* ptr, int size) |
{ |
if(size > PAGE_SIZE) |
vfree(ptr); |
else |
kfree(ptr); |
} |
|
/** |
* ipcperms - check IPC permissions |
* @ipcp: IPC permission set |
* @flag: desired permission set. |
* |
* Check user, group, other permissions for access |
* to ipc resources. return 0 if allowed |
*/ |
|
int ipcperms (struct kern_ipc_perm *ipcp, short flag) |
{ /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */ |
int requested_mode, granted_mode; |
|
requested_mode = (flag >> 6) | (flag >> 3) | flag; |
granted_mode = ipcp->mode; |
if (current->euid == ipcp->cuid || current->euid == ipcp->uid) |
granted_mode >>= 6; |
else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid)) |
granted_mode >>= 3; |
/* is there some bit set in requested_mode but not in granted_mode? */ |
if ((requested_mode & ~granted_mode & 0007) && |
!capable(CAP_IPC_OWNER)) |
return -1; |
|
return 0; |
} |
|
/* |
* Functions to convert between the kern_ipc_perm structure and the |
* old/new ipc_perm structures |
*/ |
|
/** |
* kernel_to_ipc64_perm - convert kernel ipc permissions to user |
* @in: kernel permissions |
* @out: new style IPC permissions |
* |
* Turn the kernel object 'in' into a set of permissions descriptions |
* for returning to userspace (out). |
*/ |
|
|
void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) |
{ |
out->key = in->key; |
out->uid = in->uid; |
out->gid = in->gid; |
out->cuid = in->cuid; |
out->cgid = in->cgid; |
out->mode = in->mode; |
out->seq = in->seq; |
} |
|
/** |
* ipc64_perm_to_ipc_perm - convert old ipc permissions to new |
* @in: new style IPC permissions |
* @out: old style IPC permissions |
* |
* Turn the new style permissions object in into a compatibility |
* object and store it into the 'out' pointer. |
*/ |
|
void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) |
{ |
out->key = in->key; |
out->uid = NEW_TO_OLD_UID(in->uid); |
out->gid = NEW_TO_OLD_GID(in->gid); |
out->cuid = NEW_TO_OLD_UID(in->cuid); |
out->cgid = NEW_TO_OLD_GID(in->cgid); |
out->mode = in->mode; |
out->seq = in->seq; |
} |
|
#if !defined(__ia64__) && !defined(__hppa__) |
|
/** |
* ipc_parse_version - IPC call version |
* @cmd: pointer to command |
* |
* Return IPC_64 for new style IPC and IPC_OLD for old style IPC. |
* The cmd value is turned from an encoding command and version into |
* just the command code. |
*/ |
|
int ipc_parse_version (int *cmd) |
{ |
#ifdef __x86_64__ |
if (!(current->thread.flags & THREAD_IA32)) |
return IPC_64; |
#endif |
if (*cmd & IPC_64) { |
*cmd ^= IPC_64; |
return IPC_64; |
} else { |
return IPC_OLD; |
} |
} |
|
#endif /* __ia64__ */ |
|
#else |
/* |
* Dummy functions when SYSV IPC isn't configured |
*/ |
|
/* Nothing to clean up at task exit when SysV IPC is compiled out. */
void sem_exit (void)
{
	return;
}

/* Semaphore syscalls: all report "not implemented". */
asmlinkage long sys_semget (key_t key, int nsems, int semflg)
{
	return -ENOSYS;
}

asmlinkage long sys_semop (int semid, struct sembuf *sops, unsigned nsops)
{
	return -ENOSYS;
}

asmlinkage long sys_semtimedop(int semid, struct sembuf *sops, unsigned nsops,
			       const struct timespec *timeout)
{
	return -ENOSYS;
}

asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
{
	return -ENOSYS;
}

/* Message-queue syscalls: all report "not implemented". */
asmlinkage long sys_msgget (key_t key, int msgflg)
{
	return -ENOSYS;
}

asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
{
	return -ENOSYS;
}

asmlinkage long sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz, long msgtyp,
		       int msgflg)
{
	return -ENOSYS;
}

asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
{
	return -ENOSYS;
}

/* Shared-memory syscalls: all report "not implemented". */
asmlinkage long sys_shmget (key_t key, size_t size, int shmflag)
{
	return -ENOSYS;
}

asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *addr)
{
	return -ENOSYS;
}

asmlinkage long sys_shmdt (char *shmaddr)
{
	return -ENOSYS;
}

asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
	return -ENOSYS;
}
|
#endif /* CONFIG_SYSVIPC */ |