c0or1k/trunk/conts/posix/mm0/mm/shm.c (rev 2)

/*
 * Copyright (C) 2007, 2008 Bahadir Balban
 *
 * Posix shared memory implementation
 */
#include <shm.h>
#include <stdio.h>
#include <task.h>
#include <mmap.h>
#include <memory.h>
#include <vm_area.h>
#include <globals.h>
#include <malloc/malloc.h>
#include L4LIB_INC_ARCH(syscalls.h)
#include L4LIB_INC_ARCH(syslib.h)
#include <lib/idpool.h>
#include <lib/addr.h>
#include <lib/spinlock.h>
#include <l4/api/errno.h>
#include <l4/lib/list.h>
#include <l4/macros.h>
#include <l4/config.h>
#include <l4/types.h>
#include INC_GLUE(memlayout.h)
#include <posix/sys/ipc.h>
#include <posix/sys/shm.h>
#include <posix/sys/types.h>

/*
 * FIXME:
 *
 * All of this state is stored as private_file_data in the vm_file.
 * However, this really needs a pseudo-fs infrastructure that stores
 * these internals under the vnode->inode field.
 */
#define shm_file_to_desc(shm_file)      \
        ((struct shm_descriptor *)(shm_file)->private_file_data)

/* Unique shared memory ids */
static struct id_pool *shm_ids;

/* Globally disjoint shm virtual address pool */
static struct address_pool shm_vaddr_pool;

void *shm_new_address(int npages)
{
        return address_new(&shm_vaddr_pool, npages);
}

int shm_delete_address(void *shm_addr, int npages)
{
        return address_del(&shm_vaddr_pool, shm_addr, npages);
}

int shm_pool_init(void)
{
        int err;

        /* Initialise shm id pool */
        if (IS_ERR(shm_ids = id_pool_new_init(SHM_AREA_MAX))) {
                printf("SHM id pool initialisation failed.\n");
                return (int)shm_ids;
        }

        /* Initialise the global shm virtual address pool */
        if ((err =
             address_pool_init(&shm_vaddr_pool,
                               __pfn_to_addr(cont_mem_regions.shmem->start),
                               __pfn_to_addr(cont_mem_regions.shmem->end)))
             < 0) {
                printf("SHM address pool initialisation failed.\n");
                return err;
        }
        return 0;
}
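
/*
 * Illustrative sketch (not called from this file): the intended
 * pairing of the pool calls above. shm_pool_init() is assumed to run
 * once at pager startup; shm_new_address() then hands out globally
 * disjoint, page-granular virtual ranges that are given back with
 * shm_delete_address(). The 4-page size is arbitrary.
 */
#if 0
static void shm_pool_usage_sketch(void)
{
        void *base;

        BUG_ON(shm_pool_init() < 0);

        /* Grab a 4-page range from the global shm window */
        base = shm_new_address(4);

        /* ... a segment would live at base ... */

        /* Return the range to the pool */
        BUG_ON(shm_delete_address(base, 4) < 0);
}
#endif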

/*
 * Attaches the task to the given shm segment at shm_addr. If the shm
 * descriptor already has a base address assigned, the supplied address
 * must match it. If neither shm_addr nor the descriptor has an address,
 * one is allocated from the shm address pool.
 */
static void *do_shmat(struct vm_file *shm_file, void *shm_addr, int shmflg,
                      struct tcb *task)
{
        struct shm_descriptor *shm = shm_file_to_desc(shm_file);
        unsigned int vmflags;
        void *mapped;

        if (!task) {
                /* Don't dereference a null task for its tid */
                printf("%s:%s: Cannot find caller task\n",
                       __TASKNAME__, __FUNCTION__);
                BUG();
        }

        if ((unsigned long)shm_addr & PAGE_MASK) {
                if (shmflg & SHM_RND)
                        shm_addr = (void *)page_align(shm_addr);
                else
                        return PTR_ERR(-EINVAL);
        }

        /* Set mmap flags for segment */
        vmflags = VM_READ | VMA_SHARED | VMA_ANONYMOUS;
        vmflags |= (shmflg & SHM_RDONLY) ? 0 : VM_WRITE;

        /*
         * Currently all tasks use the same address for each unique segment.
         * If an address is already assigned, the supplied address must match
         * the original one. We don't check the object map count because
         * utcb addresses are assigned before being mapped. NOTE: We may do
         * all this in a specific shm_mmap() call in do_mmap() in the future.
         */
        if (shm->shm_addr) {
                if (shm_addr && (shm->shm_addr != shm_addr))
                        return PTR_ERR(-EINVAL);
        }

        /*
         * mmap the area to the process as shared. The page fault
         * handler will allocate and page in the shared pages.
         */
        if (IS_ERR(mapped = do_mmap(shm_file, 0, task,
                                    (unsigned long)shm_addr,
                                    vmflags, shm->npages))) {
                printf("do_mmap: Mapping shm area failed with %d.\n",
                       (int)mapped);
                return PTR_ERR(mapped);
        }

        /* Assign new shm address if not assigned */
        if (!shm->shm_addr)
                shm->shm_addr = mapped;
        else
                BUG_ON(shm->shm_addr != mapped);

        return shm->shm_addr;
}
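
/*
 * Worked illustration of the alignment rule in do_shmat() above
 * (a sketch, assuming 4K pages and page_align() rounding down):
 *
 *      do_shmat(f, (void *)0x20001200, 0, task)        => -EINVAL
 *      do_shmat(f, (void *)0x20001200, SHM_RND, task)  => attaches at 0x20001000
 *      do_shmat(f, 0, 0, task)                         => pool-assigned address
 */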

void *sys_shmat(struct tcb *task, l4id_t shmid, void *shmaddr, int shmflg)
{
        struct vm_file *shm_file, *n;

        list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list) {
                if (shm_file->type == VM_FILE_SHM &&
                    shm_file_to_desc(shm_file)->shmid == shmid)
                        return do_shmat(shm_file, shmaddr,
                                        shmflg, task);
        }

        return PTR_ERR(-EINVAL);
}

int do_shmdt(struct tcb *task, struct vm_file *shm)
{
        int err;

        if ((err = do_munmap(task,
                             (unsigned long)shm_file_to_desc(shm)->shm_addr,
                             shm_file_to_desc(shm)->npages)) < 0)
                return err;

        return 0;
}

int sys_shmdt(struct tcb *task, const void *shmaddr)
{
        struct vm_file *shm_file, *n;

        list_foreach_removable_struct(shm_file, n, &global_vm_files.list, list)
                if (shm_file->type == VM_FILE_SHM &&
                    shm_file_to_desc(shm_file)->shm_addr == shmaddr)
                        return do_shmdt(task, shm_file);

        return -EINVAL;
}

/*
 * Finds out which address pool the shm area came from and returns
 * the address back to that pool. There are two pools: one for utcbs
 * and one for regular shm segments.
 */
void shm_destroy_priv_data(struct vm_file *shm_file)
{
        struct shm_descriptor *shm_desc = shm_file_to_desc(shm_file);

        /* Release the shared memory address */
        BUG_ON(shm_delete_address(shm_desc->shm_addr,
                                  shm_file->vm_obj.npages) < 0);

        /* Release the shared memory id */
        BUG_ON(id_del(shm_ids, shm_desc->shmid) < 0);

        /* Now delete the private data itself */
        kfree(shm_desc);
}

/* Creates an shm area and glues its details onto the vm file and its pager */
struct vm_file *shm_new(key_t key, unsigned long npages)
{
        struct shm_descriptor *shm_desc;
        struct vm_file *shm_file;

        BUG_ON(!npages);

        /* Allocate file and shm structures */
        if (IS_ERR(shm_file = vm_file_create()))
                return PTR_ERR(shm_file);

        if (!(shm_desc = kzalloc(sizeof(struct shm_descriptor)))) {
                kfree(shm_file);
                return PTR_ERR(-ENOMEM);
        }

        /* Initialise the shm descriptor */
        if (IS_ERR(shm_desc->shmid = id_new(shm_ids))) {
                kfree(shm_file);
                kfree(shm_desc);
                return PTR_ERR(shm_desc->shmid);
        }
        shm_desc->key = (int)key;
        shm_desc->npages = npages;

        /* Initialise the file */
        shm_file->length = __pfn_to_addr(npages);
        shm_file->type = VM_FILE_SHM;
        shm_file->private_file_data = shm_desc;
        shm_file->destroy_priv_data = shm_destroy_priv_data;

        /* Initialise the vm object */
        shm_file->vm_obj.pager = &swap_pager;
        shm_file->vm_obj.flags = VM_OBJ_FILE | VM_WRITE;

        /* Add to shm file and global object list */
        global_add_vm_file(shm_file);

        return shm_file;
}
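
/*
 * Pairing sketch (illustrative, not called from this file): shm_new()
 * installs shm_destroy_priv_data() as the file's destroy_priv_data
 * callback, so generic vm_file teardown returns the shm id and the
 * segment's address range to their pools. A keyless private segment
 * would be created like this; the 4-page size is arbitrary.
 */
#if 0
static struct vm_file *shm_private_segment_sketch(void)
{
        /* -1 is this file's "no key" convention (see sys_shmget) */
        struct vm_file *f = shm_new((key_t)-1, 4);

        if (IS_ERR(f))
                return f;

        /* On final destruction, teardown invokes f->destroy_priv_data(f) */
        return f;
}
#endif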

/*
 * FIXME: Make sure hostile tasks don't subvert other tasks' shared pages
 * by early-registering their shared page address here.
 */
int sys_shmget(key_t key, int size, int shmflg)
{
        unsigned long npages = __pfn(page_align_up(size));
        struct shm_descriptor *shm_desc;
        struct vm_file *shm;

        /* First check argument validity */
        if (npages > SHM_SHMMAX || npages < SHM_SHMMIN)
                return -EINVAL;

        /*
         * IPC_PRIVATE means create a keyless shm area, i.e. one private
         * to this process that it shares only with its forked children.
         */
        if (key == IPC_PRIVATE) {
                key = -1;               /* Our meaning of no key */
                if (IS_ERR(shm = shm_new(key, npages)))
                        return -ENOSPC;
                else
                        return shm_file_to_desc(shm)->shmid;
        }

        list_foreach_struct(shm, &global_vm_files.list, list) {
                if (shm->type != VM_FILE_SHM)
                        continue;

                shm_desc = shm_file_to_desc(shm);

                if (shm_desc->key == key) {
                        /*
                         * Exclusive means a create request
                         * on an existing key should fail.
                         */
                        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                                return -EEXIST;

                        /* Found it, but is the requested size too big? */
                        if (shm_desc->npages < npages)
                                return -EINVAL;

                        /* Return shmid of the existing key */
                        return shm_desc->shmid;
                }
        }

        /* Key doesn't exist and create is set, so we create */
        if (shmflg & IPC_CREAT) {
                if (IS_ERR(shm = shm_new(key, npages)))
                        return -ENOSPC;
                return shm_file_to_desc(shm)->shmid;
        }

        /* Key doesn't exist, yet create isn't set: it's an -ENOENT */
        return -ENOENT;
}
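
/*
 * Round-trip sketch (illustrative, not called from this file): how the
 * three entry points above compose, given a caller's struct tcb. The
 * key and the two-page size are arbitrary.
 */
#if 0
static void *shm_roundtrip_sketch(struct tcb *task)
{
        void *base;
        int shmid;

        /* Create (or find) a two-page segment under an arbitrary key */
        if ((shmid = sys_shmget(0x1234, 2 * PAGE_SIZE, IPC_CREAT)) < 0)
                return PTR_ERR(shmid);

        /* Attach at a pool-assigned address (shmaddr = 0) */
        if (IS_ERR(base = sys_shmat(task, shmid, 0, 0)))
                return base;

        /* ... the task now communicates through base ... */

        /* Detach by the address the attach returned */
        sys_shmdt(task, base);

        return base;
}
#endif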

#if 0

/*
 * Fast internal path to do shmget/shmat() together for mm0's
 * convenience. Works for existing areas.
 */
void *shmat_shmget_internal(struct tcb *task, key_t key, void *shmaddr)
{
        struct vm_file *shm_file;
        struct shm_descriptor *shm_desc;

        list_foreach_struct(shm_file, &global_vm_files.list, list) {
                if (shm_file->type == VM_FILE_SHM) {
                        shm_desc = shm_file_to_desc(shm_file);
                        /* Found the key, shmat that area */
                        if (shm_desc->key == key)
                                return do_shmat(shm_file, shmaddr,
                                                0, task);
                }
        }

        return PTR_ERR(-EEXIST);
}

/*
 * Currently, a default shm page is allocated to every thread in the system
 * for efficient ipc communication. The part below provides the allocation
 * and mapping of this page using shmat/get/dt call semantics.
 */

/*
 * Sends the shpage address information to the requester. The requester
 * then uses this address as an shm key and maps it via shmget/shmat.
 */
void *task_send_shpage_address(struct tcb *sender, l4id_t taskid)
{
        struct tcb *task = find_task(taskid);

        /* Is the task asking for its own utcb address? */
        if (sender->tid == taskid) {
                /* Bug if it hasn't got one allocated */
                BUG_ON(!task->shared_page);

                /* Return it to requester */
                return task->shared_page;

        /* A task is asking for someone else's utcb */
        } else {
                /* Only vfs is allowed to do so yet, because it's a server */
                if (sender->tid == VFS_TID) {
                        /*
                         * Return shpage address to requester. Note if there's
                         * none allocated so far, requester gets 0. We don't
                         * allocate one here.
                         */
                        return task->shared_page;
                }
        }
        return 0;
}

int shpage_map_to_task(struct tcb *owner, struct tcb *mapper, unsigned int flags)
{
        struct vm_file *default_shm;

        /* Allocate a new shared page address */
        if (flags & SHPAGE_NEW_ADDRESS)
                owner->shared_page =
                        shm_new_address(DEFAULT_SHPAGE_SIZE/PAGE_SIZE);
        else if (!owner->shared_page)
                BUG();

        /* Create a new shared memory segment */
        if (flags & SHPAGE_NEW_SHM)
                if (IS_ERR(default_shm = shm_new((key_t)owner->shared_page,
                                                 __pfn(DEFAULT_SHPAGE_SIZE))))
                        return (int)default_shm;

        /* Map the shared page to mapper */
        if (IS_ERR(shmat_shmget_internal(mapper, (key_t)owner->shared_page,
                                         owner->shared_page)))
                BUG();

        /* Prefault the owner's shared page to mapper's address space */
        if (flags & SHPAGE_PREFAULT)
                for (int i = 0; i < __pfn(DEFAULT_SHPAGE_SIZE); i++)
                        task_prefault_page(mapper, (unsigned long)owner->shared_page +
                                           __pfn_to_addr(i), VM_READ | VM_WRITE);
        return 0;
}

int shpage_unmap_from_task(struct tcb *owner, struct tcb *mapper)
{
        return sys_shmdt(mapper, owner->shared_page);
}
#endif
