/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *         Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 */

/*
 * uClinux revisions for NO_MM
 * Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
 *                     The Silver Hammer Group, Ltd.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/ipc.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/malloc.h>
#include <linux/swap.h>
#include <linux/swapctl.h>

#include <asm/segment.h>
#include <asm/pgtable.h>

#ifndef NO_MM

extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
extern unsigned long get_swap_page (void);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, int size);
static int shm_map (struct vm_area_struct *shmd);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static pte_t shm_swap_in(struct vm_area_struct *, unsigned long, unsigned long);

static int shm_tot = 0; /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
static int shm_swp = 0; /* number of shared memory pages that are in swap */
static int max_shmid = 0; /* every used id is <= max_shmid */
static struct wait_queue *shm_lock = NULL; /* calling findkey() may need to wait */
static struct shmid_ds *shm_segs[SHMMNI];

static unsigned short shm_seq = 0; /* incremented, for recognizing stale ids */

/* some statistics */
static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;

void shm_init (void)
{
        int id;

        for (id = 0; id < SHMMNI; id++)
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
        shm_lock = NULL;
        return;
}

static int findkey (key_t key)
{
        int id;
        struct shmid_ds *shp;

        for (id = 0; id <= max_shmid; id++) {
                while ((shp = shm_segs[id]) == IPC_NOID)
                        sleep_on (&shm_lock);
                if (shp == IPC_UNUSED)
                        continue;
                if (key == shp->shm_perm.key)
                        return id;
        }
        return -1;
}

/*
 * allocate new shmid_ds and pgtable. protected by shm_segs[id] = NOID.
 */
static int newseg (key_t key, int shmflg, int size)
{
        struct shmid_ds *shp;
        int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
        int id, i;

        if (size < SHMMIN)
                return -EINVAL;
        if (shm_tot + numpages >= SHMALL)
                return -ENOSPC;
        for (id = 0; id < SHMMNI; id++)
                if (shm_segs[id] == IPC_UNUSED) {
                        shm_segs[id] = (struct shmid_ds *) IPC_NOID;
                        goto found;
                }
        return -ENOSPC;

found:
        shp = (struct shmid_ds *) kmalloc (sizeof (*shp), GFP_KERNEL);
        if (!shp) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                wake_up (&shm_lock);
                return -ENOMEM;
        }

        shp->shm_pages = (ulong *) kmalloc (numpages*sizeof(ulong),GFP_KERNEL);
        if (!shp->shm_pages) {
                shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
                wake_up (&shm_lock);
                kfree(shp);
                return -ENOMEM;
        }

        for (i = 0; i < numpages; shp->shm_pages[i++] = 0);
        shm_tot += numpages;
        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->shm_perm.cuid = shp->shm_perm.uid = current->euid;
        shp->shm_perm.cgid = shp->shm_perm.gid = current->egid;
        shp->shm_perm.seq = shm_seq;
        shp->shm_segsz = size;
        shp->shm_cpid = current->pid;
        shp->attaches = NULL;
        shp->shm_lpid = shp->shm_nattch = 0;
        shp->shm_atime = shp->shm_dtime = 0;
        shp->shm_ctime = CURRENT_TIME;
        shp->shm_npages = numpages;

        if (id > max_shmid)
                max_shmid = id;
        shm_segs[id] = shp;
        used_segs++;
        wake_up (&shm_lock);
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}
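
/*
 * Note on the slot reservation protocol above (explanatory comment, not part
 * of the original source): newseg() marks a free slot with IPC_NOID before it
 * may sleep in kmalloc(), so a concurrent findkey() that reaches that slot
 * will sleep_on(&shm_lock) until the allocation either completes (the slot
 * points at the new shmid_ds) or fails (the slot is reset to IPC_UNUSED);
 * both paths end with wake_up(&shm_lock).
 */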

asmlinkage int sys_shmget (key_t key, int size, int shmflg)
{
        struct shmid_ds *shp;
        int id = 0;

        if (size < 0 || size > SHMMAX)
                return -EINVAL;
        if (key == IPC_PRIVATE)
                return newseg(key, shmflg, size);
        if ((id = findkey (key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        return -ENOENT;
                return newseg(key, shmflg, size);
        }
        if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL))
                return -EEXIST;
        shp = shm_segs[id];
        if (shp->shm_perm.mode & SHM_DEST)
                return -EIDRM;
        if (size > shp->shm_segsz)
                return -EINVAL;
        if (ipcperms (&shp->shm_perm, shmflg))
                return -EACCES;
        return (unsigned int) shp->shm_perm.seq * SHMMNI + id;
}
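
/*
 * Explanatory note (added, not part of the original source): the shmid
 * returned above packs a sequence number and a table slot as
 *
 *         shmid = seq * SHMMNI + slot
 *
 * so, with SHMMNI of, say, 128, slot 5 of a segment created while the
 * sequence counter was 3 would yield shmid 389.  sys_shmctl() and sys_shmat()
 * recover the slot with shmid % SHMMNI and reject stale ids by checking
 * shm_perm.seq against shmid / SHMMNI.
 */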

/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_ds are freed.
 */
static void killseg (int id)
{
        struct shmid_ds *shp;
        int i, numpages;

        shp = shm_segs[id];
        if (shp == IPC_NOID || shp == IPC_UNUSED) {
                printk ("shm nono: killseg called on unused seg id=%d\n", id);
                return;
        }
        shp->shm_perm.seq++;     /* for shmat */
        shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI); /* increment, but avoid overflow */
        shm_segs[id] = (struct shmid_ds *) IPC_UNUSED;
        used_segs--;
        if (id == max_shmid)
                while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
        if (!shp->shm_pages) {
                printk ("shm nono: killseg shp->pages=NULL. id=%d\n", id);
                return;
        }
        numpages = shp->shm_npages;
        for (i = 0; i < numpages ; i++) {
                pte_t pte;
                pte_val(pte) = shp->shm_pages[i];
                if (pte_none(pte))
                        continue;
                if (pte_present(pte)) {
                        free_page (pte_page(pte));
                        shm_rss--;
                } else {
                        swap_free(pte_val(pte));
                        shm_swp--;
                }
        }
        kfree(shp->shm_pages);
        shm_tot -= numpages;
        kfree(shp);
        return;
}

asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shmid_ds tbuf;
        struct shmid_ds *shp;
        struct ipc_perm *ipcp;
        int id, err;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;
        if (cmd == IPC_SET) {
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_READ, buf, sizeof (*buf));
                if (err)
                        return err;
                memcpy_fromfs (&tbuf, buf, sizeof (*buf));
        }

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo shminfo;
                if (!buf)
                        return -EFAULT;
                shminfo.shmmni = SHMMNI;
                shminfo.shmmax = SHMMAX;
                shminfo.shmmin = SHMMIN;
                shminfo.shmall = SHMALL;
                shminfo.shmseg = SHMSEG;
                err = verify_area (VERIFY_WRITE, buf, sizeof (struct shminfo));
                if (err)
                        return err;
                memcpy_tofs (buf, &shminfo, sizeof(struct shminfo));
                return max_shmid;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (shm_info));
                if (err)
                        return err;
                shm_info.used_ids = used_segs;
                shm_info.shm_rss = shm_rss;
                shm_info.shm_tot = shm_tot;
                shm_info.shm_swp = shm_swp;
                shm_info.swap_attempts = swap_attempts;
                shm_info.swap_successes = swap_successes;
                memcpy_tofs (buf, &shm_info, sizeof(shm_info));
                return max_shmid;
        }
        case SHM_STAT:
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                if (shmid > max_shmid)
                        return -EINVAL;
                shp = shm_segs[shmid];
                if (shp == IPC_UNUSED || shp == IPC_NOID)
                        return -EINVAL;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        return -EACCES;
                id = (unsigned int) shp->shm_perm.seq * SHMMNI + shmid;
                tbuf.shm_perm   = shp->shm_perm;
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atime;
                tbuf.shm_dtime  = shp->shm_dtime;
                tbuf.shm_ctime  = shp->shm_ctime;
                tbuf.shm_cpid   = shp->shm_cpid;
                tbuf.shm_lpid   = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                return id;
        }

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID)
                return -EINVAL;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;
        ipcp = &shp->shm_perm;

        switch (cmd) {
        case SHM_UNLOCK:
                if (!suser())
                        return -EPERM;
                if (!(ipcp->mode & SHM_LOCKED))
                        return -EINVAL;
                ipcp->mode &= ~SHM_LOCKED;
                break;
        case SHM_LOCK:
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
                if (!suser())
                        return -EPERM;
                if (ipcp->mode & SHM_LOCKED)
                        return -EINVAL;
                ipcp->mode |= SHM_LOCKED;
                break;
        case IPC_STAT:
                if (ipcperms (ipcp, S_IRUGO))
                        return -EACCES;
                if (!buf)
                        return -EFAULT;
                err = verify_area (VERIFY_WRITE, buf, sizeof (*buf));
                if (err)
                        return err;
                tbuf.shm_perm   = shp->shm_perm;
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atime;
                tbuf.shm_dtime  = shp->shm_dtime;
                tbuf.shm_ctime  = shp->shm_ctime;
                tbuf.shm_cpid   = shp->shm_cpid;
                tbuf.shm_lpid   = shp->shm_lpid;
                tbuf.shm_nattch = shp->shm_nattch;
                memcpy_tofs (buf, &tbuf, sizeof(*buf));
                break;
        case IPC_SET:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        ipcp->uid = tbuf.shm_perm.uid;
                        ipcp->gid = tbuf.shm_perm.gid;
                        ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                                | (tbuf.shm_perm.mode & S_IRWXUGO);
                        shp->shm_ctime = CURRENT_TIME;
                        break;
                }
                return -EPERM;
        case IPC_RMID:
                if (suser() || current->euid == shp->shm_perm.uid ||
                    current->euid == shp->shm_perm.cuid) {
                        shp->shm_perm.mode |= SHM_DEST;
                        if (shp->shm_nattch <= 0)
                                killseg (id);
                        break;
                }
                return -EPERM;
        default:
                return -EINVAL;
        }
        return 0;
}

/*
 * The per process internal structure for managing segments is
 * `struct vm_area_struct'.
 * A shmat will add to and shmdt will remove from the list.
 * shmd->vm_mm          the attacher
 * shmd->vm_start       virt addr of attach, multiple of SHMLBA
 * shmd->vm_end         multiple of SHMLBA
 * shmd->vm_next        next attach for task
 * shmd->vm_next_share  next attach for segment
 * shmd->vm_offset      offset into segment
 * shmd->vm_pte         signature for this attach
 */

static struct vm_operations_struct shm_vm_ops = {
        shm_open,               /* open - callback for a new vm-area open */
        shm_close,              /* close - callback for when the vm-area is released */
        NULL,                   /* no need to sync pages at unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        NULL,                   /* nopage (done with swapin) */
        NULL,                   /* wppage */
        NULL,                   /* swapout (hardcoded right now) */
        shm_swap_in             /* swapin */
};
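
/*
 * Explanatory note (added, not part of the original source): vm_pte is the
 * per-attach "signature".  sys_shmat() sets it to SWP_ENTRY(SHM_SWP_TYPE, id),
 * and shm_map() stores one non-present pte per page built from this signature
 * plus the page index shifted by SHM_IDX_SHIFT.  shm_swap_in() later decodes
 * the segment id and page index back out of such a pte when the page is
 * first touched.
 */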

/* Insert shmd into the circular list shp->attaches */
static inline void insert_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
{
        struct vm_area_struct * attaches;

        if ((attaches = shp->attaches)) {
                shmd->vm_next_share = attaches;
                shmd->vm_prev_share = attaches->vm_prev_share;
                shmd->vm_prev_share->vm_next_share = shmd;
                attaches->vm_prev_share = shmd;
        } else
                shp->attaches = shmd->vm_next_share = shmd->vm_prev_share = shmd;
}

/* Remove shmd from circular list shp->attaches */
static inline void remove_attach (struct shmid_ds * shp, struct vm_area_struct * shmd)
{
        if (shmd->vm_next_share == shmd) {
                if (shp->attaches != shmd) {
                        printk("shm_close: shm segment (id=%ld) attach list inconsistent\n",
                               SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                        printk("shm_close: %08lx-%08lx %c%c%c%c %08lx %08lx\n",
                                shmd->vm_start, shmd->vm_end,
                                shmd->vm_flags & VM_READ ? 'r' : '-',
                                shmd->vm_flags & VM_WRITE ? 'w' : '-',
                                shmd->vm_flags & VM_EXEC ? 'x' : '-',
                                shmd->vm_flags & VM_MAYSHARE ? 's' : 'p',
                                shmd->vm_offset, shmd->vm_pte);
                }
                shp->attaches = NULL;
        } else {
                if (shp->attaches == shmd)
                        shp->attaches = shmd->vm_next_share;
                shmd->vm_prev_share->vm_next_share = shmd->vm_next_share;
                shmd->vm_next_share->vm_prev_share = shmd->vm_prev_share;
        }
}

/*
 * ensure page tables exist
 * mark page table entries with shm_sgn.
 */
static int shm_map (struct vm_area_struct *shmd)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t *page_table;
        unsigned long tmp, shm_sgn;
        int error;

        /* clear old mappings */
        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);

        /* add new mapping */
        tmp = shmd->vm_end - shmd->vm_start;
        if((current->mm->total_vm << PAGE_SHIFT) + tmp
           > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;
        current->mm->total_vm += tmp >> PAGE_SHIFT;
        insert_vm_struct(current->mm, shmd);
        merge_segments(current->mm, shmd->vm_start, shmd->vm_end);

        /* map page range */
        error = 0;
        shm_sgn = shmd->vm_pte +
          SWP_ENTRY(0, (shmd->vm_offset >> PAGE_SHIFT) << SHM_IDX_SHIFT);
        flush_cache_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
        for (tmp = shmd->vm_start;
             tmp < shmd->vm_end;
             tmp += PAGE_SIZE, shm_sgn += SWP_ENTRY(0, 1 << SHM_IDX_SHIFT))
        {
                page_dir = pgd_offset(shmd->vm_mm,tmp);
                page_middle = pmd_alloc(page_dir,tmp);
                if (!page_middle) {
                        error = -ENOMEM;
                        break;
                }
                page_table = pte_alloc(page_middle,tmp);
                if (!page_table) {
                        error = -ENOMEM;
                        break;
                }
                set_pte(page_table, __pte(shm_sgn));
        }
        flush_tlb_range(shmd->vm_mm, shmd->vm_start, shmd->vm_end);
        return error;
}
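
/*
 * Explanatory note (added, not part of the original source): for an attach of
 * segment id with vm_offset 0, the loop above leaves page 0 with the pte
 * value SWP_ENTRY(SHM_SWP_TYPE, id), page 1 with
 * SWP_ENTRY(SHM_SWP_TYPE, id + (1 << SHM_IDX_SHIFT)), and so on.  These
 * signature ptes are never present mappings, so the first access to each page
 * faults and is resolved by shm_swap_in().
 */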

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 */
asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        int err;
        unsigned int id;
        unsigned long addr;
        unsigned long len;

        if (shmid < 0) {
                /* printk("shmat() -> EINVAL because shmid = %d < 0\n",shmid); */
                return -EINVAL;
        }

        shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                /* printk("shmat() -> EINVAL because shmid = %d is invalid\n",shmid); */
                return -EINVAL;
        }

        if (!(addr = (ulong) shmaddr)) {
                if (shmflg & SHM_REMAP)
                        return -EINVAL;
                if (!(addr = get_unmapped_area(0, shp->shm_segsz)))
                        return -ENOMEM;
        } else if (addr & (SHMLBA-1)) {
                if (shmflg & SHM_RND)
                        addr &= ~(SHMLBA-1);       /* round down */
                else
                        return -EINVAL;
        }
        /*
         * Check if addr exceeds MAX_USER_ADDR (from do_mmap)
         */
        len = PAGE_SIZE*shp->shm_npages;
        if (addr >= MAX_USER_ADDR || len > MAX_USER_ADDR || addr > MAX_USER_ADDR - len)
                return -EINVAL;
        /*
         * If shm segment goes below stack, make sure there is some
         * space left for the stack to grow (presently 4 pages).
         */
        if (addr < current->mm->start_stack &&
            addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
        {
                /* printk("shmat() -> EINVAL because segment intersects stack\n"); */
                return -EINVAL;
        }
        if (!(shmflg & SHM_REMAP))
                if ((shmd = find_vma_intersection(current->mm, addr, addr + shp->shm_segsz))) {
                        /* printk("shmat() -> EINVAL because the interval [0x%lx,0x%lx) intersects an already mapped interval [0x%lx,0x%lx).\n",
                                addr, addr + shp->shm_segsz, shmd->vm_start, shmd->vm_end); */
                        return -EINVAL;
                }

        if (ipcperms(&shp->shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
                return -EACCES;
        if (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)
                return -EIDRM;

        shmd = (struct vm_area_struct *) kmalloc (sizeof(*shmd), GFP_KERNEL);
        if (!shmd)
                return -ENOMEM;
        if ((shp != shm_segs[id]) || (shp->shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
                kfree(shmd);
                return -EIDRM;
        }

        shmd->vm_pte = SWP_ENTRY(SHM_SWP_TYPE, id);
        shmd->vm_start = addr;
        shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
        shmd->vm_mm = current->mm;
        shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
        shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
                         | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
                         | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
        shmd->vm_next_share = shmd->vm_prev_share = NULL;
        shmd->vm_inode = NULL;
        shmd->vm_offset = 0;
        shmd->vm_ops = &shm_vm_ops;

        shp->shm_nattch++;            /* prevent destruction */
        if ((err = shm_map (shmd))) {
                if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                        killseg(id);
                kfree(shmd);
                return err;
        }

        insert_attach(shp,shmd);  /* insert shmd into shp->attaches */

        shp->shm_lpid = current->pid;
        shp->shm_atime = CURRENT_TIME;

        *raddr = addr;
        return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
        unsigned int id;
        struct shmid_ds *shp;

        id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
        shp = shm_segs[id];
        if (shp == IPC_UNUSED) {
                printk("shm_open: unused id=%d PANIC\n", id);
                return;
        }
        insert_attach(shp,shmd);  /* insert shmd into shp->attaches */
        shp->shm_nattch++;
        shp->shm_atime = CURRENT_TIME;
        shp->shm_lpid = current->pid;
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
        struct shmid_ds *shp;
        int id;

        /* remove from the list of attaches of the shm segment */
        id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
        shp = shm_segs[id];
        remove_attach(shp,shmd);  /* remove from shp->attaches */
        shp->shm_lpid = current->pid;
        shp->shm_dtime = CURRENT_TIME;
        if (--shp->shm_nattch <= 0 && shp->shm_perm.mode & SHM_DEST)
                killseg (id);
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage int sys_shmdt (char *shmaddr)
{
        struct vm_area_struct *shmd, *shmdnext;

        for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
                shmdnext = shmd->vm_next;
                if (shmd->vm_ops == &shm_vm_ops
                    && shmd->vm_start - shmd->vm_offset == (ulong) shmaddr)
                        do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
        }
        return 0;
}
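
/*
 * Usage sketch (added, not part of the original source): from user space the
 * syscalls above are reached through the usual System V shm library calls,
 * roughly:
 *
 *      #include <sys/ipc.h>
 *      #include <sys/shm.h>
 *
 *      int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *      char *p = shmat(id, NULL, 0);     // kernel picks the address
 *      shmctl(id, IPC_RMID, NULL);       // marks SHM_DEST while still attached
 *      p[0] = 'x';                       // first touch faults into shm_swap_in()
 *      shmdt(p);                         // last detach -> shm_close() -> killseg()
 *
 * The exact entry path into sys_shm*() (e.g. via an ipc() multiplexer on some
 * architectures) is outside this file.
 */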

/*
 * page not present ... go through shm_pages
 */
static pte_t shm_swap_in(struct vm_area_struct * shmd, unsigned long offset, unsigned long code)
{
        pte_t pte;
        struct shmid_ds *shp;
        unsigned int id, idx;

        id = SWP_OFFSET(code) & SHM_ID_MASK;
        if (id != (SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK)) {
                printk ("shm_swap_in: code id = %d and shmd id = %ld differ\n",
                        id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                return BAD_PAGE;
        }
        if (id > max_shmid) {
                printk ("shm_swap_in: id=%d too big. proc mem corrupted\n", id);
                return BAD_PAGE;
        }
        shp = shm_segs[id];
        if (shp == IPC_UNUSED || shp == IPC_NOID) {
                printk ("shm_swap_in: id=%d invalid. Race.\n", id);
                return BAD_PAGE;
        }
        idx = (SWP_OFFSET(code) >> SHM_IDX_SHIFT) & SHM_IDX_MASK;
        if (idx != (offset >> PAGE_SHIFT)) {
                printk ("shm_swap_in: code idx = %u and shmd idx = %lu differ\n",
                        idx, offset >> PAGE_SHIFT);
                return BAD_PAGE;
        }
        if (idx >= shp->shm_npages) {
                printk ("shm_swap_in : too large page index. id=%d\n", id);
                return BAD_PAGE;
        }

        pte_val(pte) = shp->shm_pages[idx];
        if (!pte_present(pte)) {
                unsigned long page = get_free_page(GFP_KERNEL);
                if (!page) {
                        oom(current);
                        return BAD_PAGE;
                }
        repeat:
                pte_val(pte) = shp->shm_pages[idx];
                if (pte_present(pte)) {
                        free_page (page); /* doesn't sleep */
                        goto done;
                }
                if (!pte_none(pte)) {
                        read_swap_page(pte_val(pte), (char *) page);
                        if (pte_val(pte) != shp->shm_pages[idx])
                                goto repeat;
                        swap_free(pte_val(pte));
                        shm_swp--;
                }
                shm_rss++;

                /* Give the physical reallocated page a bigger start */
                if (shm_rss < (MAP_NR(high_memory) >> 3))
                        mem_map[MAP_NR(page)].age = (PAGE_INITIAL_AGE + PAGE_ADVANCE);

                pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
                shp->shm_pages[idx] = pte_val(pte);
        } else
                --current->maj_flt;  /* was incremented in do_no_page */

done:   /* pte_val(pte) == shp->shm_pages[idx] */
        current->min_flt++;
        mem_map[MAP_NR(pte_page(pte))].count++;
        return pte_modify(pte, shmd->vm_page_prot);
}

/*
 * Goes through counter = (shm_rss >> prio) present shm pages.
 */
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */

int shm_swap (int prio, int dma)
{
        pte_t page;
        struct page *page_map;
        struct shmid_ds *shp;
        struct vm_area_struct *shmd;
        unsigned long swap_nr;
        unsigned long id, idx;
        int loop = 0;
        int counter;

        counter = shm_rss >> prio;
        if (!counter || !(swap_nr = get_swap_page()))
                return 0;

 check_id:
        shp = shm_segs[swap_id];
        if (shp == IPC_UNUSED || shp == IPC_NOID || shp->shm_perm.mode & SHM_LOCKED ) {
                next_id:
                swap_idx = 0;
                if (++swap_id > max_shmid) {
                        if (loop)
                                goto failed;
                        loop = 1;
                        swap_id = 0;
                }
                goto check_id;
        }
        id = swap_id;

 check_table:
        idx = swap_idx++;
        if (idx >= shp->shm_npages)
                goto next_id;

        pte_val(page) = shp->shm_pages[idx];
        if (!pte_present(page))
                goto check_table;
        page_map = &mem_map[MAP_NR(pte_page(page))];
        if (PageLocked(page_map))
                goto check_table;
        if (dma && !PageDMA(page_map))
                goto check_table;
        swap_attempts++;

        if (--counter < 0) { /* failed */
                failed:
                swap_free (swap_nr);
                return 0;
        }
        if (shp->attaches)
          for (shmd = shp->attaches; ; ) {
            do {
                pgd_t *page_dir;
                pmd_t *page_middle;
                pte_t *page_table, pte;
                unsigned long tmp;

                if ((SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK) != id) {
                        printk ("shm_swap: id=%ld does not match shmd->vm_pte.id=%ld\n",
                                id, SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK);
                        continue;
                }
                tmp = shmd->vm_start + (idx << PAGE_SHIFT) - shmd->vm_offset;
                if (!(tmp >= shmd->vm_start && tmp < shmd->vm_end))
                        continue;
                page_dir = pgd_offset(shmd->vm_mm,tmp);
                if (pgd_none(*page_dir) || pgd_bad(*page_dir)) {
                        printk("shm_swap: bad pgtbl! id=%ld start=%lx idx=%ld\n",
                                        id, shmd->vm_start, idx);
                        pgd_clear(page_dir);
                        continue;
                }
                page_middle = pmd_offset(page_dir,tmp);
                if (pmd_none(*page_middle) || pmd_bad(*page_middle)) {
                        printk("shm_swap: bad pgmid! id=%ld start=%lx idx=%ld\n",
                                        id, shmd->vm_start, idx);
                        pmd_clear(page_middle);
                        continue;
                }
                page_table = pte_offset(page_middle,tmp);
                pte = *page_table;
                if (!pte_present(pte))
                        continue;
                if (pte_young(pte)) {
                        set_pte(page_table, pte_mkold(pte));
                        continue;
                }
                if (pte_page(pte) != pte_page(page))
                        printk("shm_swap_out: page and pte mismatch\n");
                flush_cache_page(shmd, tmp);
                set_pte(page_table,
                  __pte(shmd->vm_pte + SWP_ENTRY(0, idx << SHM_IDX_SHIFT)));
                mem_map[MAP_NR(pte_page(pte))].count--;
                if (shmd->vm_mm->rss > 0)
                        shmd->vm_mm->rss--;
                flush_tlb_page(shmd, tmp);
            /* continue looping through circular list */
            } while (0);
            if ((shmd = shmd->vm_next_share) == shp->attaches)
                break;
        }

        if (mem_map[MAP_NR(pte_page(page))].count != 1)
                goto check_table;
        shp->shm_pages[idx] = swap_nr;
        write_swap_page (swap_nr, (char *) pte_page(page));
        free_page(pte_page(page));
        swap_successes++;
        shm_swp++;
        shm_rss--;
        return 1;
}
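
/*
 * Explanatory note (added, not part of the original source): shm_swap() is
 * the swap-out side.  It walks the segments round-robin (swap_id/swap_idx),
 * skips locked segments and young or locked pages, replaces the hardware pte
 * in every attaching mm with the non-present signature pte, and only once the
 * page's use count has dropped to 1 writes it to the reserved swap slot and
 * records swap_nr in shm_pages[idx].
 */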

#else /* NO_MM */

/* FIXME: shm _is_ feasible under NO_MM, but requires more advanced memory
   accounting than we currently have available. */

void shm_init (void)
{
        return;
}

asmlinkage int sys_shmget (key_t key, int size, int shmflg)
{
        return -ENOSYS;
}

asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        return -ENOSYS;
}

asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        return -ENOSYS;
}

asmlinkage int sys_shmdt (char *shmaddr)
{
        return -ENOSYS;
}

int shm_swap (int prio, int dma)
{
        return 0;
}

#endif /* NO_MM */


#ifndef NO_MM
/*
 * Free the swap entry and set the new pte for the shm page.
 */
static void shm_unuse_page(struct shmid_ds *shp, unsigned long idx,
                           unsigned long type)
{
        pte_t pte = __pte(shp->shm_pages[idx]);
        unsigned long page, entry = shp->shm_pages[idx];

        if (pte_none(pte))
                return;
        if (pte_present(pte))
        {
                /*
                 * Security check. Should not be needed...
                 */
                unsigned long page_nr = MAP_NR(pte_page(pte));
                if (page_nr >= MAP_NR(high_memory))
                {
                        printk("shm page mapped in virtual memory\n");
                        return;
                }
                if (!in_swap_cache(page_nr))
                        return;
                if (SWP_TYPE(in_swap_cache(page_nr)) != type)
                        return;
                printk("shm page in swap cache, trying to remove it!\n");
                delete_from_swap_cache(page_nr);

                shp->shm_pages[idx] = pte_val(pte_mkdirty(pte));
                return;
        }

        if (SWP_TYPE(pte_val(pte)) != type)
                return;

        /*
         * Here we must swap in the pte and free the swap entry.
         */
        page = get_free_page(GFP_KERNEL);
        read_swap_page(pte_val(pte), (char *) page);
        pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
        shp->shm_pages[idx] = pte_val(pte);
        shm_rss++;

        swap_free(entry);
        shm_swp--;
}

/*
 * shm_unuse() searches for shm pages that may have been swapped out to the
 * swap area of the given type.
 */
void shm_unuse(unsigned int type)
{
        int i, n;

        for (i = 0; i < SHMMNI; i++)
                if (shm_segs[i] != IPC_UNUSED && shm_segs[i] != IPC_NOID)
                        for (n = 0; n < shm_segs[i]->shm_npages; n++)
                                shm_unuse_page(shm_segs[i], n, type);
}

#endif /* NO_MM */
