/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2003 Hugh Dickins.
 * Copyright (C) 2002-2003 VERITAS Software Corporation.
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/div64.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC     0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
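
/*
 * Worked example (illustrative, not in the original): with 4K pages
 * and a 4-byte unsigned long (e.g. i386), ENTRIES_PER_PAGE is
 * 4096/4 = 1024 and ENTRIES_PER_PAGEPAGE is 1024*1024; assuming the
 * usual SHMEM_NR_DIRECT of 16, SHMEM_MAX_INDEX comes to
 * 16 + (1048576/2) * 1025 = 537395216 pages, so SHMEM_MAX_BYTES
 * permits files of roughly 2TB.
 */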

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate race efficiently */
#define SHMEM_PAGEIN     VM_READ
#define SHMEM_TRUNCATE   VM_WRITE

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

#define SHMEM_SB(sb) (&sb->u.shmem_sb)

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
        SGP_WRITE,      /* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
                         struct page **pagep, enum sgp_type sgp);

static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

LIST_HEAD(shmem_inodes);
static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED;

static void shmem_free_block(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        spin_lock(&sbinfo->stat_lock);
        sbinfo->free_blocks++;
        inode->i_blocks -= BLOCKS_PER_PAGE;
        spin_unlock(&sbinfo->stat_lock);
}

static void shmem_removepage(struct page *page)
{
        if (!PageLaunder(page))
                shmem_free_block(page->mapping->host);
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is 0, else it will use the page for the needed block,
 * setting it to 0 on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, unsigned long *page)
{
        unsigned long offset;
        void **dir;

        if (index < SHMEM_NR_DIRECT)
                return info->i_direct+index;
        if (!info->i_indirect) {
                if (page) {
                        info->i_indirect = (void **) *page;
                        *page = 0;
                }
                return NULL;                    /* need another page */
        }

        index -= SHMEM_NR_DIRECT;
        offset = index % ENTRIES_PER_PAGE;
        index /= ENTRIES_PER_PAGE;
        dir = info->i_indirect;

        if (index >= ENTRIES_PER_PAGE/2) {
                index -= ENTRIES_PER_PAGE/2;
                dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
                index %= ENTRIES_PER_PAGE;
                if (!*dir) {
                        if (page) {
                                *dir = (void *) *page;
                                *page = 0;
                        }
                        return NULL;            /* need another page */
                }
                dir = (void **) *dir;
        }

        dir += index;
        if (!*dir) {
                if (!page || !*page)
                        return NULL;            /* need a page */
                *dir = (void *) *page;
                *page = 0;
        }
        return (swp_entry_t *) *dir + offset;
}
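
#if 0   /* illustrative sketch only, not in the original; kept out of the build */
/*
 * Mirror of shmem_swp_entry's index arithmetic for the artificial
 * layout drawn above (ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT == 16),
 * computing where an index lands without touching any memory.
 * For index 30: offset 2, entries-page 3 of the indirect area; since
 * 3 >= 4/2 we are in the triple-indirect half, slot 2 of i_indirect
 * (dir2), page 1 under it, i.e. the "28-31" page at offset 2.
 */
static void shmem_swp_entry_example(unsigned long index)
{
        const unsigned long epp = 4, direct = 16;
        unsigned long offset, slot, subpage;

        if (index < direct)
                return;                 /* info->i_direct[index] */
        index -= direct;
        offset = index % epp;           /* entry within the final page */
        index /= epp;                   /* which entries-page */
        if (index < epp/2) {
                slot = index;           /* first half: i_indirect[slot] */
                return;
        }
        index -= epp/2;
        slot = epp/2 + index/epp;       /* which dirN slot in i_indirect */
        subpage = index % epp;          /* entries-page under that dirN */
}
#endif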

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist, allocate the entry.
 *
 * @info:       info structure for the inode
 * @index:      index of the page to find
 * @sgp:        check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
        struct inode *inode = info->inode;
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long page = 0;
        swp_entry_t *entry;
        static const swp_entry_t unswapped = {0};

        if (sgp != SGP_WRITE &&
            ((loff_t) index << PAGE_CACHE_SHIFT) >= inode->i_size)
                return ERR_PTR(-EINVAL);

        while (!(entry = shmem_swp_entry(info, index, &page))) {
                if (sgp == SGP_READ)
                        return (swp_entry_t *) &unswapped;
                /*
                 * Test free_blocks against 1 not 0, since we have 1 data
                 * page (and perhaps indirect index pages) yet to allocate:
                 * a waste to allocate index if we cannot allocate data.
                 */
                spin_lock(&sbinfo->stat_lock);
                if (sbinfo->free_blocks <= 1) {
                        spin_unlock(&sbinfo->stat_lock);
                        return ERR_PTR(-ENOSPC);
                }
                sbinfo->free_blocks--;
                inode->i_blocks += BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);

                spin_unlock(&info->lock);
                page = get_zeroed_page(GFP_USER);
                spin_lock(&info->lock);

                if (!page) {
                        shmem_free_block(inode);
                        return ERR_PTR(-ENOMEM);
                }
                if (sgp != SGP_WRITE &&
                    ((loff_t) index << PAGE_CACHE_SHIFT) >= inode->i_size) {
                        entry = ERR_PTR(-EINVAL);
                        break;
                }
                if (info->next_index <= index)
                        info->next_index = index + 1;
        }
        if (page) {
                /* another task gave its page, or truncated the file */
                shmem_free_block(inode);
                free_page(page);
        }
        if (info->next_index <= index && !IS_ERR(entry))
                info->next_index = index + 1;
        return entry;
}
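
/*
 * Timeline sketch (illustrative, not in the original) of why the code
 * above drops info->lock around get_zeroed_page: the allocation may
 * sleep, and meanwhile another task may truncate the file or install
 * the needed index page itself.  That is why i_size is rechecked after
 * relocking, and why a page still held at loop exit is simply given
 * back via shmem_free_block and free_page.
 */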

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;
        int freed = 0;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val) {
                        free_swap_and_cache(*ptr);
                        *ptr = (swp_entry_t){0};
                        freed++;
                }
        }
        return freed;
}

/*
 * shmem_truncate_direct - free the swap entries of a whole doubly
 *                         indirect block
 *
 * @info:       the info structure of the inode
 * @dir:        pointer to the pointer to the block
 * @start:      offset to start from (in pages)
 * @len:        how many pages are stored in this block
 */
static inline unsigned long
shmem_truncate_direct(struct shmem_inode_info *info, swp_entry_t ***dir, unsigned long start, unsigned long len)
{
        swp_entry_t **last, **ptr;
        unsigned long off, freed_swp, freed = 0;

        last = *dir + (len + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
        off = start % ENTRIES_PER_PAGE;

        for (ptr = *dir + start/ENTRIES_PER_PAGE; ptr < last; ptr++, off = 0) {
                if (!*ptr)
                        continue;

                if (info->swapped) {
                        freed_swp = shmem_free_swp(*ptr + off,
                                                *ptr + ENTRIES_PER_PAGE);
                        info->swapped -= freed_swp;
                        freed += freed_swp;
                }

                if (!off) {
                        freed++;
                        free_page((unsigned long) *ptr);
                        *ptr = 0;
                }
        }

        if (!start) {
                freed++;
                free_page((unsigned long) *dir);
                *dir = 0;
        }
        return freed;
}

/*
 * shmem_truncate_indirect - truncate an inode
 *
 * @info:  the info structure of the inode
 * @index: the index to truncate
 *
 * This function locates the last doubly indirect block and then calls
 * shmem_truncate_direct to do the real work
 */
static inline unsigned long
shmem_truncate_indirect(struct shmem_inode_info *info, unsigned long index)
{
        swp_entry_t ***base;
        unsigned long baseidx, start;
        unsigned long len = info->next_index;
        unsigned long freed;

        if (len <= SHMEM_NR_DIRECT) {
                info->next_index = index;
                if (!info->swapped)
                        return 0;
                freed = shmem_free_swp(info->i_direct + index,
                                        info->i_direct + len);
                info->swapped -= freed;
                return freed;
        }

        if (len <= ENTRIES_PER_PAGEPAGE/2 + SHMEM_NR_DIRECT) {
                len -= SHMEM_NR_DIRECT;
                base = (swp_entry_t ***) &info->i_indirect;
                baseidx = SHMEM_NR_DIRECT;
        } else {
                len -= ENTRIES_PER_PAGEPAGE/2 + SHMEM_NR_DIRECT;
                BUG_ON(len > ENTRIES_PER_PAGEPAGE*ENTRIES_PER_PAGE/2);
                baseidx = len - 1;
                baseidx -= baseidx % ENTRIES_PER_PAGEPAGE;
                base = (swp_entry_t ***) info->i_indirect +
                        ENTRIES_PER_PAGE/2 + baseidx/ENTRIES_PER_PAGEPAGE;
                len -= baseidx;
                baseidx += ENTRIES_PER_PAGEPAGE/2 + SHMEM_NR_DIRECT;
        }

        if (index > baseidx) {
                info->next_index = index;
                start = index - baseidx;
        } else {
                info->next_index = baseidx;
                start = 0;
        }
        return *base? shmem_truncate_direct(info, base, start, len): 0;
}
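
/*
 * Worked example (illustrative, not in the original), again with the
 * artificial ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT == 16 layout, so
 * ENTRIES_PER_PAGEPAGE == 16: suppose next_index is 45 and we truncate
 * to index 0.  45 > 16/2 + 16 == 24, so the else branch runs:
 * len = 45 - 24 = 21, baseidx = 20 - 20 % 16 = 16, base points at
 * dir3's slot (i_indirect + 2 + 16/16), len = 21 - 16 = 5, and
 * baseidx becomes 16 + 24 = 40.  So this call frees pages 40..44
 * under dir3 and lowers next_index to 40; shmem_truncate's loop below
 * then calls back in for the remaining blocks.
 */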

static void shmem_truncate(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        unsigned long freed = 0;
        unsigned long index;

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (index >= info->next_index)
                return;

        spin_lock(&info->lock);
        while (index < info->next_index)
                freed += shmem_truncate_indirect(info, index);
        BUG_ON(info->swapped > info->next_index);

        if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
                /*
                 * Call truncate_inode_pages again: racing shmem_unuse_inode
                 * may have swizzled a page in from swap since vmtruncate or
                 * generic_delete_inode did it, before we lowered next_index.
                 * Also, though shmem_getpage checks i_size before adding to
                 * cache, no recheck after: so fix the narrow window there too.
                 */
                info->flags |= SHMEM_TRUNCATE;
                spin_unlock(&info->lock);
                truncate_inode_pages(inode->i_mapping, inode->i_size);
                spin_lock(&info->lock);
                info->flags &= ~SHMEM_TRUNCATE;
        }

        spin_unlock(&info->lock);
        spin_lock(&sbinfo->stat_lock);
        sbinfo->free_blocks += freed;
        inode->i_blocks -= freed*BLOCKS_PER_PAGE;
        spin_unlock(&sbinfo->stat_lock);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct page *page = NULL;
        int error;

        if (attr->ia_valid & ATTR_SIZE) {
                if (attr->ia_size < inode->i_size) {
                        /*
                         * If truncating down to a partial page, then
                         * if that page is already allocated, hold it
                         * in memory until the truncation is over, so
                         * truncate_partial_page cannot miss it were
                         * it assigned to swap.
                         */
                        if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
                                (void) shmem_getpage(inode,
                                        attr->ia_size>>PAGE_CACHE_SHIFT,
                                                &page, SGP_READ);
                        }
                        /*
                         * Reset SHMEM_PAGEIN flag so that shmem_truncate can
                         * detect if any pages might have been added to cache
                         * after truncate_inode_pages.  But we needn't bother
                         * if it's being fully truncated to zero-length: the
                         * nrpages check is efficient enough in that case.
                         */
                        if (attr->ia_size) {
                                struct shmem_inode_info *info = SHMEM_I(inode);
                                spin_lock(&info->lock);
                                info->flags &= ~SHMEM_PAGEIN;
                                spin_unlock(&info->lock);
                        }
                }
        }

        error = inode_change_ok(inode, attr);
        if (!error)
                error = inode_setattr(inode, attr);
        if (page)
                page_cache_release(page);
        return error;
}

static void shmem_delete_inode(struct inode *inode)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (inode->i_op->truncate == shmem_truncate) {
                spin_lock(&shmem_ilock);
                list_del(&info->list);
                spin_unlock(&shmem_ilock);
                inode->i_size = 0;
                shmem_truncate(inode);
        }
        BUG_ON(inode->i_blocks);
        spin_lock(&sbinfo->stat_lock);
        sbinfo->free_inodes++;
        spin_unlock(&sbinfo->stat_lock);
        clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
        swp_entry_t *ptr;

        for (ptr = dir; ptr < edir; ptr++) {
                if (ptr->val == entry.val)
                        return ptr - dir;
        }
        return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
        struct inode *inode;
        struct address_space *mapping;
        swp_entry_t *ptr;
        unsigned long idx;
        int offset;

        idx = 0;
        ptr = info->i_direct;
        spin_lock(&info->lock);
        offset = info->next_index;
        if (offset > SHMEM_NR_DIRECT)
                offset = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr + offset);
        if (offset >= 0)
                goto found;

        for (idx = SHMEM_NR_DIRECT; idx < info->next_index;
             idx += ENTRIES_PER_PAGE) {
                ptr = shmem_swp_entry(info, idx, NULL);
                if (!ptr)
                        continue;
                offset = info->next_index - idx;
                if (offset > ENTRIES_PER_PAGE)
                        offset = ENTRIES_PER_PAGE;
                offset = shmem_find_swp(entry, ptr, ptr + offset);
                if (offset >= 0)
                        goto found;
        }
        spin_unlock(&info->lock);
        return 0;
found:
        idx += offset;
        inode = info->inode;
        mapping = inode->i_mapping;
        delete_from_swap_cache(page);
        if (add_to_page_cache_unique(page,
                        mapping, idx, page_hash(mapping, idx)) == 0) {
                info->flags |= SHMEM_PAGEIN;
                ptr[offset].val = 0;
                info->swapped--;
        } else if (add_to_swap_cache(page, entry) != 0)
                BUG();
        spin_unlock(&info->lock);
        SetPageUptodate(page);
        /*
         * Decrement swap count even when the entry is left behind:
         * try_to_unuse will skip over mms, then reincrement count.
         */
        swap_free(entry);
        return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
        struct list_head *p;
        struct shmem_inode_info *info;
        int found = 0;

        spin_lock(&shmem_ilock);
        list_for_each(p, &shmem_inodes) {
                info = list_entry(p, struct shmem_inode_info, list);

                if (info->swapped && shmem_unuse_inode(info, entry, page)) {
                        /* move head to start search for next from here */
                        list_move_tail(&shmem_inodes, &info->list);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&shmem_ilock);
        return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page)
{
        struct shmem_inode_info *info;
        swp_entry_t *entry, swap;
        struct address_space *mapping;
        unsigned long index;
        struct inode *inode;

        BUG_ON(!PageLocked(page));
        if (!PageLaunder(page))
                goto fail;

        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto fail;
getswap:
        swap = get_swap_page();
        if (!swap.val)
                goto fail;

        spin_lock(&info->lock);
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                spin_unlock(&info->lock);
                swap_free(swap);
                goto fail;
        }
        entry = shmem_swp_entry(info, index, NULL);
        BUG_ON(!entry);
        BUG_ON(entry->val);

        /* Remove it from the page cache */
        remove_inode_page(page);
        page_cache_release(page);

        /* Add it to the swap cache */
        if (add_to_swap_cache(page, swap) != 0) {
                /*
                 * Raced with "speculative" read_swap_cache_async.
                 * Add page back to page cache, unref swap, try again.
                 */
                add_to_page_cache_locked(page, mapping, index);
                info->flags |= SHMEM_PAGEIN;
                spin_unlock(&info->lock);
                swap_free(swap);
                goto getswap;
        }

        *entry = swap;
        info->swapped++;
        spin_unlock(&info->lock);
        SetPageUptodate(page);
        set_page_dirty(page);
        UnlockPage(page);
        return 0;
fail:
        return fail_writepage(page);
}
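
/*
 * Illustrative summary, not in the original: on success the page makes
 * one transition under info->lock - out of the page cache via
 * remove_inode_page, into the swap cache via add_to_swap_cache - with
 * the allocated swp_entry_t recorded in the inode's swap vector and
 * info->swapped incremented.  Every bail-out after a successful
 * get_swap_page must return the entry with swap_free, as both failure
 * paths above do.
 */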

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo;
        struct page *filepage = *pagep;
        struct page *swappage;
        swp_entry_t *entry;
        swp_entry_t swap;
        int error = 0;

        if (idx >= SHMEM_MAX_INDEX)
                return -EFBIG;
        /*
         * Normally, filepage is NULL on entry, and either found
         * uptodate immediately, or allocated and zeroed, or read
         * in under swappage, which is then assigned to filepage.
         * But shmem_readpage and shmem_prepare_write pass in a locked
         * filepage, which may be found not uptodate by other callers
         * too, and may need to be copied from the swappage read in.
         */
repeat:
        if (!filepage)
                filepage = find_lock_page(mapping, idx);
        if (filepage && Page_Uptodate(filepage))
                goto done;

        spin_lock(&info->lock);
        entry = shmem_swp_alloc(info, idx, sgp);
        if (IS_ERR(entry)) {
                spin_unlock(&info->lock);
                error = PTR_ERR(entry);
                goto failed;
        }
        swap = *entry;

        if (swap.val) {
                /* Look it up and read it in.. */
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        spin_unlock(&info->lock);
                        swapin_readahead(swap);
                        swappage = read_swap_cache_async(swap);
                        if (!swappage) {
                                spin_lock(&info->lock);
                                entry = shmem_swp_alloc(info, idx, sgp);
                                if (IS_ERR(entry))
                                        error = PTR_ERR(entry);
                                else if (entry->val == swap.val)
                                        error = -ENOMEM;
                                spin_unlock(&info->lock);
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        wait_on_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }

                /* We have to do this with page locked to prevent races */
                if (TryLockPage(swappage)) {
                        spin_unlock(&info->lock);
                        wait_on_page(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
                if (!Page_Uptodate(swappage)) {
                        spin_unlock(&info->lock);
                        UnlockPage(swappage);
                        page_cache_release(swappage);
                        error = -EIO;
                        goto failed;
                }

                delete_from_swap_cache(swappage);
                if (filepage) {
                        entry->val = 0;
                        info->swapped--;
                        spin_unlock(&info->lock);
                        flush_page_to_ram(swappage);
                        copy_highpage(filepage, swappage);
                        UnlockPage(swappage);
                        page_cache_release(swappage);
                        flush_dcache_page(filepage);
                        SetPageUptodate(filepage);
                        SetPageDirty(filepage);
                        swap_free(swap);
                } else if (add_to_page_cache_unique(swappage,
                        mapping, idx, page_hash(mapping, idx)) == 0) {
                        info->flags |= SHMEM_PAGEIN;
                        entry->val = 0;
                        info->swapped--;
                        spin_unlock(&info->lock);
                        filepage = swappage;
                        SetPageUptodate(filepage);
                        SetPageDirty(filepage);
                        swap_free(swap);
                } else {
                        if (add_to_swap_cache(swappage, swap) != 0)
                                BUG();
                        spin_unlock(&info->lock);
                        SetPageUptodate(swappage);
                        SetPageDirty(swappage);
                        UnlockPage(swappage);
                        page_cache_release(swappage);
                        goto repeat;
                }
        } else if (sgp == SGP_READ && !filepage) {
                filepage = find_get_page(mapping, idx);
                if (filepage &&
                    (!Page_Uptodate(filepage) || TryLockPage(filepage))) {
                        spin_unlock(&info->lock);
                        wait_on_page(filepage);
                        page_cache_release(filepage);
                        filepage = NULL;
                        goto repeat;
                }
                spin_unlock(&info->lock);
        } else {
                sbinfo = SHMEM_SB(inode->i_sb);
                spin_lock(&sbinfo->stat_lock);
                if (sbinfo->free_blocks == 0) {
                        spin_unlock(&sbinfo->stat_lock);
                        spin_unlock(&info->lock);
                        error = -ENOSPC;
                        goto failed;
                }
                sbinfo->free_blocks--;
                inode->i_blocks += BLOCKS_PER_PAGE;
                spin_unlock(&sbinfo->stat_lock);

                if (!filepage) {
                        spin_unlock(&info->lock);
                        filepage = page_cache_alloc(mapping);
                        if (!filepage) {
                                shmem_free_block(inode);
                                error = -ENOMEM;
                                goto failed;
                        }

                        spin_lock(&info->lock);
                        entry = shmem_swp_alloc(info, idx, sgp);
                        if (IS_ERR(entry))
                                error = PTR_ERR(entry);
                        if (error || entry->val ||
                            add_to_page_cache_unique(filepage,
                            mapping, idx, page_hash(mapping, idx)) != 0) {
                                spin_unlock(&info->lock);
                                page_cache_release(filepage);
                                shmem_free_block(inode);
                                filepage = NULL;
                                if (error)
                                        goto failed;
                                goto repeat;
                        }
                        info->flags |= SHMEM_PAGEIN;
                }

                spin_unlock(&info->lock);
                clear_highpage(filepage);
                flush_dcache_page(filepage);
                SetPageUptodate(filepage);
        }
done:
        if (!*pagep) {
                if (filepage) {
                        UnlockPage(filepage);
                        *pagep = filepage;
                } else
                        *pagep = ZERO_PAGE(0);
        }
        return 0;

failed:
        if (*pagep != filepage) {
                UnlockPage(filepage);
                page_cache_release(filepage);
        }
        return error;
}
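
/*
 * Illustrative sketch, not part of the original file: the *pagep
 * contract as a hypothetical caller sees it.  Pass in NULL and, on
 * success, get back a referenced, unlocked, uptodate page - or
 * ZERO_PAGE(0) for a hole under SGP_READ.  Releasing ZERO_PAGE is a
 * safe no-op on a reserved page, which do_shmem_file_read below
 * relies on by releasing unconditionally.
 */
#if 0   /* example only, kept out of the build */
static int shmem_getpage_example(struct inode *inode, unsigned long idx)
{
        struct page *page = NULL;       /* NULL in: find, swap in, or allocate */
        int err = shmem_getpage(inode, idx, &page, SGP_READ);

        if (err)
                return err;
        /* ... copy data out of the page here ... */
        page_cache_release(page);       /* drop the reference we were given */
        return 0;
}
#endif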

struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
{
        struct inode *inode = vma->vm_file->f_dentry->d_inode;
        struct page *page = NULL;
        unsigned long idx;
        int error;

        idx = (address - vma->vm_start) >> PAGE_SHIFT;
        idx += vma->vm_pgoff;
        idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;

        error = shmem_getpage(inode, idx, &page, SGP_CACHE);
        if (error)
                return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

        mark_page_accessed(page);
        flush_page_to_ram(page);
        return page;
}

void shmem_lock(struct file *file, int lock)
{
        struct inode *inode = file->f_dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);

        spin_lock(&info->lock);
        if (lock)
                info->flags |= VM_LOCKED;
        else
                info->flags &= ~VM_LOCKED;
        spin_unlock(&info->lock);
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct vm_operations_struct *ops;
        struct inode *inode = file->f_dentry->d_inode;

        ops = &shmem_vm_ops;
        if (!S_ISREG(inode->i_mode))
                return -EACCES;
        UPDATE_ATIME(inode);
        vma->vm_ops = ops;
        return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, int mode, int dev)
{
        struct inode *inode;
        struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        spin_lock(&sbinfo->stat_lock);
        if (!sbinfo->free_inodes) {
                spin_unlock(&sbinfo->stat_lock);
                return NULL;
        }
        sbinfo->free_inodes--;
        spin_unlock(&sbinfo->stat_lock);

        inode = new_inode(sb);
        if (inode) {
                inode->i_mode = mode;
                inode->i_uid = current->fsuid;
                inode->i_gid = current->fsgid;
                inode->i_blksize = PAGE_CACHE_SIZE;
                inode->i_blocks = 0;
                inode->i_rdev = NODEV;
                inode->i_mapping->a_ops = &shmem_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                info = SHMEM_I(inode);
                info->inode = inode;
                spin_lock_init(&info->lock);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &shmem_inode_operations;
                        inode->i_fop = &shmem_file_operations;
                        spin_lock(&shmem_ilock);
                        list_add_tail(&info->list, &shmem_inodes);
                        spin_unlock(&shmem_ilock);
                        break;
                case S_IFDIR:
                        inode->i_nlink++;
                        /* Some things misbehave if size == 0 on a directory */
                        inode->i_size = 2 * BOGO_DIRENT_SIZE;
                        inode->i_op = &shmem_dir_inode_operations;
                        inode->i_fop = &dcache_dir_ops;
                        break;
                case S_IFLNK:
                        break;
                }
        }
        return inode;
}

static int shmem_set_size(struct shmem_sb_info *info,
                          unsigned long max_blocks, unsigned long max_inodes)
{
        int error;
        unsigned long blocks, inodes;

        spin_lock(&info->stat_lock);
        blocks = info->max_blocks - info->free_blocks;
        inodes = info->max_inodes - info->free_inodes;
        error = -EINVAL;
        if (max_blocks < blocks)
                goto out;
        if (max_inodes < inodes)
                goto out;
        error = 0;
        info->max_blocks  = max_blocks;
        info->free_blocks = max_blocks - blocks;
        info->max_inodes  = max_inodes;
        info->free_inodes = max_inodes - inodes;
out:
        spin_unlock(&info->stat_lock);
        return error;
}

#ifdef CONFIG_TMPFS

static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * tmpfs itself makes no use of generic_file_read, generic_file_mmap
 * or generic_file_write; but shmem_readpage, shmem_prepare_write and
 * shmem_commit_write let a tmpfs file be used below the loop driver,
 * and shmem_readpage lets a tmpfs file be used by sendfile.
 */
static int
shmem_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        int error = shmem_getpage(inode, page->index, &page, SGP_CACHE);
        UnlockPage(page);
        return error;
}

static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
        struct inode *inode = page->mapping->host;
        return shmem_getpage(inode, page->index, &page, SGP_WRITE);
}

static int
shmem_commit_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
        struct inode *inode = page->mapping->host;
        loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

        if (pos > inode->i_size)
                inode->i_size = pos;
        SetPageDirty(page);
        return 0;
}

static ssize_t
shmem_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
{
        struct inode    *inode = file->f_dentry->d_inode;
        loff_t          pos;
        unsigned long   written;
        int             err;

        if ((ssize_t) count < 0)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;

        down(&inode->i_sem);

        pos = *ppos;
        written = 0;

        err = precheck_file_write(file, inode, &count, &pos);
        if (err || !count)
                goto out;

        remove_suid(inode);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        do {
                struct page *page = NULL;
                unsigned long bytes, index, offset;
                char *kaddr;
                int left;

                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                /*
                 * We don't hold page lock across copy from user -
                 * what would it guard against? - so no deadlock here.
                 */

                err = shmem_getpage(inode, index, &page, SGP_WRITE);
                if (err)
                        break;

                kaddr = kmap(page);
                left = __copy_from_user(kaddr + offset, buf, bytes);
                kunmap(page);

                written += bytes;
                count -= bytes;
                pos += bytes;
                buf += bytes;
                if (pos > inode->i_size)
                        inode->i_size = pos;

                flush_dcache_page(page);
                SetPageDirty(page);
                SetPageReferenced(page);
                page_cache_release(page);

                if (left) {
                        pos -= left;
                        written -= left;
                        err = -EFAULT;
                        break;
                }
        } while (count);

        *ppos = pos;
        if (written)
                err = written;
out:
        up(&inode->i_sem);
        return err;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index, offset;

        index = *ppos >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

        for (;;) {
                struct page *page = NULL;
                unsigned long end_index, nr, ret;

                end_index = inode->i_size >> PAGE_CACHE_SHIFT;
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = inode->i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset)
                                break;
                }

                desc->error = shmem_getpage(inode, index, &page, SGP_READ);
                if (desc->error) {
                        if (desc->error == -EINVAL)
                                desc->error = 0;
                        break;
                }

                /*
                 * We must evaluate after, since reads (unlike writes)
                 * are called without i_sem protection against truncate
                 */
                nr = PAGE_CACHE_SIZE;
                end_index = inode->i_size >> PAGE_CACHE_SHIFT;
                if (index == end_index) {
                        nr = inode->i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset) {
                                page_cache_release(page);
                                break;
                        }
                }
                nr -= offset;

                if (page != ZERO_PAGE(0)) {
                        /*
                         * If users can be writing to this page using arbitrary
                         * virtual addresses, take care about potential aliasing
                         * before reading the page on the kernel side.
                         */
                        if (mapping->i_mmap_shared != NULL)
                                flush_dcache_page(page);
                        /*
                         * Mark the page accessed if we read the
                         * beginning or we just did an lseek.
                         */
                        if (!offset || !filp->f_reada)
                                mark_page_accessed(page);
                }

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = file_read_actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                page_cache_release(page);
                if (ret != nr || !desc->count)
                        break;
        }

        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        filp->f_reada = 1;
        UPDATE_ATIME(inode);
}

static ssize_t shmem_file_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
{
        read_descriptor_t desc;

        if ((ssize_t) count < 0)
                return -EINVAL;
        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        if (!count)
                return 0;

        desc.written = 0;
        desc.count = count;
        desc.buf = buf;
        desc.error = 0;

        do_shmem_file_read(filp, ppos, &desc);
        if (desc.written)
                return desc.written;
        return desc.error;
}

static int shmem_statfs(struct super_block *sb, struct statfs *buf)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        buf->f_type = TMPFS_MAGIC;
        buf->f_bsize = PAGE_CACHE_SIZE;
        spin_lock(&sbinfo->stat_lock);
        buf->f_blocks = sbinfo->max_blocks;
        buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
        buf->f_files = sbinfo->max_inodes;
        buf->f_ffree = sbinfo->free_inodes;
        spin_unlock(&sbinfo->stat_lock);
        buf->f_namelen = NAME_MAX;
        return 0;
}

/*
 * Lookup the data. This is trivial - if the dentry didn't already
 * exist, we know it is negative.
 */
static struct dentry *shmem_lookup(struct inode *dir, struct dentry *dentry)
{
        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);
        d_add(dentry, NULL);
        return NULL;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, int dev)
{
        struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
        int error = -ENOSPC;

        if (inode) {
                if (dir->i_mode & S_ISGID) {
                        inode->i_gid = dir->i_gid;
                        if (S_ISDIR(mode))
                                inode->i_mode |= S_ISGID;
                }
                dir->i_size += BOGO_DIRENT_SIZE;
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry); /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        int error;

        if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
                return error;
        dir->i_nlink++;
        return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode)
{
        return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = old_dentry->d_inode;

        if (S_ISDIR(inode->i_mode))
                return -EPERM;

        dir->i_size += BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inode->i_nlink++;
        atomic_inc(&inode->i_count);    /* New dentry reference */
        dget(dentry);           /* Extra pinning count for the created dentry */
        d_instantiate(dentry, inode);
        return 0;
}

static inline int shmem_positive(struct dentry *dentry)
{
        return dentry->d_inode && !d_unhashed(dentry);
}

/*
 * Check that a directory is empty (this works
 * for regular files too, they'll just always be
 * considered empty..).
 *
 * Note that an empty directory can still have
 * children, they just all have to be negative..
 */
static int shmem_empty(struct dentry *dentry)
{
        struct list_head *list;

        spin_lock(&dcache_lock);
        list = dentry->d_subdirs.next;

        while (list != &dentry->d_subdirs) {
                struct dentry *de = list_entry(list, struct dentry, d_child);

                if (shmem_positive(de)) {
                        spin_unlock(&dcache_lock);
                        return 0;
                }
                list = list->next;
        }
        spin_unlock(&dcache_lock);
        return 1;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        dir->i_size -= BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inode->i_nlink--;
        dput(dentry);   /* Undo the count from "create" - this does all the work */
        return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
        if (!shmem_empty(dentry))
                return -ENOTEMPTY;

        dir->i_nlink--;
        return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int they_are_dirs = S_ISDIR(inode->i_mode);

        if (!shmem_empty(new_dentry))
                return -ENOTEMPTY;

        if (new_dentry->d_inode) {
                (void) shmem_unlink(new_dir, new_dentry);
                if (they_are_dirs)
                        old_dir->i_nlink--;
        } else if (they_are_dirs) {
                old_dir->i_nlink--;
                new_dir->i_nlink++;
        }

        old_dir->i_size -= BOGO_DIRENT_SIZE;
        new_dir->i_size += BOGO_DIRENT_SIZE;
        old_dir->i_ctime = old_dir->i_mtime =
        new_dir->i_ctime = new_dir->i_mtime =
        inode->i_ctime = CURRENT_TIME;
        return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
        int error;
        int len;
        struct inode *inode;
        struct page *page = NULL;
        char *kaddr;
        struct shmem_inode_info *info;

        len = strlen(symname) + 1;
        if (len > PAGE_CACHE_SIZE)
                return -ENAMETOOLONG;

        inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
        if (!inode)
                return -ENOSPC;

        info = SHMEM_I(inode);
        inode->i_size = len-1;
        if (len <= sizeof(struct shmem_inode_info)) {
                /* do it inline */
                memcpy(info, symname, len);
                inode->i_op = &shmem_symlink_inline_operations;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE);
                if (error) {
                        iput(inode);
                        return error;
                }
                inode->i_op = &shmem_symlink_inode_operations;
                spin_lock(&shmem_ilock);
                list_add_tail(&info->list, &shmem_inodes);
                spin_unlock(&shmem_ilock);
                kaddr = kmap(page);
                memcpy(kaddr, symname, len);
                kunmap(page);
                SetPageDirty(page);
                page_cache_release(page);
        }
        if (dir->i_mode & S_ISGID)
                inode->i_gid = dir->i_gid;
        dir->i_size += BOGO_DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
}

static int shmem_readlink_inline(struct dentry *dentry, char *buffer, int buflen)
{
        return vfs_readlink(dentry, buffer, buflen, (const char *)SHMEM_I(dentry->d_inode));
}

static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
        return vfs_follow_link(nd, (const char *)SHMEM_I(dentry->d_inode));
}

static int shmem_readlink(struct dentry *dentry, char *buffer, int buflen)
{
        struct page *page = NULL;
        int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ);
        if (res)
                return res;
        res = vfs_readlink(dentry, buffer, buflen, kmap(page));
        kunmap(page);
        mark_page_accessed(page);
        page_cache_release(page);
        return res;
}

static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct page *page = NULL;
        int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ);
        if (res)
                return res;
        res = vfs_follow_link(nd, kmap(page));
        kunmap(page);
        mark_page_accessed(page);
        page_cache_release(page);
        return res;
}

static struct inode_operations shmem_symlink_inline_operations = {
        readlink:       shmem_readlink_inline,
        follow_link:    shmem_follow_link_inline,
};

static struct inode_operations shmem_symlink_inode_operations = {
        truncate:       shmem_truncate,
        readlink:       shmem_readlink,
        follow_link:    shmem_follow_link,
};

static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
{
        char *this_char, *value, *rest;

        while ((this_char = strsep(&options, ",")) != NULL) {
                if (!*this_char)
                        continue;
                if ((value = strchr(this_char,'=')) != NULL) {
                        *value++ = 0;
                } else {
                        printk(KERN_ERR
                            "tmpfs: No value for mount option '%s'\n",
                            this_char);
                        return 1;
                }

                if (!strcmp(this_char,"size")) {
                        unsigned long long size;
                        size = memparse(value,&rest);
                        if (*rest == '%') {
                                struct sysinfo si;
                                si_meminfo(&si);
                                size <<= PAGE_SHIFT;
                                size *= si.totalram;
                                do_div(size, 100);
                                rest++;
                        }
                        if (*rest)
                                goto bad_val;
                        *blocks = size >> PAGE_CACHE_SHIFT;
                } else if (!strcmp(this_char,"nr_blocks")) {
                        *blocks = memparse(value,&rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"nr_inodes")) {
                        *inodes = memparse(value,&rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mode")) {
                        if (!mode)
                                continue;
                        *mode = simple_strtoul(value,&rest,8);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"uid")) {
                        if (!uid)
                                continue;
                        *uid = simple_strtoul(value,&rest,0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"gid")) {
                        if (!gid)
                                continue;
                        *gid = simple_strtoul(value,&rest,0);
                        if (*rest)
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
                               this_char);
                        return 1;
                }
        }
        return 0;

bad_val:
        printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
               value, this_char);
        return 1;
}
1480
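
/*
 * Illustrative sketch, not part of the original file: the option string
 * parsed above arrives as the "data" argument of mount(2).  "size"
 * accepts k/m/g suffixes via memparse(), or a percentage of physical
 * RAM: with 4K pages and 512MB of RAM (131072 pages), "size=50%"
 * computes (50 << 12) * 131072 / 100 = 256MB, i.e. 65536 page-sized
 * blocks.  The mount point below is a hypothetical example path.
 */
#if 0   /* userspace usage sketch */
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
        /* "mode" is parsed as octal; "uid" and "gid" as decimal */
        if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
                  "size=50%,nr_inodes=1024,mode=1777") != 0) {
                perror("mount");
                return 1;
        }
        return 0;
}
#endif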
 
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        unsigned long max_blocks = sbinfo->max_blocks;
        unsigned long max_inodes = sbinfo->max_inodes;

        if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks, &max_inodes))
                return -EINVAL;
        return shmem_set_size(sbinfo, max_blocks, max_inodes);
}
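
/*
 * On "mount -o remount", NULL is passed for mode/uid/gid above, so only
 * the block and inode limits can change; mode/uid/gid options are
 * accepted but silently ignored by shmem_parse_options() in that case.
 */

/*
 * fsync below can be a no-op: tmpfs pages exist only in RAM or in swap,
 * so there is no backing store to synchronize with.
 */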
 
static int shmem_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
        return 0;
}
#endif
 
static struct super_block *shmem_read_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct dentry *root;
        unsigned long blocks, inodes;
        int mode = S_IRWXUGO | S_ISVTX;
        uid_t uid = current->fsuid;
        gid_t gid = current->fsgid;
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        struct sysinfo si;

        /*
         * By default, allow each tmpfs instance only half of the
         * physical RAM.
         */
        si_meminfo(&si);
        blocks = inodes = si.totalram / 2;

#ifdef CONFIG_TMPFS
        if (shmem_parse_options(data, &mode, &uid, &gid, &blocks, &inodes))
                return NULL;
#endif

        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_blocks = blocks;
        sbinfo->free_blocks = blocks;
        sbinfo->max_inodes = inodes;
        sbinfo->free_inodes = inodes;
        sb->s_maxbytes = SHMEM_MAX_BYTES;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
        if (!inode)
                return NULL;

        inode->i_uid = uid;
        inode->i_gid = gid;
        root = d_alloc_root(inode);
        if (!root) {
                iput(inode);
                return NULL;
        }
        sb->s_root = root;
        return sb;
}
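
/*
 * The address_space operations below are what distinguish tmpfs from
 * ramfs: tmpfs supplies a writepage method, so under memory pressure
 * its pages can be written out to swap, and removepage hands the freed
 * block back to the superblock accounting via shmem_free_block().
 */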
 
static struct address_space_operations shmem_aops = {
        removepage:     shmem_removepage,
        writepage:      shmem_writepage,
#ifdef CONFIG_TMPFS
        readpage:       shmem_readpage,
        prepare_write:  shmem_prepare_write,
        commit_write:   shmem_commit_write,
#endif
};
 

static struct file_operations shmem_file_operations = {
        mmap:           shmem_mmap,
#ifdef CONFIG_TMPFS
        read:           shmem_file_read,
        write:          shmem_file_write,
        fsync:          shmem_sync_file,
#endif
};

static struct inode_operations shmem_inode_operations = {
        truncate:       shmem_truncate,
        setattr:        shmem_notify_change,
};

static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        create:         shmem_create,
        lookup:         shmem_lookup,
        link:           shmem_link,
        unlink:         shmem_unlink,
        symlink:        shmem_symlink,
        mkdir:          shmem_mkdir,
        rmdir:          shmem_rmdir,
        mknod:          shmem_mknod,
        rename:         shmem_rename,
#endif
};

static struct super_operations shmem_ops = {
#ifdef CONFIG_TMPFS
        statfs:         shmem_statfs,
        remount_fs:     shmem_remount_fs,
#endif
        delete_inode:   shmem_delete_inode,
        put_inode:      force_delete,
};

static struct vm_operations_struct shmem_vm_ops = {
        nopage:         shmem_nopage,
};
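
/*
 * Without CONFIG_TMPFS the filesystem below is registered FS_NOMOUNT:
 * it still backs shared anonymous memory and SysV shm through the
 * internal kern_mount() instance, but cannot be mounted from userspace.
 */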
#ifdef CONFIG_TMPFS
/* type "shm" will be tagged obsolete in 2.5 */
static DECLARE_FSTYPE(shmem_fs_type, "shm", shmem_read_super, FS_LITTER);
static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER);
#else
static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER|FS_NOMOUNT);
#endif
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
        int error;

        error = register_filesystem(&tmpfs_fs_type);
        if (error) {
                printk(KERN_ERR "Could not register tmpfs\n");
                goto out3;
        }
#ifdef CONFIG_TMPFS
        error = register_filesystem(&shmem_fs_type);
        if (error) {
                printk(KERN_ERR "Could not register shm fs\n");
                goto out2;
        }
        devfs_mk_dir(NULL, "shm", NULL);
#endif
        shm_mnt = kern_mount(&tmpfs_fs_type);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                printk(KERN_ERR "Could not kern_mount tmpfs\n");
                goto out1;
        }

        /* The internal instance should not do size checking */
        shmem_set_size(SHMEM_SB(shm_mnt->mnt_sb), ULONG_MAX, ULONG_MAX);
        return 0;

out1:
#ifdef CONFIG_TMPFS
        unregister_filesystem(&shmem_fs_type);
out2:
#endif
        unregister_filesystem(&tmpfs_fs_type);
out3:
        shm_mnt = ERR_PTR(error);
        return error;
}
module_init(init_tmpfs)
 
/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 *
 */
struct file *shmem_file_setup(char *name, loff_t size)
{
        int error;
        struct file *file;
        struct inode *inode;
        struct dentry *dentry, *root;
        struct qstr this;
        int vm_enough_memory(long pages);

        if (IS_ERR(shm_mnt))
                return (void *)shm_mnt;

        if (size > SHMEM_MAX_BYTES)
                return ERR_PTR(-EINVAL);

        if (!vm_enough_memory(VM_ACCT(size)))
                return ERR_PTR(-ENOMEM);

        this.name = name;
        this.len = strlen(name);
        this.hash = 0; /* will go */
        root = shm_mnt->mnt_root;
        dentry = d_alloc(root, &this);
        if (!dentry)
                return ERR_PTR(-ENOMEM);

        error = -ENFILE;
        file = get_empty_filp();
        if (!file)
                goto put_dentry;

        error = -ENOSPC;
        inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto close_file;

        d_instantiate(dentry, inode);
        inode->i_size = size;
        inode->i_nlink = 0;     /* It is unlinked */
        file->f_vfsmnt = mntget(shm_mnt);
        file->f_dentry = dentry;
        file->f_op = &shmem_file_operations;
        file->f_mode = FMODE_WRITE | FMODE_READ;
        return file;

close_file:
        put_filp(file);
put_dentry:
        dput(dentry);
        return ERR_PTR(error);
}
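
/*
 * Illustrative sketch, not from this file: ipc/shm.c obtains the
 * backing file for a new SysV segment in essentially this way ("shp"
 * stands in for its shmid_kernel bookkeeping structure).
 */
#if 0   /* kernel-side usage sketch */
static int example_create_segment(struct shmid_kernel *shp, loff_t size)
{
        struct file *file = shmem_file_setup("SYSV00000000", size);
        if (IS_ERR(file))
                return PTR_ERR(file);
        shp->shm_file = file;   /* unlinked: released on the final fput() */
        return 0;
}
#endif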
 

/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        file = shmem_file_setup("dev/zero", size);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
        return 0;
}

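/*
 * Userspace view, illustrative: a MAP_SHARED|MAP_ANONYMOUS mmap() is
 * routed here by do_mmap_pgoff() and ends up backed by the unlinked
 * "dev/zero" tmpfs file created above.
 */
#if 0   /* userspace usage sketch */
#include <sys/mman.h>

int main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED;
}
#endif
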
EXPORT_SYMBOL(shmem_file_setup);
