OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/linux/linux-2.4/mm/filemap.c (rev 1766)

/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/locks.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/swapctl.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/iobuf.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mman.h>

#include <linux/highmem.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

unsigned long page_cache_size;
unsigned int page_hash_bits;
struct page **page_hash_table;

int vm_max_readahead = 31;
int vm_min_readahead = 3;
EXPORT_SYMBOL(vm_max_readahead);
EXPORT_SYMBOL(vm_min_readahead);


spinlock_cacheline_t pagecache_lock_cacheline  = {SPIN_LOCK_UNLOCKED};
/*
 * NOTE: to avoid deadlocking you must never acquire the pagemap_lru_lock
 *      with the pagecache_lock held.
 *
 * Ordering:
 *      swap_lock ->
 *              pagemap_lru_lock ->
 *                      pagecache_lock
 */
spinlock_cacheline_t pagemap_lru_lock_cacheline = {SPIN_LOCK_UNLOCKED};

#define CLUSTER_PAGES           (1 << page_cluster)
#define CLUSTER_OFFSET(x)       (((x) >> page_cluster) << page_cluster)
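
/*
 * Illustrative sketch (not part of the original file): a small user-space
 * program showing what CLUSTER_PAGES/CLUSTER_OFFSET compute.  It assumes a
 * hypothetical page_cluster value of 4, i.e. read-ahead clusters of
 * 1 << 4 = 16 pages (64 KB with 4 KB pages); the real value is tuned by
 * the kernel at boot.
 */
#if 0   /* example only, never compiled as part of filemap.c */
#include <stdio.h>

static unsigned int page_cluster = 4;            /* assumed, see above */

#define EX_CLUSTER_PAGES      (1 << page_cluster)
#define EX_CLUSTER_OFFSET(x)  (((x) >> page_cluster) << page_cluster)

int main(void)
{
        unsigned long idx[] = { 0, 5, 16, 17, 250 };
        unsigned int i;

        printf("cluster size: %d pages\n", EX_CLUSTER_PAGES);
        for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
                /* e.g. index 17 belongs to the cluster starting at page 16 */
                printf("page %3lu -> cluster start %3lu\n",
                       idx[i], EX_CLUSTER_OFFSET(idx[i]));
        return 0;
}
#endif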

static void FASTCALL(add_page_to_hash_queue(struct page * page, struct page **p));
static void add_page_to_hash_queue(struct page * page, struct page **p)
{
        struct page *next = *p;

        *p = page;
        page->next_hash = next;
        page->pprev_hash = p;
        if (next)
                next->pprev_hash = &page->next_hash;
        if (page->buffers)
                PAGE_BUG(page);
        inc_nr_cache_pages(page);
}

static inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
{
        struct list_head *head = &mapping->clean_pages;

        mapping->nrpages++;
        list_add(&page->list, head);
        page->mapping = mapping;
}

static inline void remove_page_from_inode_queue(struct page * page)
{
        struct address_space * mapping = page->mapping;

        if (mapping->a_ops->removepage)
                mapping->a_ops->removepage(page);

        list_del(&page->list);
        page->mapping = NULL;
        wmb();
        mapping->nrpages--;
        if (!mapping->nrpages)
                refile_inode(mapping->host);
}

static inline void remove_page_from_hash_queue(struct page * page)
{
        struct page *next = page->next_hash;
        struct page **pprev = page->pprev_hash;

        if (next)
                next->pprev_hash = pprev;
        *pprev = next;
        page->pprev_hash = NULL;
        dec_nr_cache_pages(page);
}
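
/*
 * Illustrative sketch (not part of the original file): the hash chains above
 * use a "pprev" back-pointer (a pointer to whatever pointer points at us),
 * which is what lets remove_page_from_hash_queue() unlink a node without
 * rescanning the bucket.  A minimal user-space version of the same idea,
 * with a hypothetical struct node standing in for struct page:
 */
#if 0   /* example only */
#include <stdio.h>

struct node {
        int key;
        struct node *next;      /* like page->next_hash */
        struct node **pprev;    /* like page->pprev_hash */
};

static void hash_add(struct node *n, struct node **bucket)
{
        struct node *next = *bucket;

        *bucket = n;
        n->next = next;
        n->pprev = bucket;
        if (next)
                next->pprev = &n->next;
}

static void hash_del(struct node *n)
{
        if (n->next)
                n->next->pprev = n->pprev;
        *n->pprev = n->next;    /* O(1), no walk over the bucket */
        n->pprev = NULL;
}

int main(void)
{
        struct node *bucket = NULL;
        struct node a = { 1 }, b = { 2 };

        hash_add(&a, &bucket);
        hash_add(&b, &bucket);
        hash_del(&a);           /* unlink the tail element directly */
        printf("bucket head key: %d\n", bucket ? bucket->key : -1);
        return 0;
}
#endif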

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.
 */
void __remove_inode_page(struct page *page)
{
        remove_page_from_inode_queue(page);
        remove_page_from_hash_queue(page);
}

void remove_inode_page(struct page *page)
{
        if (!PageLocked(page))
                PAGE_BUG(page);

        spin_lock(&pagecache_lock);
        __remove_inode_page(page);
        spin_unlock(&pagecache_lock);
}

static inline int sync_page(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
                return mapping->a_ops->sync_page(page);
        return 0;
}

/*
 * Add a page to the dirty page list.
 */
void set_page_dirty(struct page *page)
{
        if (!test_and_set_bit(PG_dirty, &page->flags)) {
                struct address_space *mapping = page->mapping;

                if (mapping) {
                        spin_lock(&pagecache_lock);
                        mapping = page->mapping;
                        if (mapping) {  /* may have been truncated */
                                list_del(&page->list);
                                list_add(&page->list, &mapping->dirty_pages);
                        }
                        spin_unlock(&pagecache_lock);

                        if (mapping && mapping->host)
                                mark_inode_dirty_pages(mapping->host);
                        if (block_dump)
                                printk(KERN_DEBUG "%s: dirtied page\n", current->comm);
                }
        }
}

/**
 * invalidate_inode_pages - Invalidate all the unlocked pages of one inode
 * @inode: the inode whose pages we want to invalidate
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 */

void invalidate_inode_pages(struct inode * inode)
{
        struct list_head *head, *curr;
        struct page * page;

        head = &inode->i_mapping->clean_pages;

        spin_lock(&pagemap_lru_lock);
        spin_lock(&pagecache_lock);
        curr = head->next;

        while (curr != head) {
                page = list_entry(curr, struct page, list);
                curr = curr->next;

                /* We cannot invalidate something that is dirty.. */
                if (PageDirty(page))
                        continue;

                /* ..or locked */
                if (TryLockPage(page))
                        continue;

                if (page->buffers && !try_to_free_buffers(page, 0))
                        goto unlock;

                if (page_count(page) != 1)
                        goto unlock;

                __lru_cache_del(page);
                __remove_inode_page(page);
                UnlockPage(page);
                page_cache_release(page);
                continue;
unlock:
                UnlockPage(page);
                continue;
        }

        spin_unlock(&pagecache_lock);
        spin_unlock(&pagemap_lru_lock);
}

static int do_flushpage(struct page *page, unsigned long offset)
{
        int (*flushpage) (struct page *, unsigned long);
        flushpage = page->mapping->a_ops->flushpage;
        if (flushpage)
                return (*flushpage)(page, offset);
        return block_flushpage(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
        if (page->buffers)
                do_flushpage(page, partial);
}

static void truncate_complete_page(struct page *page)
{
        /* Leave it on the LRU if it gets converted into anonymous buffers */
        if (!page->buffers || do_flushpage(page, 0))
                lru_cache_del(page);

        /*
         * We remove the page from the page cache _after_ we have
         * destroyed all buffer-cache references to it. Otherwise some
         * other process might think this inode page is not in the
         * page cache and creates a buffer-cache alias to it causing
         * all sorts of fun problems ...
         */
        ClearPageDirty(page);
        ClearPageUptodate(page);
        remove_inode_page(page);
        page_cache_release(page);
}

static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
{
        struct list_head *curr;
        struct page * page;
        int unlocked = 0;

 restart:
        curr = head->prev;
        while (curr != head) {
                unsigned long offset;

                page = list_entry(curr, struct page, list);
                offset = page->index;

                /* Is this one of the pages to truncate? */
                if ((offset >= start) || (*partial && (offset + 1) == start)) {
                        int failed;

                        page_cache_get(page);
                        failed = TryLockPage(page);

                        list_del(head);
                        if (!failed)
                                /* Restart after this page */
                                list_add_tail(head, curr);
                        else
                                /* Restart on this page */
                                list_add(head, curr);

                        spin_unlock(&pagecache_lock);
                        unlocked = 1;

                        if (!failed) {
                                if (*partial && (offset + 1) == start) {
                                        truncate_partial_page(page, *partial);
                                        *partial = 0;
                                } else
                                        truncate_complete_page(page);

                                UnlockPage(page);
                        } else
                                wait_on_page(page);

                        page_cache_release(page);

                        if (current->need_resched) {
                                __set_current_state(TASK_RUNNING);
                                schedule();
                        }

                        spin_lock(&pagecache_lock);
                        goto restart;
                }
                curr = curr->prev;
        }
        return unlocked;
}


/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Truncate the page cache at a set offset, removing the pages
 * that are beyond that offset (and zeroing out partial pages).
 * If any page is locked we wait for it to become unlocked.
 */
void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
{
        unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        int unlocked;

        spin_lock(&pagecache_lock);
        do {
                unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial);
                unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial);
                unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
        } while (unlocked);
        /* Traversed all three lists without dropping the lock */
        spin_unlock(&pagecache_lock);
}
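
/*
 * Illustrative sketch (not part of the original file): how
 * truncate_inode_pages() splits an arbitrary byte offset into the index of
 * the first whole page to drop ("start") and the number of bytes to keep
 * in the straddling page ("partial").  Assumes 4 KB pages
 * (PAGE_CACHE_SHIFT = 12) purely for the sake of the printed numbers.
 */
#if 0   /* example only */
#include <stdio.h>

#define EX_PAGE_CACHE_SHIFT 12
#define EX_PAGE_CACHE_SIZE  (1UL << EX_PAGE_CACHE_SHIFT)

int main(void)
{
        unsigned long long lstart[] = { 0, 4096, 6000, 8192 };
        unsigned int i;

        for (i = 0; i < sizeof(lstart) / sizeof(lstart[0]); i++) {
                unsigned long start =
                        (lstart[i] + EX_PAGE_CACHE_SIZE - 1) >> EX_PAGE_CACHE_SHIFT;
                unsigned partial = lstart[i] & (EX_PAGE_CACHE_SIZE - 1);

                /* e.g. lstart 6000: keep 6000 bytes of page 1, drop page 2 onwards */
                printf("lstart %5llu -> start page %lu, partial bytes %u\n",
                       lstart[i], start, partial);
        }
        return 0;
}
#endif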

static inline int invalidate_this_page2(struct page * page,
                                        struct list_head * curr,
                                        struct list_head * head)
{
        int unlocked = 1;

        /*
         * The page is locked and we hold the pagecache_lock as well,
         * so both page_count(page) and page->buffers stay constant here.
         */
        if (page_count(page) == 1 + !!page->buffers) {
                /* Restart after this page */
                list_del(head);
                list_add_tail(head, curr);

                page_cache_get(page);
                spin_unlock(&pagecache_lock);
                truncate_complete_page(page);
        } else {
                if (page->buffers) {
                        /* Restart after this page */
                        list_del(head);
                        list_add_tail(head, curr);

                        page_cache_get(page);
                        spin_unlock(&pagecache_lock);
                        block_invalidate_page(page);
                } else
                        unlocked = 0;

                ClearPageDirty(page);
                ClearPageUptodate(page);
        }

        return unlocked;
}

static int FASTCALL(invalidate_list_pages2(struct list_head *));
static int invalidate_list_pages2(struct list_head *head)
{
        struct list_head *curr;
        struct page * page;
        int unlocked = 0;

 restart:
        curr = head->prev;
        while (curr != head) {
                page = list_entry(curr, struct page, list);

                if (!TryLockPage(page)) {
                        int __unlocked;

                        __unlocked = invalidate_this_page2(page, curr, head);
                        UnlockPage(page);
                        unlocked |= __unlocked;
                        if (!__unlocked) {
                                curr = curr->prev;
                                continue;
                        }
                } else {
                        /* Restart on this page */
                        list_del(head);
                        list_add(head, curr);

                        page_cache_get(page);
                        spin_unlock(&pagecache_lock);
                        unlocked = 1;
                        wait_on_page(page);
                }

                page_cache_release(page);
                if (current->need_resched) {
                        __set_current_state(TASK_RUNNING);
                        schedule();
                }

                spin_lock(&pagecache_lock);
                goto restart;
        }
        return unlocked;
}

/**
 * invalidate_inode_pages2 - Clear all the dirty bits around if it can't
 * free the pages because they're mapped.
 * @mapping: the address_space whose pages we want to invalidate
 */
void invalidate_inode_pages2(struct address_space * mapping)
{
        int unlocked;

        spin_lock(&pagecache_lock);
        do {
                unlocked = invalidate_list_pages2(&mapping->clean_pages);
                unlocked |= invalidate_list_pages2(&mapping->dirty_pages);
                unlocked |= invalidate_list_pages2(&mapping->locked_pages);
        } while (unlocked);
        spin_unlock(&pagecache_lock);
}

static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
{
        goto inside;

        for (;;) {
                page = page->next_hash;
inside:
                if (!page)
                        goto not_found;
                if (page->mapping != mapping)
                        continue;
                if (page->index == offset)
                        break;
        }

not_found:
        return page;
}

static int do_buffer_fdatasync(struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
{
        struct list_head *curr;
        struct page *page;
        int retval = 0;

        spin_lock(&pagecache_lock);
        curr = head->next;
        while (curr != head) {
                page = list_entry(curr, struct page, list);
                curr = curr->next;
                if (!page->buffers)
                        continue;
                if (page->index >= end)
                        continue;
                if (page->index < start)
                        continue;

                page_cache_get(page);
                spin_unlock(&pagecache_lock);
                lock_page(page);

                /* The buffers could have been free'd while we waited for the page lock */
                if (page->buffers)
                        retval |= fn(page);

                UnlockPage(page);
                spin_lock(&pagecache_lock);
                curr = page->list.next;
                page_cache_release(page);
        }
        spin_unlock(&pagecache_lock);

        return retval;
}

/*
 * Two-stage data sync: first start the IO, then go back and
 * collect the information..
 */
int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsigned long end_idx)
{
        int retval;

        /* writeout dirty buffers on pages from both clean and dirty lists */
        retval = do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
        retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, writeout_one_page);
        retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, writeout_one_page);

        /* now wait for locked buffers on pages from both clean and dirty lists */
        retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
        retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
        retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, waitfor_one_page);

        return retval;
}
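
/*
 * Illustrative sketch (not part of the original file): the two-stage pattern
 * used by generic_buffer_fdatasync() above - first kick off writeout for
 * every page in range, only then go back and wait - so the individual IOs
 * overlap instead of being issued and waited for one at a time.  The
 * "pages", writeout_one() and waitfor_one() below are hypothetical stand-ins.
 */
#if 0   /* example only */
#include <stdio.h>

struct fake_page { int dirty; int in_flight; };

static int writeout_one(struct fake_page *p)
{
        if (p->dirty) {
                p->dirty = 0;
                p->in_flight = 1;       /* IO submitted, not yet complete */
        }
        return 0;
}

static int waitfor_one(struct fake_page *p)
{
        p->in_flight = 0;               /* pretend the IO has completed */
        return 0;
}

int main(void)
{
        struct fake_page pages[4] = { {1}, {0}, {1}, {1} };
        int i, ret = 0;

        /* stage 1: start all the IO ... */
        for (i = 0; i < 4; i++)
                ret |= writeout_one(&pages[i]);
        /* stage 2: ... then collect the results */
        for (i = 0; i < 4; i++)
                ret |= waitfor_one(&pages[i]);

        printf("sync returned %d\n", ret);
        return 0;
}
#endif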

/*
 * In-memory filesystems have to fail their
 * writepage function - and this has to be
 * worked around in the VM layer..
 *
 * We
 *  - mark the page dirty again (but do NOT
 *    add it back to the inode dirty list, as
 *    that would livelock in fdatasync)
 *  - activate the page so that the page stealer
 *    doesn't try to write it out over and over
 *    again.
 */
int fail_writepage(struct page *page)
{
        /* Only activate on memory-pressure, not fsync.. */
        if (PageLaunder(page)) {
                activate_page(page);
                SetPageReferenced(page);
        }

        /* Set the page dirty again, unlock */
        SetPageDirty(page);
        UnlockPage(page);
        return 0;
}

EXPORT_SYMBOL(fail_writepage);

/**
 *      filemap_fdatawrite - walk the list of dirty pages of the given address space
 *      and writepage() each unlocked page (does not wait on locked pages).
 *
 *      @mapping: address space structure to write
 *
 */
int filemap_fdatawrite(struct address_space * mapping)
{
        int ret = 0;
        int (*writepage)(struct page *) = mapping->a_ops->writepage;

        spin_lock(&pagecache_lock);

        while (!list_empty(&mapping->dirty_pages)) {
                struct page *page = list_entry(mapping->dirty_pages.prev, struct page, list);

                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);

                if (!PageDirty(page))
                        continue;

                page_cache_get(page);
                spin_unlock(&pagecache_lock);

                if (!TryLockPage(page)) {
                        if (PageDirty(page)) {
                                int err;
                                ClearPageDirty(page);
                                err = writepage(page);
                                if (err && !ret)
                                        ret = err;
                        } else
                                UnlockPage(page);
                }
                page_cache_release(page);
                spin_lock(&pagecache_lock);
        }
        spin_unlock(&pagecache_lock);
        return ret;
}

/**
 *      filemap_fdatasync - walk the list of dirty pages of the given address space
 *      and writepage() all of them.
 *
 *      @mapping: address space structure to write
 *
 */
int filemap_fdatasync(struct address_space * mapping)
{
        int ret = 0;
        int (*writepage)(struct page *) = mapping->a_ops->writepage;

        spin_lock(&pagecache_lock);

        while (!list_empty(&mapping->dirty_pages)) {
                struct page *page = list_entry(mapping->dirty_pages.prev, struct page, list);

                list_del(&page->list);
                list_add(&page->list, &mapping->locked_pages);

                if (!PageDirty(page))
                        continue;

                page_cache_get(page);
                spin_unlock(&pagecache_lock);

                lock_page(page);

                if (PageDirty(page)) {
                        int err;
                        ClearPageDirty(page);
                        err = writepage(page);
                        if (err && !ret)
                                ret = err;
                } else
                        UnlockPage(page);

                page_cache_release(page);
                spin_lock(&pagecache_lock);
        }
        spin_unlock(&pagecache_lock);
        return ret;
}

/**
 *      filemap_fdatawait - walk the list of locked pages of the given address space
 *      and wait for all of them.
 *
 *      @mapping: address space structure to wait for
 *
 */
int filemap_fdatawait(struct address_space * mapping)
{
        int ret = 0;

        spin_lock(&pagecache_lock);

        while (!list_empty(&mapping->locked_pages)) {
                struct page *page = list_entry(mapping->locked_pages.next, struct page, list);

                list_del(&page->list);
                list_add(&page->list, &mapping->clean_pages);

                if (!PageLocked(page))
                        continue;

                page_cache_get(page);
                spin_unlock(&pagecache_lock);

                ___wait_on_page(page);
                if (PageError(page))
                        ret = -EIO;

                page_cache_release(page);
                spin_lock(&pagecache_lock);
        }
        spin_unlock(&pagecache_lock);
        return ret;
}

/*
 * Add a page to the inode page cache.
 *
 * The caller must have locked the page and
 * set all the page flags correctly..
 */
void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
{
        if (!PageLocked(page))
                BUG();

        page->index = index;
        page_cache_get(page);
        spin_lock(&pagecache_lock);
        add_page_to_inode_queue(mapping, page);
        add_page_to_hash_queue(page, page_hash(mapping, index));
        spin_unlock(&pagecache_lock);

        lru_cache_add(page);
}

/*
 * This adds a page to the page cache, starting out as locked,
 * owned by us, but unreferenced, not uptodate and with no errors.
 */
static inline void __add_to_page_cache(struct page * page,
        struct address_space *mapping, unsigned long offset,
        struct page **hash)
{
        /*
         * Yes this is inefficient, however it is needed.  The problem
         * is that we could be adding a page to the swap cache while
         * another CPU is also modifying page->flags, so the updates
         * really do need to be atomic.  -- Rik
         */
        ClearPageUptodate(page);
        ClearPageError(page);
        ClearPageDirty(page);
        ClearPageReferenced(page);
        ClearPageArch1(page);
        ClearPageChecked(page);
        LockPage(page);
        page_cache_get(page);
        page->index = offset;
        add_page_to_inode_queue(mapping, page);
        add_page_to_hash_queue(page, hash);
}

void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
{
        spin_lock(&pagecache_lock);
        __add_to_page_cache(page, mapping, offset, page_hash(mapping, offset));
        spin_unlock(&pagecache_lock);
        lru_cache_add(page);
}

int add_to_page_cache_unique(struct page * page,
        struct address_space *mapping, unsigned long offset,
        struct page **hash)
{
        int err;
        struct page *alias;

        spin_lock(&pagecache_lock);
        alias = __find_page_nolock(mapping, offset, *hash);

        err = 1;
        if (!alias) {
                __add_to_page_cache(page,mapping,offset,hash);
                err = 0;
        }

        spin_unlock(&pagecache_lock);
        if (!err)
                lru_cache_add(page);
        return err;
}

/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int page_cache_read(struct file * file, unsigned long offset)
{
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
        struct page **hash = page_hash(mapping, offset);
        struct page *page;

        spin_lock(&pagecache_lock);
        page = __find_page_nolock(mapping, offset, *hash);
        spin_unlock(&pagecache_lock);
        if (page)
                return 0;

        page = page_cache_alloc(mapping);
        if (!page)
                return -ENOMEM;

        if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
                int error = mapping->a_ops->readpage(file, page);
                page_cache_release(page);
                return error;
        }
        /*
         * We arrive here in the unlikely event that someone
         * raced with us and added our page to the cache first.
         */
        page_cache_release(page);
        return 0;
}

/*
 * Read in an entire cluster at once.  A cluster is usually a 64k-
 * aligned block that includes the page requested in "offset."
 */
static int FASTCALL(read_cluster_nonblocking(struct file * file, unsigned long offset,
                                             unsigned long filesize));
static int read_cluster_nonblocking(struct file * file, unsigned long offset,
        unsigned long filesize)
{
        unsigned long pages = CLUSTER_PAGES;

        offset = CLUSTER_OFFSET(offset);
        while ((pages-- > 0) && (offset < filesize)) {
                int error = page_cache_read(file, offset);
                if (error < 0)
                        return error;
                offset ++;
        }

        return 0;
}

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is, operations on
 * them can use shifts and additions instead of multiplications on
 * machines where multiplication is slow.
 */
#if BITS_PER_LONG == 32
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e370001UL
#elif BITS_PER_LONG == 64
/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
#else
#error Define GOLDEN_RATIO_PRIME for your wordsize.
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. We use a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake them all when any of the pages
 * becomes available; the woken contexts then check whether the
 * page they are waiting for really became available. This saves
 * space at the cost of a "thundering herd" phenomenon during rare
 * hash collisions.
 */
static inline wait_queue_head_t *page_waitqueue(struct page *page)
{
        const zone_t *zone = page_zone(page);
        wait_queue_head_t *wait = zone->wait_table;
        unsigned long hash = (unsigned long)page;

#if BITS_PER_LONG == 64
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        unsigned long n = hash;
        n <<= 18;
        hash -= n;
        n <<= 33;
        hash -= n;
        n <<= 3;
        hash += n;
        n <<= 3;
        hash -= n;
        n <<= 4;
        hash += n;
        n <<= 2;
        hash += n;
#else
        /* On some cpus multiply is faster, on others gcc will do shifts */
        hash *= GOLDEN_RATIO_PRIME;
#endif
        hash >>= zone->wait_table_shift;

        return &wait[hash];
}
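
/*
 * Illustrative sketch (not part of the original file): a user-space check
 * that the shift/add sequence in page_waitqueue() really is just a
 * multiplication by GOLDEN_RATIO_PRIME (the 64-bit variant), and an example
 * of folding the product down to a small power-of-two table index by
 * keeping the top bits, the way the zone wait tables are indexed.  The
 * table size below is an arbitrary choice for the demo.
 */
#if 0   /* example only */
#include <stdio.h>

#define EX_GOLDEN_RATIO_PRIME 0x9e37fffffffc0001ULL
#define EX_TABLE_BITS 8         /* hypothetical: 256 wait-queue buckets */

static unsigned long long hash_by_shifts(unsigned long long hash)
{
        unsigned long long n = hash;

        n <<= 18; hash -= n;
        n <<= 33; hash -= n;
        n <<= 3;  hash += n;
        n <<= 3;  hash -= n;
        n <<= 4;  hash += n;
        n <<= 2;  hash += n;
        return hash;
}

int main(void)
{
        unsigned long long ptr = 0xffff880012345678ULL;  /* a fake address */
        unsigned long long a = ptr * EX_GOLDEN_RATIO_PRIME;
        unsigned long long b = hash_by_shifts(ptr);

        printf("multiply: %016llx\nshifts:   %016llx\n", a, b);
        printf("bucket:   %llu\n", a >> (64 - EX_TABLE_BITS));
        return 0;
}
#endif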

/*
 * This must be called after every submit_bh with end_io
 * callbacks that would result in the blkdev layer waking
 * up the page after a queue unplug.
 */
void wakeup_page_waiters(struct page * page)
{
        wait_queue_head_t * head;

        head = page_waitqueue(page);
        if (waitqueue_active(head))
                wake_up(head);
}

/*
 * Wait for a page to get unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 *
 * The waiting strategy is to get on a waitqueue determined
 * by hashing. Waiters will then collide, and the newly woken
 * task must then determine whether it was woken for the page
 * it really wanted, and go back to sleep on the waitqueue if
 * that wasn't it. With the waitqueue semantics, the task never
 * leaves the waitqueue unless it explicitly removes itself, so
 * the loop moves forward one iteration every time there is
 * (1) a collision
 * and
 * (2) one of the colliding pages is woken
 *
 * This is the thundering herd problem, but it is expected to
 * be very rare due to the few pages that are actually being
 * waited on at any given time and the quality of the hash function.
 */
void ___wait_on_page(struct page *page)
{
        wait_queue_head_t *waitqueue = page_waitqueue(page);
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        add_wait_queue(waitqueue, &wait);
        do {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!PageLocked(page))
                        break;
                sync_page(page);
                schedule();
        } while (PageLocked(page));
        __set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(waitqueue, &wait);
}

/*
 * unlock_page() is the other half of the story just above
 * __wait_on_page(). Here a couple of quick checks are done
 * and a couple of flags are set on the page, and then all
 * of the waiters for all of the pages in the appropriate
 * wait queue are woken.
 */
void unlock_page(struct page *page)
{
        wait_queue_head_t *waitqueue = page_waitqueue(page);
        ClearPageLaunder(page);
        smp_mb__before_clear_bit();
        if (!test_and_clear_bit(PG_locked, &(page)->flags))
                BUG();
        smp_mb__after_clear_bit();

        /*
         * Although the default semantics of wake_up() are
         * to wake all, here the specific function is used
         * to make it even more explicit that a number of
         * pages are being waited on here.
         */
        if (waitqueue_active(waitqueue))
                wake_up_all(waitqueue);
}

/*
 * Get a lock on the page, assuming we need to sleep
 * to get it..
 */
static void __lock_page(struct page *page)
{
        wait_queue_head_t *waitqueue = page_waitqueue(page);
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        add_wait_queue_exclusive(waitqueue, &wait);
        for (;;) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (PageLocked(page)) {
                        sync_page(page);
                        schedule();
                }
                if (!TryLockPage(page))
                        break;
        }
        __set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(waitqueue, &wait);
}

/*
 * Get an exclusive lock on the page, optimistically
 * assuming it's not locked..
 */
void lock_page(struct page *page)
{
        if (TryLockPage(page))
                __lock_page(page);
}

/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * __find_get_page(struct address_space *mapping,
                              unsigned long offset, struct page **hash)
{
        struct page *page;

        /*
         * We scan the hash list read-only. Addition to and removal from
         * the hash-list needs a held write-lock.
         */
        spin_lock(&pagecache_lock);
        page = __find_page_nolock(mapping, offset, *hash);
        if (page)
                page_cache_get(page);
        spin_unlock(&pagecache_lock);
        return page;
}

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
        struct page *page;
        struct page **hash = page_hash(mapping, offset);

        spin_lock(&pagecache_lock);
        page = __find_page_nolock(mapping, offset, *hash);
        if (page) {
                if (TryLockPage(page))
                        page = NULL;
        }
        spin_unlock(&pagecache_lock);
        return page;
}

/*
 * Must be called with the pagecache lock held,
 * will return with it held (but it may be dropped
 * during blocking operations).
 */
static struct page * FASTCALL(__find_lock_page_helper(struct address_space *, unsigned long, struct page *));
static struct page * __find_lock_page_helper(struct address_space *mapping,
                                        unsigned long offset, struct page *hash)
{
        struct page *page;

        /*
         * We scan the hash list read-only. Addition to and removal from
         * the hash-list needs a held write-lock.
         */
repeat:
        page = __find_page_nolock(mapping, offset, hash);
        if (page) {
                page_cache_get(page);
                if (TryLockPage(page)) {
                        spin_unlock(&pagecache_lock);
                        lock_page(page);
                        spin_lock(&pagecache_lock);

                        /* Has the page been re-allocated while we slept? */
                        if (page->mapping != mapping || page->index != offset) {
                                UnlockPage(page);
                                page_cache_release(page);
                                goto repeat;
                        }
                }
        }
        return page;
}

/*
 * Same as the above, but lock the page too, verifying that
 * it's still valid once we own it.
 */
struct page * __find_lock_page (struct address_space *mapping,
                                unsigned long offset, struct page **hash)
{
        struct page *page;

        spin_lock(&pagecache_lock);
        page = __find_lock_page_helper(mapping, offset, *hash);
        spin_unlock(&pagecache_lock);
        return page;
}

/*
 * Same as above, but create the page if required..
 */
struct page * find_or_create_page(struct address_space *mapping, unsigned long index, unsigned int gfp_mask)
{
        struct page *page;
        struct page **hash = page_hash(mapping, index);

        spin_lock(&pagecache_lock);
        page = __find_lock_page_helper(mapping, index, *hash);
        spin_unlock(&pagecache_lock);
        if (!page) {
                struct page *newpage = alloc_page(gfp_mask);
                if (newpage) {
                        spin_lock(&pagecache_lock);
                        page = __find_lock_page_helper(mapping, index, *hash);
                        if (likely(!page)) {
                                page = newpage;
                                __add_to_page_cache(page, mapping, index, hash);
                                newpage = NULL;
                        }
                        spin_unlock(&pagecache_lock);
                        if (newpage == NULL)
                                lru_cache_add(page);
                        else
                                page_cache_release(newpage);
                }
        }
        return page;
}
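
/*
 * Illustrative sketch (not part of the original file): the
 * allocate-outside-the-lock, re-check-under-the-lock pattern used by
 * find_or_create_page() above, reduced to a user-space cache of one slot.
 * The cache_lock, slot and find_or_create() names are hypothetical; the
 * point is that the freshly allocated object is thrown away if somebody
 * else won the race while the lock was dropped.
 */
#if 0   /* example only */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static int *slot;                       /* the "page cache", one entry */

static int *find_or_create(int value)
{
        int *found, *newobj;

        pthread_mutex_lock(&cache_lock);
        found = slot;                   /* first lookup, under the lock */
        pthread_mutex_unlock(&cache_lock);
        if (found)
                return found;

        newobj = malloc(sizeof(*newobj));       /* allocate without the lock */
        if (!newobj)
                return NULL;
        *newobj = value;

        pthread_mutex_lock(&cache_lock);
        found = slot;                   /* somebody may have beaten us */
        if (!found) {
                slot = newobj;
                found = newobj;
                newobj = NULL;
        }
        pthread_mutex_unlock(&cache_lock);

        free(newobj);                   /* lost the race: drop our copy */
        return found;
}

int main(void)
{
        printf("entry: %d\n", *find_or_create(42));
        printf("entry: %d\n", *find_or_create(99));     /* hits the cache */
        return 0;
}
#endif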

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 */
struct page *grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
        struct page *page, **hash;

        hash = page_hash(mapping, index);
        page = __find_get_page(mapping, index, hash);

        if ( page ) {
                if ( !TryLockPage(page) ) {
                        /* Page found and locked */
                        /* This test is overly paranoid, but what the heck... */
                        if ( unlikely(page->mapping != mapping || page->index != index) ) {
                                /* Someone reallocated this page under us. */
                                UnlockPage(page);
                                page_cache_release(page);
                                return NULL;
                        } else {
                                return page;
                        }
                } else {
                        /* Page locked by someone else */
                        page_cache_release(page);
                        return NULL;
                }
        }

        page = page_cache_alloc(mapping);
        if ( unlikely(!page) )
                return NULL;    /* Failed to allocate a page */

        if ( unlikely(add_to_page_cache_unique(page, mapping, index, hash)) ) {
                /* Someone else grabbed the page already. */
                page_cache_release(page);
                return NULL;
        }

        return page;
}

#if 0
#define PROFILE_READAHEAD
#define DEBUG_READAHEAD
#endif

/*
 * Read-ahead profiling information
 * --------------------------------
 * Every PROFILE_MAXREADCOUNT reads, the following information is written
 * to the syslog:
 *   Percentage of asynchronous read-ahead.
 *   Average value of the read-ahead context fields.
 * If DEBUG_READAHEAD is defined, a snapshot of these fields is written
 * to the syslog.
 */

#ifdef PROFILE_READAHEAD

#define PROFILE_MAXREADCOUNT 1000

static unsigned long total_reada;
static unsigned long total_async;
static unsigned long total_ramax;
static unsigned long total_ralen;
static unsigned long total_rawin;

static void profile_readahead(int async, struct file *filp)
{
        unsigned long flags;

        ++total_reada;
        if (async)
                ++total_async;

        total_ramax     += filp->f_ramax;
        total_ralen     += filp->f_ralen;
        total_rawin     += filp->f_rawin;

        if (total_reada > PROFILE_MAXREADCOUNT) {
                save_flags(flags);
                cli();
                if (!(total_reada > PROFILE_MAXREADCOUNT)) {
                        restore_flags(flags);
                        return;
                }

                printk("Readahead average:  max=%ld, len=%ld, win=%ld, async=%ld%%\n",
                        total_ramax/total_reada,
                        total_ralen/total_reada,
                        total_rawin/total_reada,
                        (total_async*100)/total_reada);
#ifdef DEBUG_READAHEAD
                printk("Readahead snapshot: max=%ld, len=%ld, win=%ld, raend=%Ld\n",
                        filp->f_ramax, filp->f_ralen, filp->f_rawin, filp->f_raend);
#endif

                total_reada     = 0;
                total_async     = 0;
                total_ramax     = 0;
                total_ralen     = 0;
                total_rawin     = 0;

                restore_flags(flags);
        }
}
#endif  /* defined PROFILE_READAHEAD */

/*
 * Read-ahead context:
 * -------------------
 * The read ahead context fields of the "struct file" are the following:
 * - f_raend : position of the first byte after the last page we tried to
 *             read ahead.
 * - f_ramax : current read-ahead maximum size.
 * - f_ralen : length of the current IO read block we tried to read-ahead.
 * - f_rawin : length of the current read-ahead window.
 *              if last read-ahead was synchronous then
 *                      f_rawin = f_ralen
 *              otherwise (was asynchronous)
 *                      f_rawin = previous value of f_ralen + f_ralen
 *
 * Read-ahead limits:
 * ------------------
 * MIN_READAHEAD   : minimum read-ahead size when reading ahead.
 * MAX_READAHEAD   : maximum read-ahead size when reading ahead.
 *
 * Synchronous read-ahead benefits:
 * --------------------------------
 * Using a reasonable IO xfer length from peripheral devices increases system
 * performance.
 * Reasonable means, in this context, not too large but not too small.
 * The actual maximum value is:
 *      MAX_READAHEAD + PAGE_CACHE_SIZE = 76k if CONFIG_READA_SMALL is undefined,
 *      and 32K if defined (4K page size assumed).
 *
 * Asynchronous read-ahead benefits:
 * ---------------------------------
 * Overlapping the next read request with user process execution increases
 * system performance.
 *
 * Read-ahead risks:
 * -----------------
 * We have to guess which further data are needed by the user process.
 * If these data are often not really needed, it's bad for system
 * performance.
 * However, we know that files are often accessed sequentially by
 * application programs and it seems that it is possible to have some good
 * strategy in that guessing.
 * We only try to read-ahead files that seem to be read sequentially.
 *
 * Asynchronous read-ahead risks:
 * ------------------------------
 * In order to maximize overlapping, we must start some asynchronous read
 * request from the device, as soon as possible.
 * We must be very careful about:
 * - The number of effective pending IO read requests.
 *   ONE seems to be the only reasonable value.
 * - The total memory pool usage for the file access stream.
 *   This maximum memory usage is implicitly 2 IO read chunks:
 *   2*(MAX_READAHEAD + PAGE_CACHE_SIZE) = 156K if CONFIG_READA_SMALL is undefined,
 *   64k if defined (4K page size assumed).
 */
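
/*
 * Illustrative sketch (not part of the original file): how the read-ahead
 * window grows under the rules described above - the maximum doubles after
 * every successful read-ahead and is clamped to the per-device maximum.
 * The numbers (min 3, max 31, an initial need of 2 pages) mirror
 * vm_min_readahead/vm_max_readahead but are hard-coded here for the demo.
 */
#if 0   /* example only */
#include <stdio.h>

int main(void)
{
        unsigned long ramax = 2;        /* pages needed by the first read */
        const unsigned long vmin = 3, vmax = 31;
        int pass;

        if (ramax < vmin)
                ramax = vmin;
        for (pass = 1; pass <= 6; pass++) {
                ramax += ramax;         /* filp->f_ramax += filp->f_ramax; */
                if (ramax > vmax)
                        ramax = vmax;
                printf("after read-ahead %d: f_ramax = %lu pages\n",
                       pass, ramax);
        }
        return 0;
}
#endif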

static inline int get_max_readahead(struct inode * inode)
{
        if (!inode->i_dev || !max_readahead[MAJOR(inode->i_dev)])
                return vm_max_readahead;
        return max_readahead[MAJOR(inode->i_dev)][MINOR(inode->i_dev)];
}

static void generic_file_readahead(int reada_ok,
        struct file * filp, struct inode * inode,
        struct page * page)
{
        unsigned long end_index;
        unsigned long index = page->index;
        unsigned long max_ahead, ahead;
        unsigned long raend;
        int max_readahead = get_max_readahead(inode);

        end_index = inode->i_size >> PAGE_CACHE_SHIFT;

        raend = filp->f_raend;
        max_ahead = 0;

/*
 * The current page is locked.
 * If the current position is inside the previous read IO request, do not
 * try to reread previously read ahead pages.
 * Otherwise decide whether or not to read ahead some pages synchronously.
 * If we are not going to read ahead, set the read ahead context for this
 * page only.
 */
        if (PageLocked(page)) {
                if (!filp->f_ralen || index >= raend || index + filp->f_rawin < raend) {
                        raend = index;
                        if (raend < end_index)
                                max_ahead = filp->f_ramax;
                        filp->f_rawin = 0;
                        filp->f_ralen = 1;
                        if (!max_ahead) {
                                filp->f_raend  = index + filp->f_ralen;
                                filp->f_rawin += filp->f_ralen;
                        }
                }
        }
/*
 * The current page is not locked.
 * If we were reading ahead and,
 * if the current max read ahead size is not zero and,
 * if the current position is inside the last read-ahead IO request,
 *   it is the moment to try to read ahead asynchronously.
 * We will later force an unplug of the device in order to force asynchronous read IO.
 */
        else if (reada_ok && filp->f_ramax && raend >= 1 &&
                 index <= raend && index + filp->f_ralen >= raend) {
/*
 * Add ONE page to max_ahead in order to try to have about the same IO max size
 * as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_CACHE_SIZE.
 * Compute the position of the last page we have tried to read in order to
 * begin to read ahead just at the next page.
 */
                raend -= 1;
                if (raend < end_index)
                        max_ahead = filp->f_ramax + 1;

                if (max_ahead) {
                        filp->f_rawin = filp->f_ralen;
                        filp->f_ralen = 0;
                        reada_ok      = 2;
                }
        }
/*
 * Try to read ahead pages.
 * We hope that ll_rw_blk() plug/unplug, coalescence, request sorting and the
 * scheduler will work well enough for us to avoid overly bad actual IO requests.
 */
        ahead = 0;
        while (ahead < max_ahead) {
                unsigned long ra_index = raend + ahead + 1;

                if (ra_index >= end_index)
                        break;
                if (page_cache_read(filp, ra_index) < 0)
                        break;

                ahead++;
        }
/*
 * If we tried to read ahead some pages,
 * If we tried to read ahead asynchronously,
 *   Try to force unplug of the device in order to start an asynchronous
 *   read IO request.
 * Update the read-ahead context.
 * Store the length of the current read-ahead window.
 * Double the current max read ahead size.
 *   That heuristic avoids doing large IO for files that are not really
 *   accessed sequentially.
 */
        if (ahead) {
                filp->f_ralen += ahead;
                filp->f_rawin += filp->f_ralen;
                filp->f_raend = raend + ahead + 1;

                filp->f_ramax += filp->f_ramax;

                if (filp->f_ramax > max_readahead)
                        filp->f_ramax = max_readahead;

#ifdef PROFILE_READAHEAD
                profile_readahead((reada_ok == 2), filp);
#endif
        }

        return;
}

/*
 * Mark a page as having seen activity.
 *
 * If it was already so marked, move it to the active queue and drop
 * the referenced bit.  Otherwise, just mark it for future action..
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && PageReferenced(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else
                SetPageReferenced(page);
}
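
/*
 * Illustrative sketch (not part of the original file): the two-step ageing
 * that mark_page_accessed() implements - the first touch only sets a
 * referenced bit, the second touch promotes the page to the active list.
 * A user-space model with two plain flags standing in for the page bits:
 */
#if 0   /* example only */
#include <stdio.h>

struct fake_page { int active; int referenced; };

static void touch(struct fake_page *p)
{
        if (!p->active && p->referenced) {
                p->active = 1;          /* activate_page() */
                p->referenced = 0;      /* ClearPageReferenced() */
        } else {
                p->referenced = 1;      /* SetPageReferenced() */
        }
}

int main(void)
{
        struct fake_page p = { 0, 0 };
        int i;

        for (i = 1; i <= 3; i++) {
                touch(&p);
                printf("touch %d: active=%d referenced=%d\n",
                       i, p.active, p.referenced);
        }
        return 0;
}
#endif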

/*
 * This is a generic file read routine, and uses the
 * inode->i_op->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
{
        struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
        struct inode *inode = mapping->host;
        unsigned long index, offset;
        struct page *cached_page;
        int reada_ok;
        int error;
        int max_readahead = get_max_readahead(inode);

        cached_page = NULL;
        index = *ppos >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;

/*
 * If the current position is outside the previous read-ahead window,
 * we reset the current read-ahead context and set read ahead max to zero
 * (it will be set to just the needed value later);
 * otherwise, we assume that the file accesses are sequential enough to
 * continue read-ahead.
 */
        if (index > filp->f_raend || index + filp->f_rawin < filp->f_raend) {
                reada_ok = 0;
                filp->f_raend = 0;
                filp->f_ralen = 0;
                filp->f_ramax = 0;
                filp->f_rawin = 0;
        } else {
                reada_ok = 1;
        }
/*
 * Adjust the current value of read-ahead max.
 * If the read operation stays within the first half of the first page, force no readahead.
 * Otherwise try to increase read ahead max just enough to do the read request.
 * Then clamp it to at least MIN_READAHEAD if read ahead is ok,
 * and at most MAX_READAHEAD in all cases.
 */
        if (!index && offset + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
                filp->f_ramax = 0;
        } else {
                unsigned long needed;

                needed = ((offset + desc->count) >> PAGE_CACHE_SHIFT) + 1;

                if (filp->f_ramax < needed)
                        filp->f_ramax = needed;

                if (reada_ok && filp->f_ramax < vm_min_readahead)
                                filp->f_ramax = vm_min_readahead;
                if (filp->f_ramax > max_readahead)
                        filp->f_ramax = max_readahead;
        }

        for (;;) {
                struct page *page, **hash;
                unsigned long end_index, nr, ret;

                end_index = inode->i_size >> PAGE_CACHE_SHIFT;

                if (index > end_index)
                        break;
                nr = PAGE_CACHE_SIZE;
                if (index == end_index) {
                        nr = inode->i_size & ~PAGE_CACHE_MASK;
                        if (nr <= offset)
                                break;
                }

                nr = nr - offset;

                /*
                 * Try to find the data in the page cache..
                 */
                hash = page_hash(mapping, index);

                spin_lock(&pagecache_lock);
                page = __find_page_nolock(mapping, index, *hash);
                if (!page)
                        goto no_cached_page;
found_page:
                page_cache_get(page);
                spin_unlock(&pagecache_lock);

                if (!Page_Uptodate(page))
                        goto page_not_up_to_date;
                generic_file_readahead(reada_ok, filp, inode, page);
page_ok:
                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping->i_mmap_shared != NULL)
                        flush_dcache_page(page);

                /*
                 * Mark the page accessed if we read the
                 * beginning or we just did an lseek.
                 */
                if (!offset || !filp->f_reada)
                        mark_page_accessed(page);

                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                ret = actor(desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;

                page_cache_release(page);
                if (ret == nr && desc->count)
                        continue;
                break;

/*
 * Ok, the page was not immediately readable, so let's try to read ahead while we're at it..
 */
page_not_up_to_date:
                generic_file_readahead(reada_ok, filp, inode, page);

                if (Page_Uptodate(page))
                        goto page_ok;

                /* Get exclusive access to the page ... */
                lock_page(page);

                /* Did it get unhashed before we got the lock? */
                if (!page->mapping) {
                        UnlockPage(page);
                        page_cache_release(page);
                        continue;
                }

                /* Did somebody else fill it already? */
                if (Page_Uptodate(page)) {
                        UnlockPage(page);
                        goto page_ok;
                }

readpage:
                /* ... and start the actual read. The read will unlock the page. */
                error = mapping->a_ops->readpage(filp, page);

                if (!error) {
                        if (Page_Uptodate(page))
                                goto page_ok;

                        /* Again, try some read-ahead while waiting for the page to finish.. */
                        generic_file_readahead(reada_ok, filp, inode, page);
                        wait_on_page(page);
                        if (Page_Uptodate(page))
                                goto page_ok;
                        error = -EIO;
                }

                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
                page_cache_release(page);
                break;

no_cached_page:
                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 *
                 * We get here with the page cache lock held.
                 */
                if (!cached_page) {
                        spin_unlock(&pagecache_lock);
                        cached_page = page_cache_alloc(mapping);
                        if (!cached_page) {
                                desc->error = -ENOMEM;
                                break;
                        }

                        /*
                         * Somebody may have added the page while we
                         * dropped the page cache lock. Check for that.
                         */
                        spin_lock(&pagecache_lock);
                        page = __find_page_nolock(mapping, index, *hash);
                        if (page)
                                goto found_page;
                }

                /*
                 * Ok, add the new page to the hash-queues...
                 */
                page = cached_page;
                __add_to_page_cache(page, mapping, index, hash);
                spin_unlock(&pagecache_lock);
                lru_cache_add(page);
1607
                cached_page = NULL;
1608
 
1609
                goto readpage;
1610
        }
1611
 
1612
        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1613
        filp->f_reada = 1;
1614
        if (cached_page)
1615
                page_cache_release(cached_page);
1616
        UPDATE_ATIME(inode);
1617
}
1618
 
1619
static inline int have_mapping_directIO(struct address_space * mapping)
1620
{
1621
        return mapping->a_ops->direct_IO || mapping->a_ops->direct_fileIO;
1622
}
1623
 
1624
/* Switch between old and new directIO formats */
1625
static inline int do_call_directIO(int rw, struct file *filp, struct kiobuf *iobuf, unsigned long offset, int blocksize)
1626
{
1627
        struct address_space * mapping = filp->f_dentry->d_inode->i_mapping;
1628
 
1629
        if (mapping->a_ops->direct_fileIO)
1630
                return mapping->a_ops->direct_fileIO(rw, filp, iobuf, offset, blocksize);
1631
        return mapping->a_ops->direct_IO(rw, mapping->host, iobuf, offset, blocksize);
1632
}
1633
 
1634
/*
1635
 * i_sem and i_alloc_sem should be held already.  i_sem may be dropped
1636
 * later once we've mapped the new IO.  i_alloc_sem is kept until the IO
1637
 * completes.
1638
 */
1639
 
1640
static ssize_t generic_file_direct_IO(int rw, struct file * filp, char * buf, size_t count, loff_t offset)
1641
{
1642
        ssize_t retval;
1643
        int new_iobuf, chunk_size, blocksize_mask, blocksize, blocksize_bits, iosize, progress;
1644
        struct kiobuf * iobuf;
1645
        struct address_space * mapping = filp->f_dentry->d_inode->i_mapping;
1646
        struct inode * inode = mapping->host;
1647
        loff_t size = inode->i_size;
1648
 
1649
        new_iobuf = 0;
1650
        iobuf = filp->f_iobuf;
1651
        if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
1652
                /*
1653
                 * A parallel read/write is using the preallocated iobuf
1654
                 * so just run slow and allocate a new one.
1655
                 */
1656
                retval = alloc_kiovec(1, &iobuf);
1657
                if (retval)
1658
                        goto out;
1659
                new_iobuf = 1;
1660
        }
1661
 
1662
        blocksize = 1 << inode->i_blkbits;
1663
        blocksize_bits = inode->i_blkbits;
1664
        blocksize_mask = blocksize - 1;
1665
        chunk_size = KIO_MAX_ATOMIC_IO << 10;
1666
 
1667
        retval = -EINVAL;
1668
        if ((offset & blocksize_mask) || (count & blocksize_mask) || ((unsigned long) buf & blocksize_mask))
1669
                goto out_free;
1670
        if (!have_mapping_directIO(mapping))
1671
                goto out_free;
1672
 
1673
        if ((rw == READ) && (offset + count > size))
1674
                count = size - offset;
1675
 
1676
        /*
1677
         * Flush only the _data_ to disk; metadata must remain
1678
         * completely asynchronous or performance will go to /dev/null.
1679
         */
1680
        retval = filemap_fdatasync(mapping);
1681
        if (retval == 0)
1682
                retval = fsync_inode_data_buffers(inode);
1683
        if (retval == 0)
1684
                retval = filemap_fdatawait(mapping);
1685
        if (retval < 0)
1686
                goto out_free;
1687
 
1688
        progress = retval = 0;
1689
        while (count > 0) {
1690
                iosize = count;
1691
                if (iosize > chunk_size)
1692
                        iosize = chunk_size;
1693
 
1694
                retval = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
1695
                if (retval)
1696
                        break;
1697
 
1698
                retval = do_call_directIO(rw, filp, iobuf, (offset+progress) >> blocksize_bits, blocksize);
1699
 
1700
                if (rw == READ && retval > 0)
1701
                        mark_dirty_kiobuf(iobuf, retval);
1702
 
1703
                if (retval >= 0) {
1704
                        count -= retval;
1705
                        buf += retval;
1706
                        /* warning: weird semantics here, we're reporting a read beyond the end of the file */
1707
                        progress += retval;
1708
                }
1709
 
1710
                unmap_kiobuf(iobuf);
1711
 
1712
                if (retval != iosize)
1713
                        break;
1714
        }
1715
 
1716
        if (progress)
1717
                retval = progress;
1718
 
1719
 out_free:
1720
        if (!new_iobuf)
1721
                clear_bit(0, &filp->f_iobuf_lock);
1722
        else
1723
                free_kiovec(1, &iobuf);
1724
 out:
1725
        return retval;
1726
}
1727
 
1728
int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
1729
{
1730
        char *kaddr;
1731
        unsigned long left, count = desc->count;
1732
 
1733
        if (size > count)
1734
                size = count;
1735
 
1736
        kaddr = kmap(page);
1737
        left = __copy_to_user(desc->buf, kaddr + offset, size);
1738
        kunmap(page);
1739
 
1740
        if (left) {
1741
                size -= left;
1742
                desc->error = -EFAULT;
1743
        }
1744
        desc->count = count - size;
1745
        desc->written += size;
1746
        desc->buf += size;
1747
        return size;
1748
}
1749
 
1750
inline ssize_t do_generic_direct_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
1751
{
1752
        ssize_t retval;
1753
        loff_t pos = *ppos;
1754
 
1755
        retval = generic_file_direct_IO(READ, filp, buf, count, pos);
1756
        if (retval > 0)
1757
                *ppos = pos + retval;
1758
        return retval;
1759
}
1760
 
1761
/*
1762
 * This is the "read()" routine for all filesystems
1763
 * that can use the page cache directly.
1764
 */
1765
ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
1766
{
1767
        ssize_t retval;
1768
 
1769
        if ((ssize_t) count < 0)
1770
                return -EINVAL;
1771
 
1772
        if (filp->f_flags & O_DIRECT)
1773
                goto o_direct;
1774
 
1775
        retval = -EFAULT;
1776
        if (access_ok(VERIFY_WRITE, buf, count)) {
1777
                retval = 0;
1778
 
1779
                if (count) {
1780
                        read_descriptor_t desc;
1781
 
1782
                        desc.written = 0;
1783
                        desc.count = count;
1784
                        desc.buf = buf;
1785
                        desc.error = 0;
1786
                        do_generic_file_read(filp, ppos, &desc, file_read_actor);
1787
 
1788
                        retval = desc.written;
1789
                        if (!retval)
1790
                                retval = desc.error;
1791
                }
1792
        }
1793
 out:
1794
        return retval;
1795
 
1796
 o_direct:
1797
        {
1798
                loff_t size;
1799
                struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
1800
                struct inode *inode = mapping->host;
1801
 
1802
                retval = 0;
1803
                if (!count)
1804
                        goto out; /* skip atime */
1805
                down_read(&inode->i_alloc_sem);
1806
                down(&inode->i_sem);
1807
                size = inode->i_size;
1808
                if (*ppos < size)
1809
                        retval = do_generic_direct_read(filp, buf, count, ppos);
1810
                up(&inode->i_sem);
1811
                up_read(&inode->i_alloc_sem);
1812
                UPDATE_ATIME(filp->f_dentry->d_inode);
1813
                goto out;
1814
        }
1815
}
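/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * generic_file_direct_IO() above rejects any O_DIRECT transfer whose file
 * offset, length or user buffer address is not a multiple of the inode
 * block size.  The 4096-byte alignment below is an assumption made for
 * illustration; the real requirement is the filesystem block size.
 */
#if 0   /* userspace example, compile separately */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int read_one_block_odirect(const char *path)
{
        void *buf;
        ssize_t n;
        int fd = open(path, O_RDONLY | O_DIRECT);

        if (fd < 0)
                return -1;
        /* buffer address, file offset and count must all be aligned */
        if (posix_memalign(&buf, 4096, 4096)) {
                close(fd);
                return -1;
        }
        n = read(fd, buf, 4096);
        if (n < 0)
                perror("read");
        free(buf);
        close(fd);
        return n < 0 ? -1 : 0;
}
#endif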
1816
 
1817
static int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
1818
{
1819
        ssize_t written;
1820
        unsigned long count = desc->count;
1821
        struct file *file = (struct file *) desc->buf;
1822
 
1823
        if (size > count)
1824
                size = count;
1825
 
1826
        if (file->f_op->sendpage) {
1827
                written = file->f_op->sendpage(file, page, offset,
1828
                                               size, &file->f_pos, size<count);
1829
        } else {
1830
                char *kaddr;
1831
                mm_segment_t old_fs;
1832
 
1833
                old_fs = get_fs();
1834
                set_fs(KERNEL_DS);
1835
 
1836
                kaddr = kmap(page);
1837
                written = file->f_op->write(file, kaddr + offset, size, &file->f_pos);
1838
                kunmap(page);
1839
 
1840
                set_fs(old_fs);
1841
        }
1842
        if (written < 0) {
1843
                desc->error = written;
1844
                written = 0;
1845
        }
1846
        desc->count = count - written;
1847
        desc->written += written;
1848
        return written;
1849
}
1850
 
1851
static ssize_t common_sendfile(int out_fd, int in_fd, loff_t *offset, size_t count)
1852
{
1853
        ssize_t retval;
1854
        struct file * in_file, * out_file;
1855
        struct inode * in_inode, * out_inode;
1856
 
1857
        /*
1858
         * Get input file, and verify that it is ok..
1859
         */
1860
        retval = -EBADF;
1861
        in_file = fget(in_fd);
1862
        if (!in_file)
1863
                goto out;
1864
        if (!(in_file->f_mode & FMODE_READ))
1865
                goto fput_in;
1866
        retval = -EINVAL;
1867
        in_inode = in_file->f_dentry->d_inode;
1868
        if (!in_inode)
1869
                goto fput_in;
1870
        if (!in_inode->i_mapping->a_ops->readpage)
1871
                goto fput_in;
1872
        retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, in_file->f_pos, count);
1873
        if (retval)
1874
                goto fput_in;
1875
 
1876
        /*
1877
         * Get output file, and verify that it is ok..
1878
         */
1879
        retval = -EBADF;
1880
        out_file = fget(out_fd);
1881
        if (!out_file)
1882
                goto fput_in;
1883
        if (!(out_file->f_mode & FMODE_WRITE))
1884
                goto fput_out;
1885
        retval = -EINVAL;
1886
        if (!out_file->f_op || !out_file->f_op->write)
1887
                goto fput_out;
1888
        out_inode = out_file->f_dentry->d_inode;
1889
        retval = locks_verify_area(FLOCK_VERIFY_WRITE, out_inode, out_file, out_file->f_pos, count);
1890
        if (retval)
1891
                goto fput_out;
1892
 
1893
        retval = 0;
1894
        if (count) {
1895
                read_descriptor_t desc;
1896
 
1897
                if (!offset)
1898
                        offset = &in_file->f_pos;
1899
 
1900
                desc.written = 0;
1901
                desc.count = count;
1902
                desc.buf = (char *) out_file;
1903
                desc.error = 0;
1904
                do_generic_file_read(in_file, offset, &desc, file_send_actor);
1905
 
1906
                retval = desc.written;
1907
                if (!retval)
1908
                        retval = desc.error;
1909
        }
1910
 
1911
fput_out:
1912
        fput(out_file);
1913
fput_in:
1914
        fput(in_file);
1915
out:
1916
        return retval;
1917
}
1918
 
1919
asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
1920
{
1921
        loff_t pos, *ppos = NULL;
1922
        ssize_t ret;
1923
        if (offset) {
1924
                off_t off;
1925
                if (unlikely(get_user(off, offset)))
1926
                        return -EFAULT;
1927
                pos = off;
1928
                ppos = &pos;
1929
        }
1930
        ret = common_sendfile(out_fd, in_fd, ppos, count);
1931
        if (offset)
1932
                put_user((off_t)pos, offset);
1933
        return ret;
1934
}
1935
 
1936
asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t *offset, size_t count)
1937
{
1938
        loff_t pos, *ppos = NULL;
1939
        ssize_t ret;
1940
        if (offset) {
1941
                if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
1942
                        return -EFAULT;
1943
                ppos = &pos;
1944
        }
1945
        ret = common_sendfile(out_fd, in_fd, ppos, count);
1946
        if (offset)
1947
                put_user(pos, offset);
1948
        return ret;
1949
}
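/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * common_sendfile() above threads the output struct file through desc.buf
 * and reuses do_generic_file_read() with file_send_actor, so in_fd must be
 * backed by the page cache (it needs ->readpage) while out_fd only needs a
 * ->write method.  Copying a whole file to an already-open descriptor
 * (regular file or socket) might look like this:
 */
#if 0   /* userspace example, compile separately */
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

int copy_with_sendfile(int out_fd, const char *in_path)
{
        struct stat st;
        off_t offset = 0;
        int in_fd = open(in_path, O_RDONLY);

        if (in_fd < 0)
                return -1;
        if (fstat(in_fd, &st) < 0) {
                close(in_fd);
                return -1;
        }
        while (offset < st.st_size) {
                ssize_t n = sendfile(out_fd, in_fd, &offset, st.st_size - offset);
                if (n <= 0)
                        break;          /* error or unexpected EOF */
        }
        close(in_fd);
        return offset == st.st_size ? 0 : -1;
}
#endif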
1950
 
1951
static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr)
1952
{
1953
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
1954
        unsigned long max;
1955
 
1956
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1957
                return -EINVAL;
1958
 
1959
        /* Limit it to the size of the file.. */
1960
        max = (mapping->host->i_size + ~PAGE_CACHE_MASK) >> PAGE_CACHE_SHIFT;
1961
        if (index > max)
1962
                return 0;
1963
        max -= index;
1964
        if (nr > max)
1965
                nr = max;
1966
 
1967
        /* And limit it to a sane percentage of the inactive list.. */
1968
        max = (nr_free_pages() + nr_inactive_pages) / 2;
1969
        if (nr > max)
1970
                nr = max;
1971
 
1972
        while (nr) {
1973
                page_cache_read(file, index);
1974
                index++;
1975
                nr--;
1976
        }
1977
        return 0;
1978
}
1979
 
1980
asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
1981
{
1982
        ssize_t ret;
1983
        struct file *file;
1984
 
1985
        ret = -EBADF;
1986
        file = fget(fd);
1987
        if (file) {
1988
                if (file->f_mode & FMODE_READ) {
1989
                        unsigned long start = offset >> PAGE_CACHE_SHIFT;
1990
                        unsigned long len = (count + ((long)offset & ~PAGE_CACHE_MASK)) >> PAGE_CACHE_SHIFT;
1991
                        ret = do_readahead(file, start, len);
1992
                }
1993
                fput(file);
1994
        }
1995
        return ret;
1996
}
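/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * do_readahead() above clamps the request to the file size and to half of
 * the free+inactive page count, then populates the page cache one page at
 * a time without waiting.  Prefetching the first megabyte of a file might
 * look like this (readahead(2) is Linux-specific and needs _GNU_SOURCE):
 */
#if 0   /* userspace example, compile separately */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int prefetch_first_mb(const char *path)
{
        ssize_t ret;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return -1;
        ret = readahead(fd, 0, 1024 * 1024);    /* a hint: later reads may still block */
        close(fd);
        return ret < 0 ? -1 : 0;
}
#endif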
1997
 
1998
/*
1999
 * Read-ahead and flush behind for MADV_SEQUENTIAL areas.  Since we are
2000
 * sure this is sequential access, we don't need a flexible read-ahead
2001
 * window size -- we can always use a large fixed size window.
2002
 */
2003
static void nopage_sequential_readahead(struct vm_area_struct * vma,
2004
        unsigned long pgoff, unsigned long filesize)
2005
{
2006
        unsigned long ra_window;
2007
 
2008
        ra_window = get_max_readahead(vma->vm_file->f_dentry->d_inode);
2009
        ra_window = CLUSTER_OFFSET(ra_window + CLUSTER_PAGES - 1);
2010
 
2011
        /* vm_raend is zero if we haven't read ahead in this area yet.  */
2012
        if (vma->vm_raend == 0)
2013
                vma->vm_raend = vma->vm_pgoff + ra_window;
2014
 
2015
        /*
2016
         * If we've just faulted the page half-way through our window,
2017
         * then schedule reads for the next window, and release the
2018
         * pages in the previous window.
2019
         */
2020
        if ((pgoff + (ra_window >> 1)) == vma->vm_raend) {
2021
                unsigned long start = vma->vm_pgoff + vma->vm_raend;
2022
                unsigned long end = start + ra_window;
2023
 
2024
                if (end > ((vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff))
2025
                        end = (vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff;
2026
                if (start > end)
2027
                        return;
2028
 
2029
                while ((start < end) && (start < filesize)) {
2030
                        if (read_cluster_nonblocking(vma->vm_file,
2031
                                                        start, filesize) < 0)
2032
                                break;
2033
                        start += CLUSTER_PAGES;
2034
                }
2035
                run_task_queue(&tq_disk);
2036
 
2037
                /* if we're far enough past the beginning of this area,
2038
                   recycle pages that are in the previous window. */
2039
                if (vma->vm_raend > (vma->vm_pgoff + ra_window + ra_window)) {
2040
                        unsigned long window = ra_window << PAGE_SHIFT;
2041
 
2042
                        end = vma->vm_start + (vma->vm_raend << PAGE_SHIFT);
2043
                        end -= window + window;
2044
                        filemap_sync(vma, end - window, window, MS_INVALIDATE);
2045
                }
2046
 
2047
                vma->vm_raend += ra_window;
2048
        }
2049
 
2050
        return;
2051
}
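/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * The flush-behind/read-ahead above only runs for VMAs carrying the
 * VM_SEQ_READ hint, which a process requests with madvise(MADV_SEQUENTIAL)
 * on a file mapping before streaming through it once:
 */
#if 0   /* userspace example, compile separately */
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

long checksum_file_sequentially(const char *path)
{
        struct stat st;
        unsigned char *p;
        long sum = 0;
        size_t i;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return -1;
        if (fstat(fd, &st) < 0 || st.st_size == 0) {
                close(fd);
                return -1;
        }
        p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                close(fd);
                return -1;
        }
        madvise(p, st.st_size, MADV_SEQUENTIAL);        /* advisory only */
        for (i = 0; i < (size_t) st.st_size; i++)
                sum += p[i];
        munmap(p, st.st_size);
        close(fd);
        return sum;
}
#endif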
2052
 
2053
/*
2054
 * filemap_nopage() is invoked via the vma operations vector for a
2055
 * mapped memory region to read in file data during a page fault.
2056
 *
2057
 * The goto's are kind of ugly, but this streamlines the normal case of having
2058
 * it in the page cache, and handles the special cases reasonably without
2059
 * having a lot of duplicated code.
2060
 */
2061
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
2062
{
2063
        int error;
2064
        struct file *file = area->vm_file;
2065
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
2066
        struct inode *inode = mapping->host;
2067
        struct page *page, **hash;
2068
        unsigned long size, pgoff, endoff;
2069
 
2070
        pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
2071
        endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
2072
 
2073
retry_all:
2074
        /*
2075
         * An external ptracer can access pages that normally aren't
2076
         * accessible..
2077
         */
2078
        size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2079
        if ((pgoff >= size) && (area->vm_mm == current->mm))
2080
                return NULL;
2081
 
2082
        /* The "size" of the file, as far as mmap is concerned, isn't bigger than the mapping */
2083
        if (size > endoff)
2084
                size = endoff;
2085
 
2086
        /*
2087
         * Do we have something in the page cache already?
2088
         */
2089
        hash = page_hash(mapping, pgoff);
2090
retry_find:
2091
        page = __find_get_page(mapping, pgoff, hash);
2092
        if (!page)
2093
                goto no_cached_page;
2094
 
2095
        /*
2096
         * Ok, found a page in the page cache, now we need to check
2097
         * that it's up-to-date.
2098
         */
2099
        if (!Page_Uptodate(page))
2100
                goto page_not_uptodate;
2101
 
2102
success:
2103
        /*
2104
         * Try read-ahead for sequential areas.
2105
         */
2106
        if (VM_SequentialReadHint(area))
2107
                nopage_sequential_readahead(area, pgoff, size);
2108
 
2109
        /*
2110
         * Found the page and have a reference on it, need to check sharing
2111
         * and possibly copy it over to another page..
2112
         */
2113
        mark_page_accessed(page);
2114
        flush_page_to_ram(page);
2115
        return page;
2116
 
2117
no_cached_page:
2118
        /*
2119
         * If the requested offset is within our file, try to read a whole
2120
         * cluster of pages at once.
2121
         *
2122
         * Otherwise, we're off the end of a privately mapped file,
2123
         * so we need to map a zero page.
2124
         */
2125
        if ((pgoff < size) && !VM_RandomReadHint(area))
2126
                error = read_cluster_nonblocking(file, pgoff, size);
2127
        else
2128
                error = page_cache_read(file, pgoff);
2129
 
2130
        /*
2131
         * The page we want has now been added to the page cache.
2132
         * In the unlikely event that someone removed it in the
2133
         * meantime, we'll just come back here and read it again.
2134
         */
2135
        if (error >= 0)
2136
                goto retry_find;
2137
 
2138
        /*
2139
         * An error return from page_cache_read can result if the
2140
         * system is low on memory, or a problem occurs while trying
2141
         * to schedule I/O.
2142
         */
2143
        if (error == -ENOMEM)
2144
                return NOPAGE_OOM;
2145
        return NULL;
2146
 
2147
page_not_uptodate:
2148
        lock_page(page);
2149
 
2150
        /* Did it get unhashed while we waited for it? */
2151
        if (!page->mapping) {
2152
                UnlockPage(page);
2153
                page_cache_release(page);
2154
                goto retry_all;
2155
        }
2156
 
2157
        /* Did somebody else get it up-to-date? */
2158
        if (Page_Uptodate(page)) {
2159
                UnlockPage(page);
2160
                goto success;
2161
        }
2162
 
2163
        if (!mapping->a_ops->readpage(file, page)) {
2164
                wait_on_page(page);
2165
                if (Page_Uptodate(page))
2166
                        goto success;
2167
        }
2168
 
2169
        /*
2170
         * Umm, take care of errors if the page isn't up-to-date.
2171
         * Try to re-read it _once_. We do this synchronously,
2172
         * because there really aren't any performance issues here
2173
         * and we need to check for errors.
2174
         */
2175
        lock_page(page);
2176
 
2177
        /* Somebody truncated the page on us? */
2178
        if (!page->mapping) {
2179
                UnlockPage(page);
2180
                page_cache_release(page);
2181
                goto retry_all;
2182
        }
2183
 
2184
        /* Somebody else successfully read it in? */
2185
        if (Page_Uptodate(page)) {
2186
                UnlockPage(page);
2187
                goto success;
2188
        }
2189
        ClearPageError(page);
2190
        if (!mapping->a_ops->readpage(file, page)) {
2191
                wait_on_page(page);
2192
                if (Page_Uptodate(page))
2193
                        goto success;
2194
        }
2195
 
2196
        /*
2197
         * Things didn't work out. Return zero to tell the
2198
         * mm layer so, possibly freeing the page cache page first.
2199
         */
2200
        page_cache_release(page);
2201
        return NULL;
2202
}
2203
 
2204
/* Called with mm->page_table_lock held to protect against other
2205
 * threads/the swapper from ripping pte's out from under us.
2206
 */
2207
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
2208
        unsigned long address, unsigned int flags)
2209
{
2210
        pte_t pte = *ptep;
2211
 
2212
        if (pte_present(pte)) {
2213
                struct page *page = pte_page(pte);
2214
                if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
2215
                        flush_tlb_page(vma, address);
2216
                        set_page_dirty(page);
2217
                }
2218
        }
2219
        return 0;
2220
}
2221
 
2222
static inline int filemap_sync_pte_range(pmd_t * pmd,
2223
        unsigned long address, unsigned long size,
2224
        struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
2225
{
2226
        pte_t * pte;
2227
        unsigned long end;
2228
        int error;
2229
 
2230
        if (pmd_none(*pmd))
2231
                return 0;
2232
        if (pmd_bad(*pmd)) {
2233
                pmd_ERROR(*pmd);
2234
                pmd_clear(pmd);
2235
                return 0;
2236
        }
2237
        pte = pte_offset(pmd, address);
2238
        offset += address & PMD_MASK;
2239
        address &= ~PMD_MASK;
2240
        end = address + size;
2241
        if (end > PMD_SIZE)
2242
                end = PMD_SIZE;
2243
        error = 0;
2244
        do {
2245
                error |= filemap_sync_pte(pte, vma, address + offset, flags);
2246
                address += PAGE_SIZE;
2247
                pte++;
2248
        } while (address && (address < end));
2249
        return error;
2250
}
2251
 
2252
static inline int filemap_sync_pmd_range(pgd_t * pgd,
2253
        unsigned long address, unsigned long size,
2254
        struct vm_area_struct *vma, unsigned int flags)
2255
{
2256
        pmd_t * pmd;
2257
        unsigned long offset, end;
2258
        int error;
2259
 
2260
        if (pgd_none(*pgd))
2261
                return 0;
2262
        if (pgd_bad(*pgd)) {
2263
                pgd_ERROR(*pgd);
2264
                pgd_clear(pgd);
2265
                return 0;
2266
        }
2267
        pmd = pmd_offset(pgd, address);
2268
        offset = address & PGDIR_MASK;
2269
        address &= ~PGDIR_MASK;
2270
        end = address + size;
2271
        if (end > PGDIR_SIZE)
2272
                end = PGDIR_SIZE;
2273
        error = 0;
2274
        do {
2275
                error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
2276
                address = (address + PMD_SIZE) & PMD_MASK;
2277
                pmd++;
2278
        } while (address && (address < end));
2279
        return error;
2280
}
2281
 
2282
int filemap_sync(struct vm_area_struct * vma, unsigned long address,
2283
        size_t size, unsigned int flags)
2284
{
2285
        pgd_t * dir;
2286
        unsigned long end = address + size;
2287
        int error = 0;
2288
 
2289
        /* Acquire the lock early; it may be possible to avoid dropping
2290
         * and reacquiring it repeatedly.
2291
         */
2292
        spin_lock(&vma->vm_mm->page_table_lock);
2293
 
2294
        dir = pgd_offset(vma->vm_mm, address);
2295
        flush_cache_range(vma->vm_mm, end - size, end);
2296
        if (address >= end)
2297
                BUG();
2298
        do {
2299
                error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
2300
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
2301
                dir++;
2302
        } while (address && (address < end));
2303
        flush_tlb_range(vma->vm_mm, end - size, end);
2304
 
2305
        spin_unlock(&vma->vm_mm->page_table_lock);
2306
 
2307
        return error;
2308
}
2309
 
2310
static struct vm_operations_struct generic_file_vm_ops = {
2311
        nopage:         filemap_nopage,
2312
};
2313
 
2314
/* This is used for a general mmap of a disk file */
2315
 
2316
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
2317
{
2318
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
2319
        struct inode *inode = mapping->host;
2320
 
2321
        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
2322
                if (!mapping->a_ops->writepage)
2323
                        return -EINVAL;
2324
        }
2325
        if (!mapping->a_ops->readpage)
2326
                return -ENOEXEC;
2327
        UPDATE_ATIME(inode);
2328
        vma->vm_ops = &generic_file_vm_ops;
2329
        return 0;
2330
}
2331
 
2332
/*
2333
 * The msync() system call.
2334
 */
2335
 
2336
/*
2337
 * MS_SYNC syncs the entire file - including mappings.
2338
 *
2339
 * MS_ASYNC initiates writeout of just the dirty mapped data.
2340
 * This provides no guarantee of file integrity - things like indirect
2341
 * blocks may not have started writeout.  MS_ASYNC is primarily useful
2342
 * where the application knows that it has finished with the data and
2343
 * wishes to intelligently schedule its own I/O traffic.
2344
 */
2345
static int msync_interval(struct vm_area_struct * vma,
2346
        unsigned long start, unsigned long end, int flags)
2347
{
2348
        int ret = 0;
2349
        struct file * file = vma->vm_file;
2350
 
2351
        if ( (flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED) )
2352
                return -EBUSY;
2353
 
2354
        if (file && (vma->vm_flags & VM_SHARED)) {
2355
                ret = filemap_sync(vma, start, end-start, flags);
2356
 
2357
                if (!ret && (flags & (MS_SYNC|MS_ASYNC))) {
2358
                        struct inode * inode = file->f_dentry->d_inode;
2359
 
2360
                        down(&inode->i_sem);
2361
                        ret = filemap_fdatasync(inode->i_mapping);
2362
                        if (flags & MS_SYNC) {
2363
                                int err;
2364
 
2365
                                if (file->f_op && file->f_op->fsync) {
2366
                                        err = file->f_op->fsync(file, file->f_dentry, 1);
2367
                                        if (err && !ret)
2368
                                                ret = err;
2369
                                }
2370
                                err = filemap_fdatawait(inode->i_mapping);
2371
                                if (err && !ret)
2372
                                        ret = err;
2373
                        }
2374
                        up(&inode->i_sem);
2375
                }
2376
        }
2377
        return ret;
2378
}
2379
 
2380
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
2381
{
2382
        unsigned long end;
2383
        struct vm_area_struct * vma;
2384
        int unmapped_error, error = -EINVAL;
2385
 
2386
        down_read(&current->mm->mmap_sem);
2387
        if (start & ~PAGE_MASK)
2388
                goto out;
2389
        len = (len + ~PAGE_MASK) & PAGE_MASK;
2390
        end = start + len;
2391
        if (end < start)
2392
                goto out;
2393
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
2394
                goto out;
2395
        if ((flags & MS_ASYNC) && (flags & MS_SYNC))
2396
                goto out;
2397
 
2398
        error = 0;
2399
        if (end == start)
2400
                goto out;
2401
        /*
2402
         * If the interval [start,end) covers some unmapped address ranges,
2403
         * just ignore them, but return -ENOMEM at the end.
2404
         */
2405
        vma = find_vma(current->mm, start);
2406
        unmapped_error = 0;
2407
        for (;;) {
2408
                /* Still start < end. */
2409
                error = -ENOMEM;
2410
                if (!vma)
2411
                        goto out;
2412
                /* Here start < vma->vm_end. */
2413
                if (start < vma->vm_start) {
2414
                        unmapped_error = -ENOMEM;
2415
                        start = vma->vm_start;
2416
                }
2417
                /* Here vma->vm_start <= start < vma->vm_end. */
2418
                if (end <= vma->vm_end) {
2419
                        if (start < end) {
2420
                                error = msync_interval(vma, start, end, flags);
2421
                                if (error)
2422
                                        goto out;
2423
                        }
2424
                        error = unmapped_error;
2425
                        goto out;
2426
                }
2427
                /* Here vma->vm_start <= start < vma->vm_end < end. */
2428
                error = msync_interval(vma, start, vma->vm_end, flags);
2429
                if (error)
2430
                        goto out;
2431
                start = vma->vm_end;
2432
                vma = vma->vm_next;
2433
        }
2434
out:
2435
        up_read(&current->mm->mmap_sem);
2436
        return error;
2437
}
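/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * msync_interval() above first propagates dirty ptes into the page cache
 * via filemap_sync(), then for MS_SYNC writes the pages out and waits for
 * them under i_sem.  Making an update through a shared mapping durable
 * might look like this (the file is assumed to be at least one page long):
 */
#if 0   /* userspace example, compile separately */
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int stamp_header(const char *path, const char *tag)
{
        char *p;
        int ret = -1;
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return -1;
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p != MAP_FAILED) {
                memcpy(p, tag, strlen(tag));    /* dirties the first page */
                ret = msync(p, 4096, MS_SYNC);  /* flush and wait for the I/O */
                munmap(p, 4096);
        }
        close(fd);
        return ret;
}
#endif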
2438
 
2439
static inline void setup_read_behavior(struct vm_area_struct * vma,
2440
        int behavior)
2441
{
2442
        VM_ClearReadHint(vma);
2443
        switch(behavior) {
2444
                case MADV_SEQUENTIAL:
2445
                        vma->vm_flags |= VM_SEQ_READ;
2446
                        break;
2447
                case MADV_RANDOM:
2448
                        vma->vm_flags |= VM_RAND_READ;
2449
                        break;
2450
                default:
2451
                        break;
2452
        }
2453
        return;
2454
}
2455
 
2456
static long madvise_fixup_start(struct vm_area_struct * vma,
2457
        unsigned long end, int behavior)
2458
{
2459
        struct vm_area_struct * n;
2460
        struct mm_struct * mm = vma->vm_mm;
2461
 
2462
        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2463
        if (!n)
2464
                return -EAGAIN;
2465
        *n = *vma;
2466
        n->vm_end = end;
2467
        setup_read_behavior(n, behavior);
2468
        n->vm_raend = 0;
2469
        if (n->vm_file)
2470
                get_file(n->vm_file);
2471
        if (n->vm_ops && n->vm_ops->open)
2472
                n->vm_ops->open(n);
2473
        vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
2474
        lock_vma_mappings(vma);
2475
        spin_lock(&mm->page_table_lock);
2476
        vma->vm_start = end;
2477
        __insert_vm_struct(mm, n);
2478
        spin_unlock(&mm->page_table_lock);
2479
        unlock_vma_mappings(vma);
2480
        return 0;
2481
}
2482
 
2483
static long madvise_fixup_end(struct vm_area_struct * vma,
2484
        unsigned long start, int behavior)
2485
{
2486
        struct vm_area_struct * n;
2487
        struct mm_struct * mm = vma->vm_mm;
2488
 
2489
        n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2490
        if (!n)
2491
                return -EAGAIN;
2492
        *n = *vma;
2493
        n->vm_start = start;
2494
        n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
2495
        setup_read_behavior(n, behavior);
2496
        n->vm_raend = 0;
2497
        if (n->vm_file)
2498
                get_file(n->vm_file);
2499
        if (n->vm_ops && n->vm_ops->open)
2500
                n->vm_ops->open(n);
2501
        lock_vma_mappings(vma);
2502
        spin_lock(&mm->page_table_lock);
2503
        vma->vm_end = start;
2504
        __insert_vm_struct(mm, n);
2505
        spin_unlock(&mm->page_table_lock);
2506
        unlock_vma_mappings(vma);
2507
        return 0;
2508
}
2509
 
2510
static long madvise_fixup_middle(struct vm_area_struct * vma,
2511
        unsigned long start, unsigned long end, int behavior)
2512
{
2513
        struct vm_area_struct * left, * right;
2514
        struct mm_struct * mm = vma->vm_mm;
2515
 
2516
        left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2517
        if (!left)
2518
                return -EAGAIN;
2519
        right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2520
        if (!right) {
2521
                kmem_cache_free(vm_area_cachep, left);
2522
                return -EAGAIN;
2523
        }
2524
        *left = *vma;
2525
        *right = *vma;
2526
        left->vm_end = start;
2527
        right->vm_start = end;
2528
        right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
2529
        left->vm_raend = 0;
2530
        right->vm_raend = 0;
2531
        if (vma->vm_file)
2532
                atomic_add(2, &vma->vm_file->f_count);
2533
 
2534
        if (vma->vm_ops && vma->vm_ops->open) {
2535
                vma->vm_ops->open(left);
2536
                vma->vm_ops->open(right);
2537
        }
2538
        vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
2539
        vma->vm_raend = 0;
2540
        lock_vma_mappings(vma);
2541
        spin_lock(&mm->page_table_lock);
2542
        vma->vm_start = start;
2543
        vma->vm_end = end;
2544
        setup_read_behavior(vma, behavior);
2545
        __insert_vm_struct(mm, left);
2546
        __insert_vm_struct(mm, right);
2547
        spin_unlock(&mm->page_table_lock);
2548
        unlock_vma_mappings(vma);
2549
        return 0;
2550
}
2551
 
2552
/*
2553
 * We can potentially split a vm area into separate
2554
 * areas, each area with its own behavior.
2555
 */
2556
static long madvise_behavior(struct vm_area_struct * vma,
2557
        unsigned long start, unsigned long end, int behavior)
2558
{
2559
        int error = 0;
2560
 
2561
        /* This caps the number of vma's this process can own */
2562
        if (vma->vm_mm->map_count > max_map_count)
2563
                return -ENOMEM;
2564
 
2565
        if (start == vma->vm_start) {
2566
                if (end == vma->vm_end) {
2567
                        setup_read_behavior(vma, behavior);
2568
                        vma->vm_raend = 0;
2569
                } else
2570
                        error = madvise_fixup_start(vma, end, behavior);
2571
        } else {
2572
                if (end == vma->vm_end)
2573
                        error = madvise_fixup_end(vma, start, behavior);
2574
                else
2575
                        error = madvise_fixup_middle(vma, start, end, behavior);
2576
        }
2577
 
2578
        return error;
2579
}
2580
 
2581
/*
2582
 * Schedule all required I/O operations, then run the disk queue
2583
 * to make sure they are started.  Do not wait for completion.
2584
 */
2585
static long madvise_willneed(struct vm_area_struct * vma,
2586
        unsigned long start, unsigned long end)
2587
{
2588
        long error = -EBADF;
2589
        struct file * file;
2590
        struct inode * inode;
2591
        unsigned long size, rlim_rss;
2592
 
2593
        /* Doesn't work if there's no mapped file. */
2594
        if (!vma->vm_file)
2595
                return error;
2596
        file = vma->vm_file;
2597
        inode = file->f_dentry->d_inode;
2598
        if (!inode->i_mapping->a_ops->readpage)
2599
                return error;
2600
        size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
2601
 
2602
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2603
        if (end > vma->vm_end)
2604
                end = vma->vm_end;
2605
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2606
 
2607
        /* Make sure this doesn't exceed the process's max rss. */
2608
        error = -EIO;
2609
        rlim_rss = current->rlim ?  current->rlim[RLIMIT_RSS].rlim_cur :
2610
                                LONG_MAX; /* default: see resource.h */
2611
        if ((vma->vm_mm->rss + (end - start)) > rlim_rss)
2612
                return error;
2613
 
2614
        /* round to cluster boundaries if this isn't a "random" area. */
2615
        if (!VM_RandomReadHint(vma)) {
2616
                start = CLUSTER_OFFSET(start);
2617
                end = CLUSTER_OFFSET(end + CLUSTER_PAGES - 1);
2618
 
2619
                while ((start < end) && (start < size)) {
2620
                        error = read_cluster_nonblocking(file, start, size);
2621
                        start += CLUSTER_PAGES;
2622
                        if (error < 0)
2623
                                break;
2624
                }
2625
        } else {
2626
                while ((start < end) && (start < size)) {
2627
                        error = page_cache_read(file, start);
2628
                        start++;
2629
                        if (error < 0)
2630
                                break;
2631
                }
2632
        }
2633
 
2634
        /* Don't wait for someone else to push these requests. */
2635
        run_task_queue(&tq_disk);
2636
 
2637
        return error;
2638
}
2639
 
2640
/*
2641
 * Application no longer needs these pages.  If the pages are dirty,
2642
 * it's OK to just throw them away.  The app will be more careful about
2643
 * data it wants to keep.  Be sure to free swap resources too.  The
2644
 * zap_page_range call sets things up for refill_inactive to actually free
2645
 * these pages later if no one else has touched them in the meantime,
2646
 * although we could add these pages to a global reuse list for
2647
 * refill_inactive to pick up before reclaiming other pages.
2648
 *
2649
 * NB: This interface discards data rather than pushes it out to swap,
2650
 * as some implementations do.  This has performance implications for
2651
 * applications like large transactional databases which want to discard
2652
 * pages in anonymous maps after committing to backing store the data
2653
 * that was kept in them.  There is no reason to write this data out to
2654
 * the swap area if the application is discarding it.
2655
 *
2656
 * An interface that causes the system to free clean pages and flush
2657
 * dirty pages is already available as msync(MS_INVALIDATE).
2658
 */
2659
static long madvise_dontneed(struct vm_area_struct * vma,
2660
        unsigned long start, unsigned long end)
2661
{
2662
        if (vma->vm_flags & VM_LOCKED)
2663
                return -EINVAL;
2664
 
2665
        zap_page_range(vma->vm_mm, start, end - start);
2666
        return 0;
2667
}
2668
 
2669
static long madvise_vma(struct vm_area_struct * vma, unsigned long start,
2670
        unsigned long end, int behavior)
2671
{
2672
        long error = -EBADF;
2673
 
2674
        switch (behavior) {
2675
        case MADV_NORMAL:
2676
        case MADV_SEQUENTIAL:
2677
        case MADV_RANDOM:
2678
                error = madvise_behavior(vma, start, end, behavior);
2679
                break;
2680
 
2681
        case MADV_WILLNEED:
2682
                error = madvise_willneed(vma, start, end);
2683
                break;
2684
 
2685
        case MADV_DONTNEED:
2686
                error = madvise_dontneed(vma, start, end);
2687
                break;
2688
 
2689
        default:
2690
                error = -EINVAL;
2691
                break;
2692
        }
2693
 
2694
        return error;
2695
}
2696
 
2697
/*
2698
 * The madvise(2) system call.
2699
 *
2700
 * Applications can use madvise() to advise the kernel how it should
2701
 * handle paging I/O in this VM area.  The idea is to help the kernel
2702
 * use appropriate read-ahead and caching techniques.  The information
2703
 * provided is advisory only, and can be safely disregarded by the
2704
 * kernel without affecting the correct operation of the application.
2705
 *
2706
 * behavior values:
2707
 *  MADV_NORMAL - the default behavior is to read clusters.  This
2708
 *              results in some read-ahead and read-behind.
2709
 *  MADV_RANDOM - the system should read the minimum amount of data
2710
 *              on any access, since it is unlikely that the appli-
2711
 *              cation will need more than what it asks for.
2712
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
2713
 *              once, so they can be aggressively read ahead, and
2714
 *              can be freed soon after they are accessed.
2715
 *  MADV_WILLNEED - the application is notifying the system to read
2716
 *              some pages ahead.
2717
 *  MADV_DONTNEED - the application is finished with the given range,
2718
 *              so the kernel can free resources associated with it.
2719
 *
2720
 * return values:
2721
 *  zero    - success
2722
 *  -EINVAL - start + len < 0, start is not page-aligned,
2723
 *              "behavior" is not a valid value, or application
2724
 *              is attempting to release locked or shared pages.
2725
 *  -ENOMEM - addresses in the specified range are not currently
2726
 *              mapped, or are outside the AS of the process.
2727
 *  -EIO    - an I/O error occurred while paging in data.
2728
 *  -EBADF  - map exists, but area maps something that isn't a file.
2729
 *  -EAGAIN - a kernel resource was temporarily unavailable.
2730
 */
2731
asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior)
2732
{
2733
        unsigned long end;
2734
        struct vm_area_struct * vma;
2735
        int unmapped_error = 0;
2736
        int error = -EINVAL;
2737
 
2738
        down_write(&current->mm->mmap_sem);
2739
 
2740
        if (start & ~PAGE_MASK)
2741
                goto out;
2742
        len = (len + ~PAGE_MASK) & PAGE_MASK;
2743
        end = start + len;
2744
        if (end < start)
2745
                goto out;
2746
 
2747
        error = 0;
2748
        if (end == start)
2749
                goto out;
2750
 
2751
        /*
2752
         * If the interval [start,end) covers some unmapped address
2753
         * ranges, just ignore them, but return -ENOMEM at the end.
2754
         */
2755
        vma = find_vma(current->mm, start);
2756
        for (;;) {
2757
                /* Still start < end. */
2758
                error = -ENOMEM;
2759
                if (!vma)
2760
                        goto out;
2761
 
2762
                /* Here start < vma->vm_end. */
2763
                if (start < vma->vm_start) {
2764
                        unmapped_error = -ENOMEM;
2765
                        start = vma->vm_start;
2766
                }
2767
 
2768
                /* Here vma->vm_start <= start < vma->vm_end. */
2769
                if (end <= vma->vm_end) {
2770
                        if (start < end) {
2771
                                error = madvise_vma(vma, start, end,
2772
                                                        behavior);
2773
                                if (error)
2774
                                        goto out;
2775
                        }
2776
                        error = unmapped_error;
2777
                        goto out;
2778
                }
2779
 
2780
                /* Here vma->vm_start <= start < vma->vm_end < end. */
2781
                error = madvise_vma(vma, start, vma->vm_end, behavior);
2782
                if (error)
2783
                        goto out;
2784
                start = vma->vm_end;
2785
                vma = vma->vm_next;
2786
        }
2787
 
2788
out:
2789
        up_write(&current->mm->mmap_sem);
2790
        return error;
2791
}
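/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * MADV_WILLNEED above schedules cluster reads immediately, while
 * MADV_DONTNEED simply zaps the range so the pages can be reclaimed.
 * A process that scans a file mapping once and then wants the memory
 * back might do:
 */
#if 0   /* userspace example, compile separately */
#include <stddef.h>
#include <sys/mman.h>

/* 'map' and 'len' are assumed to describe an existing file mapping */
void scan_once_and_discard(unsigned char *map, size_t len)
{
        volatile unsigned long sum = 0;
        size_t i;

        madvise(map, len, MADV_WILLNEED);       /* start read-ahead now */
        for (i = 0; i < len; i++)
                sum += map[i];
        madvise(map, len, MADV_DONTNEED);       /* drop the pages afterwards */
}
#endif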
2792
 
2793
/*
2794
 * Later we can get more picky about what "in core" means precisely.
2795
 * For now, simply check to see if the page is in the page cache,
2796
 * and is up to date; i.e. that no page-in operation would be required
2797
 * at this time if an application were to map and access this page.
2798
 */
2799
static unsigned char mincore_page(struct vm_area_struct * vma,
2800
        unsigned long pgoff)
2801
{
2802
        unsigned char present = 0;
2803
        struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
2804
        struct page * page, ** hash = page_hash(as, pgoff);
2805
 
2806
        spin_lock(&pagecache_lock);
2807
        page = __find_page_nolock(as, pgoff, *hash);
2808
        if ((page) && (Page_Uptodate(page)))
2809
                present = 1;
2810
        spin_unlock(&pagecache_lock);
2811
 
2812
        return present;
2813
}
2814
 
2815
static long mincore_vma(struct vm_area_struct * vma,
2816
        unsigned long start, unsigned long end, unsigned char * vec)
2817
{
2818
        long error, i, remaining;
2819
        unsigned char * tmp;
2820
 
2821
        error = -ENOMEM;
2822
        if (!vma->vm_file)
2823
                return error;
2824
 
2825
        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2826
        if (end > vma->vm_end)
2827
                end = vma->vm_end;
2828
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2829
 
2830
        error = -EAGAIN;
2831
        tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
2832
        if (!tmp)
2833
                return error;
2834
 
2835
        /* (end - start) is # of pages, and also # of bytes in "vec" */
2836
        remaining = (end - start);
2837
 
2838
        error = 0;
2839
        for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
2840
                int j = 0;
2841
                long thispiece = (remaining < PAGE_SIZE) ?
2842
                                                remaining : PAGE_SIZE;
2843
 
2844
                while (j < thispiece)
2845
                        tmp[j++] = mincore_page(vma, start++);
2846
 
2847
                if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
2848
                        error = -EFAULT;
2849
                        break;
2850
                }
2851
        }
2852
 
2853
        free_page((unsigned long) tmp);
2854
        return error;
2855
}
2856
 
2857
/*
2858
 * The mincore(2) system call.
2859
 *
2860
 * mincore() returns the memory residency status of the pages in the
2861
 * current process's address space specified by [addr, addr + len).
2862
 * The status is returned in a vector of bytes.  The least significant
2863
 * bit of each byte is 1 if the referenced page is in memory, otherwise
2864
 * it is zero.
2865
 *
2866
 * Because the status of a page can change after mincore() checks it
2867
 * but before it returns to the application, the returned vector may
2868
 * contain stale information.  Only locked pages are guaranteed to
2869
 * remain in memory.
2870
 *
2871
 * return values:
2872
 *  zero    - success
2873
 *  -EFAULT - vec points to an illegal address
2874
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE,
2875
 *              or len has a nonpositive value
2876
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
2877
 *              invalid for the address space of this process, or
2878
 *              specify one or more pages which are not currently
2879
 *              mapped
2880
 *  -EAGAIN - A kernel resource was temporarily unavailable.
2881
 */
2882
asmlinkage long sys_mincore(unsigned long start, size_t len,
2883
        unsigned char * vec)
2884
{
2885
        int index = 0;
2886
        unsigned long end;
2887
        struct vm_area_struct * vma;
2888
        int unmapped_error = 0;
2889
        long error = -EINVAL;
2890
 
2891
        down_read(&current->mm->mmap_sem);
2892
 
2893
        if (start & ~PAGE_CACHE_MASK)
2894
                goto out;
2895
        len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
2896
        end = start + len;
2897
        if (end < start)
2898
                goto out;
2899
 
2900
        error = 0;
2901
        if (end == start)
2902
                goto out;
2903
 
2904
        /*
2905
         * If the interval [start,end) covers some unmapped address
2906
         * ranges, just ignore them, but return -ENOMEM at the end.
2907
         */
2908
        vma = find_vma(current->mm, start);
2909
        for (;;) {
2910
                /* Still start < end. */
2911
                error = -ENOMEM;
2912
                if (!vma)
2913
                        goto out;
2914
 
2915
                /* Here start < vma->vm_end. */
2916
                if (start < vma->vm_start) {
2917
                        unmapped_error = -ENOMEM;
2918
                        start = vma->vm_start;
2919
                }
2920
 
2921
                /* Here vma->vm_start <= start < vma->vm_end. */
2922
                if (end <= vma->vm_end) {
2923
                        if (start < end) {
2924
                                error = mincore_vma(vma, start, end,
2925
                                                        &vec[index]);
2926
                                if (error)
2927
                                        goto out;
2928
                        }
2929
                        error = unmapped_error;
2930
                        goto out;
2931
                }
2932
 
2933
                /* Here vma->vm_start <= start < vma->vm_end < end. */
2934
                error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
2935
                if (error)
2936
                        goto out;
2937
                index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
2938
                start = vma->vm_end;
2939
                vma = vma->vm_next;
2940
        }
2941
 
2942
out:
2943
        up_read(&current->mm->mmap_sem);
2944
        return error;
2945
}
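/*
 * Editor's note: illustrative userspace sketch, not part of this file.
 * The vector passed to mincore() needs one byte per page of the range,
 * and only the least significant bit of each byte is meaningful.
 * Counting how much of a mapping is resident might look like this:
 */
#if 0   /* userspace example, compile separately */
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>

/* returns the number of resident pages in [map, map + len), or -1 */
long resident_pages(void *map, size_t len)
{
        long page_size = sysconf(_SC_PAGESIZE);
        size_t pages = (len + page_size - 1) / page_size;
        unsigned char *vec = malloc(pages);
        long count = 0;
        size_t i;

        if (!vec)
                return -1;
        if (mincore(map, len, vec) < 0) {
                free(vec);
                return -1;
        }
        for (i = 0; i < pages; i++)
                count += vec[i] & 1;
        free(vec);
        return count;
}
#endif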
2946
 
2947
static inline
2948
struct page *__read_cache_page(struct address_space *mapping,
2949
                                unsigned long index,
2950
                                int (*filler)(void *,struct page*),
2951
                                void *data)
2952
{
2953
        struct page **hash = page_hash(mapping, index);
2954
        struct page *page, *cached_page = NULL;
2955
        int err;
2956
repeat:
2957
        page = __find_get_page(mapping, index, hash);
2958
        if (!page) {
2959
                if (!cached_page) {
2960
                        cached_page = page_cache_alloc(mapping);
2961
                        if (!cached_page)
2962
                                return ERR_PTR(-ENOMEM);
2963
                }
2964
                page = cached_page;
2965
                if (add_to_page_cache_unique(page, mapping, index, hash))
2966
                        goto repeat;
2967
                cached_page = NULL;
2968
                err = filler(data, page);
2969
                if (err < 0) {
2970
                        page_cache_release(page);
2971
                        page = ERR_PTR(err);
2972
                }
2973
        }
2974
        if (cached_page)
2975
                page_cache_release(cached_page);
2976
        return page;
2977
}
2978
 
/*
 * Read into the page cache. If a page already exists,
 * and Page_Uptodate() is not set, try to fill the page.
 */
struct page *read_cache_page(struct address_space *mapping,
                                unsigned long index,
                                int (*filler)(void *,struct page*),
                                void *data)
{
        struct page *page;
        int err;

retry:
        page = __read_cache_page(mapping, index, filler, data);
        if (IS_ERR(page))
                goto out;
        mark_page_accessed(page);
        if (Page_Uptodate(page))
                goto out;

        lock_page(page);
        if (!page->mapping) {
                UnlockPage(page);
                page_cache_release(page);
                goto retry;
        }
        if (Page_Uptodate(page)) {
                UnlockPage(page);
                goto out;
        }
        err = filler(data, page);
        if (err < 0) {
                page_cache_release(page);
                page = ERR_PTR(err);
        }
 out:
        return page;
}
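
/*
 * Minimal sketch of the filler contract (the "zero_filler" and
 * "grab_zero_page" helpers below are hypothetical, not defined anywhere in
 * this file): a filler is handed the locked page, must fill it, mark it
 * uptodate and unlock it, returning 0 or a negative error.  Filesystems
 * typically pass their ->readpage method here instead.
 */
#if 0
static int zero_filler(void *data, struct page *page)
{
        char *kaddr = kmap(page);

        /* fill the page, then publish it as uptodate and unlock it */
        memset(kaddr, 0, PAGE_CACHE_SIZE);
        kunmap(page);
        SetPageUptodate(page);
        UnlockPage(page);
        return 0;
}

static struct page *grab_zero_page(struct address_space *mapping,
                                   unsigned long index)
{
        return read_cache_page(mapping, index, zero_filler, NULL);
}
#endif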

/*
 * Return the page at @index locked, allocating it and inserting it into
 * the page cache first if it is not already there.  Returns NULL on OOM.
 */
static inline struct page * __grab_cache_page(struct address_space *mapping,
                                unsigned long index, struct page **cached_page)
{
        struct page *page, **hash = page_hash(mapping, index);
repeat:
        page = __find_lock_page(mapping, index, hash);
        if (!page) {
                if (!*cached_page) {
                        *cached_page = page_cache_alloc(mapping);
                        if (!*cached_page)
                                return NULL;
                }
                page = *cached_page;
                if (add_to_page_cache_unique(page, mapping, index, hash))
                        goto repeat;
                *cached_page = NULL;
        }
        return page;
}
 
inline void remove_suid(struct inode *inode)
{
        unsigned int mode;

        /* set S_ISGID if S_IXGRP is set, and always set S_ISUID */
        mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;

        /* were any of those set-id bits actually set on the inode? */
        mode &= inode->i_mode;
        if (mode && !capable(CAP_FSETID)) {
                inode->i_mode &= ~mode;
                mark_inode_dirty(inode);
        }
}
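
/*
 * The multiply above is just a branch-free form of the following (sketch
 * of the same computation):
 *
 *      mode = S_ISUID;
 *      if (inode->i_mode & S_IXGRP)
 *              mode |= S_ISGID;
 *
 * i.e. always consider clearing the setuid bit, and clear the setgid bit
 * only when group-execute is set; setgid without group-execute marks
 * mandatory locking and is left alone.
 */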

/*
 * precheck_file_write():
 * Check the conditions on a file descriptor prior to beginning a write
 * on it.  Contains the common precheck code for both buffered and direct
 * IO.
 */
int precheck_file_write(struct file *file, struct inode *inode,
                        size_t *count, loff_t *ppos)
{
        ssize_t         err;
        unsigned long   limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
        loff_t          pos = *ppos;

        err = -EINVAL;
        if (pos < 0)
                goto out;

        err = file->f_error;
        if (err) {
                file->f_error = 0;
                goto out;
        }

        /* FIXME: this is for backwards compatibility with 2.4 */
        if (!S_ISBLK(inode->i_mode) && (file->f_flags & O_APPEND))
                *ppos = pos = inode->i_size;

        /*
         * Check whether we've reached the file size limit.
         */
        err = -EFBIG;

        if (!S_ISBLK(inode->i_mode) && limit != RLIM_INFINITY) {
                if (pos >= limit) {
                        send_sig(SIGXFSZ, current, 0);
                        goto out;
                }
                if (pos > 0xFFFFFFFFULL || *count > limit - (u32)pos) {
                        /* send_sig(SIGXFSZ, current, 0); */
                        *count = limit - (u32)pos;
                }
        }

        /*
         *      LFS rule
         */
        if (pos + *count > MAX_NON_LFS && !(file->f_flags & O_LARGEFILE)) {
                if (pos >= MAX_NON_LFS) {
                        send_sig(SIGXFSZ, current, 0);
                        goto out;
                }
                if (*count > MAX_NON_LFS - (u32)pos) {
                        /* send_sig(SIGXFSZ, current, 0); */
                        *count = MAX_NON_LFS - (u32)pos;
                }
        }

        /*
         *      Are we about to exceed the fs block limit?
         *
         *      If we have already written some data, this becomes a
         *      short write.  If we have exceeded the limit without
         *      writing any data, we send a signal and fail with EFBIG.
         *
         *      Linus' frestrict idea will clean these up nicely..
         */

        if (!S_ISBLK(inode->i_mode)) {
                if (pos >= inode->i_sb->s_maxbytes) {
                        if (*count || pos > inode->i_sb->s_maxbytes) {
                                send_sig(SIGXFSZ, current, 0);
                                err = -EFBIG;
                                goto out;
                        }
                        /* zero-length writes at ->s_maxbytes are OK */
                }

                if (pos + *count > inode->i_sb->s_maxbytes)
                        *count = inode->i_sb->s_maxbytes - pos;
        } else {
                if (is_read_only(inode->i_rdev)) {
                        err = -EPERM;
                        goto out;
                }
                if (pos >= inode->i_size) {
                        if (*count || pos > inode->i_size) {
                                err = -ENOSPC;
                                goto out;
                        }
                }

                if (pos + *count > inode->i_size)
                        *count = inode->i_size - pos;
        }

        err = 0;
out:
        return err;
}
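
/*
 * Worked example of the RLIMIT_FSIZE clamp above (assumed numbers): with
 * limit = 1048576 (1 MB), pos = 921600 (900 KB) and *count = 204800
 * (200 KB), pos is still below the limit so no SIGXFSZ is sent, but
 * *count is trimmed to limit - pos = 126976 bytes and the caller ends up
 * doing a short write.
 */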

/*
 * Write to a file through the page cache.
 *
 * We currently put everything into the page cache prior to writing it.
 * This is not a problem when writing full pages. With partial pages,
 * however, we first have to read the data into the cache, then
 * dirty the page, and finally schedule it for writing. Alternatively, we
 * could write-through just the portion of data that would go into that
 * page, but that would kill performance for applications that write data
 * line by line, and it's prone to race conditions.
 *
 * Note that this routine doesn't try to keep track of dirty pages. Each
 * file system has to do this all by itself, unfortunately.
 *                                                      okir@monad.swb.de
 */
ssize_t
do_generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
{
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
        struct inode    *inode = mapping->host;
        loff_t          pos;
        struct page     *page, *cached_page;
        ssize_t         written;
        long            status = 0;
        ssize_t         err;
        unsigned        bytes;

        cached_page = NULL;
        pos = *ppos;
        written = 0;

        err = precheck_file_write(file, inode, &count, &pos);
        if (err != 0 || count == 0)
                goto out;

        remove_suid(inode);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        mark_inode_dirty_sync(inode);

        do {
                unsigned long index, offset;
                long page_fault;
                char *kaddr;

                /*
                 * Try to find the page in the cache. If it isn't there,
                 * allocate a free page.
                 */
                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 */
                { volatile unsigned char dummy;
                        __get_user(dummy, buf);
                        __get_user(dummy, buf+bytes-1);
                }

                status = -ENOMEM;       /* we'll assign it later anyway */
                page = __grab_cache_page(mapping, index, &cached_page);
                if (!page)
                        break;

                /* We have exclusive IO access to the page.. */
                if (!PageLocked(page)) {
                        PAGE_BUG(page);
                }

                kaddr = kmap(page);
                status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
                if (status)
                        goto sync_failure;
                page_fault = __copy_from_user(kaddr+offset, buf, bytes);
                flush_dcache_page(page);
                status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
                if (page_fault)
                        goto fail_write;
                if (!status)
                        status = bytes;

                if (status >= 0) {
                        written += status;
                        count -= status;
                        pos += status;
                        buf += status;
                }
unlock:
                kunmap(page);
                /* Mark it unlocked again and drop the page.. */
                SetPageReferenced(page);
                UnlockPage(page);
                page_cache_release(page);

                if (status < 0)
                        break;
        } while (count);
done:
        *ppos = pos;

        if (cached_page)
                page_cache_release(cached_page);

        /* For now, when the user asks for O_SYNC, we'll actually
         * provide O_DSYNC. */
        if (status >= 0) {
                if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
                        status = generic_osync_inode(inode, OSYNC_METADATA|OSYNC_DATA);
        }

        err = written ? written : status;
out:

        return err;
fail_write:
        status = -EFAULT;
        goto unlock;

sync_failure:
        /*
         * If blocksize < pagesize, prepare_write() may have instantiated a
         * few blocks outside i_size.  Trim these off again.
         */
        kunmap(page);
        UnlockPage(page);
        page_cache_release(page);
        if (pos + bytes > inode->i_size)
                vmtruncate(inode, inode->i_size);
        goto done;
}
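
/*
 * The prepare_write()/commit_write() pair used above is supplied by the
 * filesystem through its address_space_operations.  A typical wiring for
 * a block-based filesystem looks roughly like this (minimal sketch; the
 * "myfs_" names are hypothetical placeholders):
 */
#if 0
static int myfs_prepare_write(struct file *file, struct page *page,
                              unsigned from, unsigned to)
{
        /* map or allocate the blocks backing [from, to) and read in any
         * partially overwritten blocks that are not yet uptodate */
        return block_prepare_write(page, from, to, myfs_get_block);
}

static struct address_space_operations myfs_aops = {
        readpage:       myfs_readpage,
        writepage:      myfs_writepage,
        sync_page:      block_sync_page,
        prepare_write:  myfs_prepare_write,
        commit_write:   generic_commit_write,
};
#endif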

ssize_t
do_generic_direct_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
{
        struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
        struct inode    *inode = mapping->host;
        loff_t          pos;
        ssize_t         written;
        long            status = 0;
        ssize_t         err;

        pos = *ppos;
        written = 0;

        err = precheck_file_write(file, inode, &count, &pos);
        if (err != 0 || count == 0)
                goto out;

        if (!(file->f_flags & O_DIRECT))
                BUG();

        remove_suid(inode);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        mark_inode_dirty_sync(inode);

        written = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos);
        if (written > 0) {
                loff_t end = pos + written;
                if (end > inode->i_size && !S_ISBLK(inode->i_mode)) {
                        inode->i_size = end;
                        mark_inode_dirty(inode);
                }
                *ppos = end;
                invalidate_inode_pages2(mapping);
        }
        /*
         * Sync the fs metadata but not the minor inode changes and
         * of course not the data as we did direct DMA for the IO.
         */
        if (written >= 0 && (file->f_flags & O_SYNC))
                status = generic_osync_inode(inode, OSYNC_METADATA);

        err = written ? written : status;
out:
        return err;
}
 
/*
 * The O_DIRECT write fell back to buffered IO (the filesystem returned
 * -ENOTBLK): do an ordinary page-cache write and then flush the data out,
 * so the caller still gets the on-disk semantics it asked for.
 */
static int do_odirect_fallback(struct file *file, struct inode *inode,
                               const char *buf, size_t count, loff_t *ppos)
{
        ssize_t ret;
        int err;

        down(&inode->i_sem);
        ret = do_generic_file_write(file, buf, count, ppos);
        if (ret > 0) {
                err = do_fdatasync(file);
                if (err)
                        ret = err;
        }
        up(&inode->i_sem);
        return ret;
}
 
ssize_t
generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
{
        struct inode    *inode = file->f_dentry->d_inode->i_mapping->host;
        ssize_t         err;

        if ((ssize_t) count < 0)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;

        if (file->f_flags & O_DIRECT) {
                /* do_generic_direct_write may drop i_sem during the
                   actual IO */
                down_read(&inode->i_alloc_sem);
                down(&inode->i_sem);
                err = do_generic_direct_write(file, buf, count, ppos);
                up(&inode->i_sem);
                up_read(&inode->i_alloc_sem);
                if (unlikely(err == -ENOTBLK))
                        err = do_odirect_fallback(file, inode, buf, count, ppos);
        } else {
                down(&inode->i_sem);
                err = do_generic_file_write(file, buf, count, ppos);
                up(&inode->i_sem);
        }

        return err;
}
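
/*
 * Filesystems normally export this write path through their
 * file_operations (minimal sketch; the "myfs_" names are hypothetical):
 */
#if 0
static struct file_operations myfs_file_operations = {
        read:           generic_file_read,
        write:          generic_file_write,
        mmap:           generic_file_mmap,
        fsync:          myfs_sync_file,
};
#endif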

/*
 * Size and allocate the global page-cache hash table at boot time, aiming
 * for roughly one bucket pointer per page of physical memory.
 */
void __init page_cache_init(unsigned long mempages)
{
        unsigned long htable_size, order;

        htable_size = mempages;
        htable_size *= sizeof(struct page *);
        for(order = 0; (PAGE_SIZE << order) < htable_size; order++)
                ;

        do {
                unsigned long tmp = (PAGE_SIZE << order) / sizeof(struct page *);

                page_hash_bits = 0;
                while((tmp >>= 1UL) != 0UL)
                        page_hash_bits++;

                page_hash_table = (struct page **)
                        __get_free_pages(GFP_ATOMIC, order);
        } while(page_hash_table == NULL && --order > 0);

        printk("Page-cache hash table entries: %d (order: %ld, %ld bytes)\n",
               (1 << page_hash_bits), order, (PAGE_SIZE << order));
        if (!page_hash_table)
                panic("Failed to allocate page hash table\n");
        memset((void *)page_hash_table, 0, PAGE_HASH_SIZE * sizeof(struct page *));
}
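
/*
 * Worked example of the sizing above (assumed numbers): with 4 KB pages,
 * 32768 pages of memory and 4-byte pointers, htable_size is 131072 bytes,
 * the order loop stops at order 5 (PAGE_SIZE << 5 == 131072), tmp becomes
 * 131072 / 4 == 32768 == 2^15, so page_hash_bits ends up as 15 and the
 * table holds 32768 bucket pointers.
 */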
