OpenCores
URL: https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion repository: or1k
File: /or1k/trunk/rc203soc/sw/uClinux/mmnommu/filemap.c (rev 1777)

/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994, 1995  Linus Torvalds
 *
 * uClinux revisions
 * Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
 *                     The Silver Hammer Group, Ltd.
 * Copyright (C) 1999  D. Jeff Dionne <jeff@uclinux.org>,
 *                     Rt-Control, Inc.
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/config.h> /* CONFIG_READA_SMALL */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/fs.h>
#include <linux/locks.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 */

unsigned long page_cache_size = 0;
struct page * page_hash_table[PAGE_HASH_SIZE];

/*
 * Simple routines for both non-shared and shared mappings.
 */

#define release_page(page) __free_page((page))
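
/*
 * Editor's illustration (not part of the original file): the lookup
 * helpers used below -- page_hash(), find_page(), __find_page() --
 * live in <linux/pagemap.h>.  The sketch that follows only shows the
 * reference-counting contract they imply: a successful lookup bumps
 * page->count, which is why every find_page()/__find_page() hit in
 * this file is eventually paired with release_page(), i.e.
 * __free_page().  The body is a simplified guess, not the real code.
 */
#if 0 /* illustrative sketch only, never compiled */
static inline struct page * example_find_page(struct inode * inode,
                                              unsigned long offset)
{
        struct page * page;

        /* walk the inode's page list; the real code goes through the hash table */
        for (page = inode->i_pages; page != NULL; page = page->next) {
                if (page->offset == offset) {
                        page->count++;  /* caller now "holds" the page */
                        return page;
                }
        }
        return NULL;
}
#endif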

/*
 * Invalidate the pages of an inode, removing all pages that aren't
 * locked down (those are sure to be up-to-date anyway, so we shouldn't
 * invalidate them).
 */
void invalidate_inode_pages(struct inode * inode)
{
        struct page ** p;
        struct page * page;

        p = &inode->i_pages;
        while ((page = *p) != NULL) {
                if (PageLocked(page)) {
                        p = &page->next;
                        continue;
                }
                inode->i_nrpages--;
                if ((*p = page->next) != NULL)
                        (*p)->prev = page->prev;
                page->dirty = 0;
                page->next = NULL;
                page->prev = NULL;
                remove_page_from_hash_queue(page);
                page->inode = NULL;
                __free_page(page);
                continue;
        }
}

/*
 * Truncate the page cache at a set offset, removing the pages
 * that are beyond that offset (and zeroing out partial pages).
 */
void truncate_inode_pages(struct inode * inode, unsigned long start)
{
        struct page ** p;
        struct page * page;

repeat:
        p = &inode->i_pages;
        while ((page = *p) != NULL) {
                unsigned long offset = page->offset;

                /* page wholly truncated - free it */
                if (offset >= start) {
                        if (PageLocked(page)) {
                                __wait_on_page(page);
                                goto repeat;
                        }
                        inode->i_nrpages--;
                        if ((*p = page->next) != NULL)
                                (*p)->prev = page->prev;
                        page->dirty = 0;
                        page->next = NULL;
                        page->prev = NULL;
                        remove_page_from_hash_queue(page);
                        page->inode = NULL;
                        __free_page(page);
                        continue;
                }
                p = &page->next;
                offset = start - offset;
                /* partial truncate, clear end of page */
                if (offset < PAGE_SIZE) {
                        unsigned long address = page_address(page);
                        memset((void *) (offset + address), 0, PAGE_SIZE - offset);
                        flush_page_to_ram(address);
                }
        }
}
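
/*
 * Editor's illustration (not part of the original file): a filesystem
 * that shrinks a file is expected to drop the now-stale cached pages
 * before it frees the underlying blocks.  The helper name below is
 * hypothetical; only the truncate_inode_pages() call reflects this file.
 */
#if 0 /* illustrative sketch only, never compiled */
static void example_fs_truncate(struct inode * inode)
{
        /* discard cached data past the new size, zero the partial page */
        truncate_inode_pages(inode, inode->i_size);

        /* ...filesystem-specific block freeing would follow here... */
}
#endif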

/*
 * This is called from try_to_swap_out() when we try to get rid of some
 * pages..  If we're unmapping the last occurrence of this page, we also
 * free it from the page hash-queues etc, as we don't want to keep it
 * in-core unnecessarily.
 */
unsigned long page_unuse(unsigned long page)
{
        struct page * p = mem_map + MAP_NR(page);
        int count = p->count;

        if (count != 2)
                return count;
        if (!p->inode)
                return count;
        remove_page_from_hash_queue(p);
        remove_page_from_inode_queue(p);
        free_page(page);
        return 1;
}

/*
 * Update a page cache copy, when we're doing a "write()" system call
 * See also "update_vm_cache()".
 */
void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
{
        unsigned long offset, len;

        offset = (pos & ~PAGE_MASK);
        pos = pos & PAGE_MASK;
        len = PAGE_SIZE - offset;
        do {
                struct page * page;

                if (len > count)
                        len = count;
                page = find_page(inode, pos);
                if (page) {
                        wait_on_page(page);
                        memcpy((void *) (offset + page_address(page)), buf, len);
                        release_page(page);
                }
                count -= len;
                buf += len;
                len = PAGE_SIZE;
                offset = 0;
                pos += PAGE_SIZE;
        } while (count);
}
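
/*
 * Editor's illustration (not part of the original file): no caller of
 * update_vm_cache() appears in this file.  The intended pattern -- as
 * the comment above suggests -- is a filesystem's write() path copying
 * the user data into its buffers and then refreshing any cached pages
 * that overlap the written range.  The names below are hypothetical.
 */
#if 0 /* illustrative sketch only, never compiled */
static int example_fs_write(struct inode * inode, struct file * filp,
                            const char * buf, int count)
{
        unsigned long pos = filp->f_pos;
        int written;

        /* hypothetical helper that writes into the buffer cache */
        written = example_write_to_buffers(inode, pos, buf, count);
        if (written > 0) {
                /* keep the page cache coherent with what was just written */
                update_vm_cache(inode, pos, buf, written);
                filp->f_pos = pos + written;
        }
        return written;
}
#endif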

static inline void add_to_page_cache(struct page * page,
        struct inode * inode, unsigned long offset,
        struct page **hash)
{
        page->count++;
        page->flags &= ~((1 << PG_uptodate) | (1 << PG_error));
        page->offset = offset;
        add_page_to_inode_queue(inode, page);
        __add_page_to_hash_queue(page, hash);
}

/*
 * Try to read ahead in the file. "page_cache" is a potentially free page
 * that we could use for the cache (if it is 0 we can try to create one,
 * this is all overlapped with the IO on the previous page finishing anyway)
 */
static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
{
        struct page * page;
        struct page ** hash;

        offset &= PAGE_MASK;
        switch (page_cache) {
        case 0:
                page_cache = __get_free_page(GFP_KERNEL);
                if (!page_cache)
                        break;
        default:
                if (offset >= inode->i_size)
                        break;
                hash = page_hash(inode, offset);
                page = __find_page(inode, offset, *hash);
                if (!page) {
                        /*
                         * Ok, add the new page to the hash-queues...
                         */
                        page = mem_map + MAP_NR(page_cache);
                        add_to_page_cache(page, inode, offset, hash);
                        inode->i_op->readpage(inode, page);
                        page_cache = 0;
                }
                release_page(page);
        }
        return page_cache;
}

/*
 * Wait for IO to complete on a locked page.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
void __wait_on_page(struct page *page)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&page->wait, &wait);
repeat:
        run_task_queue(&tq_disk);
        current->state = TASK_UNINTERRUPTIBLE;
        if (PageLocked(page)) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&page->wait, &wait);
        current->state = TASK_RUNNING;
}
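
/*
 * Editor's illustration (not part of the original file): the
 * wait_on_page() used elsewhere in this file is the cheap wrapper from
 * <linux/pagemap.h>; it presumably only drops into __wait_on_page()
 * when the page is actually locked, roughly like this:
 */
#if 0 /* illustrative sketch only, never compiled */
static inline void example_wait_on_page(struct page * page)
{
        if (PageLocked(page))
                __wait_on_page(page);   /* sleep until the IO completes */
}
#endif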

#if 0
#define PROFILE_READAHEAD
#define DEBUG_READAHEAD
#endif

/*
 * Read-ahead profiling information
 * --------------------------------
 * Every PROFILE_MAXREADCOUNT reads, the following information is written
 * to the syslog:
 *   Percentage of asynchronous read-ahead.
 *   Average values of the read-ahead context fields.
 * If DEBUG_READAHEAD is defined, a snapshot of these fields is written
 * to the syslog.
 */

#ifdef PROFILE_READAHEAD

#define PROFILE_MAXREADCOUNT 1000

static unsigned long total_reada;
static unsigned long total_async;
static unsigned long total_ramax;
static unsigned long total_ralen;
static unsigned long total_rawin;

static void profile_readahead(int async, struct file *filp)
{
        unsigned long flags;

        ++total_reada;
        if (async)
                ++total_async;

        total_ramax     += filp->f_ramax;
        total_ralen     += filp->f_ralen;
        total_rawin     += filp->f_rawin;

        if (total_reada > PROFILE_MAXREADCOUNT) {
                save_flags(flags);
                cli();
                if (!(total_reada > PROFILE_MAXREADCOUNT)) {
                        restore_flags(flags);
                        return;
                }

                printk("Readahead average:  max=%ld, len=%ld, win=%ld, async=%ld%%\n",
                        total_ramax/total_reada,
                        total_ralen/total_reada,
                        total_rawin/total_reada,
                        (total_async*100)/total_reada);
#ifdef DEBUG_READAHEAD
                printk("Readahead snapshot: max=%ld, len=%ld, win=%ld, raend=%ld\n",
                        filp->f_ramax, filp->f_ralen, filp->f_rawin, filp->f_raend);
#endif

                total_reada     = 0;
                total_async     = 0;
                total_ramax     = 0;
                total_ralen     = 0;
                total_rawin     = 0;

                restore_flags(flags);
        }
}
#endif  /* defined PROFILE_READAHEAD */

/*
 * Read-ahead context:
 * -------------------
 * The read-ahead context fields of the "struct file" are the following:
 * - f_raend : position of the first byte after the last page we tried to
 *             read ahead.
 * - f_ramax : current read-ahead maximum size.
 * - f_ralen : length of the current IO read block we tried to read ahead.
 * - f_rawin : length of the current read-ahead window.
 *             if the last read-ahead was synchronous then
 *                  f_rawin = f_ralen
 *             otherwise (it was asynchronous)
 *                  f_rawin = previous value of f_ralen + f_ralen
 *
 * Read-ahead limits:
 * ------------------
 * MIN_READAHEAD   : minimum read-ahead size when reading ahead.
 * MAX_READAHEAD   : maximum read-ahead size when reading ahead.
 *
 * Synchronous read-ahead benefits:
 * --------------------------------
 * Using a reasonable IO transfer length from peripheral devices increases
 * system performance.
 * Reasonable means, in this context, not too large but not too small.
 * The actual maximum value is:
 *      MAX_READAHEAD + PAGE_SIZE = 76k if CONFIG_READA_SMALL is undefined
 *      and 32K if defined (4K page size assumed).
 *
 * Asynchronous read-ahead benefits:
 * ---------------------------------
 * Overlapping the next read request with user process execution increases
 * system performance.
 *
 * Read-ahead risks:
 * -----------------
 * We have to guess which further data are needed by the user process.
 * If these data are often not really needed, it's bad for system
 * performance.
 * However, we know that files are often accessed sequentially by
 * application programs, and it seems that it is possible to have some good
 * strategy in that guessing.
 * We only try to read ahead files that seem to be read sequentially.
 *
 * Asynchronous read-ahead risks:
 * ------------------------------
 * In order to maximize overlapping, we must start asynchronous read
 * requests from the device as soon as possible.
 * We must be very careful about:
 * - The number of effective pending IO read requests.
 *   ONE seems to be the only reasonable value.
 * - The total memory pool usage for the file access stream.
 *   This maximum memory usage is implicitly 2 IO read chunks:
 *   2*(MAX_READAHEAD + PAGE_SIZE) = 152K if CONFIG_READA_SMALL is undefined,
 *   64k if defined (4K page size assumed).
 */

#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)

#ifdef CONFIG_READA_SMALL  /* small readahead */
#define MAX_READAHEAD PageAlignSize(4096*7)
#define MIN_READAHEAD PageAlignSize(4096*2)
#else /* large readahead */
#define MAX_READAHEAD PageAlignSize(4096*18)
#define MIN_READAHEAD PageAlignSize(4096*3)
#endif
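
/*
 * Editor's illustration (not part of the original file): working out the
 * numbers quoted in the comment above, assuming PAGE_SIZE = 4096 and
 * PAGE_MASK = ~(PAGE_SIZE - 1):
 *
 *   large readahead:  MAX_READAHEAD = PageAlignSize(4096*18) = 72K and
 *                     MIN_READAHEAD = PageAlignSize(4096*3)  = 12K, so
 *                     MAX_READAHEAD + PAGE_SIZE = 76K and the two-chunk
 *                     pool is 2*(72K + 4K) = 152K.
 *   small readahead:  MAX_READAHEAD = PageAlignSize(4096*7) = 28K and
 *                     MIN_READAHEAD = PageAlignSize(4096*2) =  8K, so
 *                     MAX_READAHEAD + PAGE_SIZE = 32K and the two-chunk
 *                     pool is 2*(28K + 4K) = 64K.
 */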

static inline unsigned long generic_file_readahead(int reada_ok, struct file * filp, struct inode * inode,
        unsigned long ppos, struct page * page,
        unsigned long page_cache)
{
        unsigned long max_ahead, ahead;
        unsigned long raend;

        raend = filp->f_raend & PAGE_MASK;
        max_ahead = 0;

/*
 * The current page is locked.
 * If the current position is inside the previous read IO request, do not
 * try to reread previously read-ahead pages.
 * Otherwise, decide whether or not to read ahead some pages synchronously.
 * If we are not going to read ahead, set the read-ahead context for this
 * page only.
 */
        if (PageLocked(page)) {
                if (!filp->f_ralen || ppos >= raend || ppos + filp->f_ralen < raend) {
                        raend = ppos;
                        if (raend < inode->i_size)
                                max_ahead = filp->f_ramax;
                        filp->f_rawin = 0;
                        filp->f_ralen = PAGE_SIZE;
                        if (!max_ahead) {
                                filp->f_raend  = ppos + filp->f_ralen;
                                filp->f_rawin += filp->f_ralen;
                        }
                }
        }
/*
 * The current page is not locked.
 * If we were reading ahead,
 * if the current max read-ahead size is not zero, and
 * if the current position is inside the last read-ahead IO request,
 *   this is the moment to try to read ahead asynchronously.
 * We will later force an unplug of the device in order to start the
 * asynchronous read IO.
 */
        else if (reada_ok && filp->f_ramax && raend >= PAGE_SIZE &&
                 ppos <= raend && ppos + filp->f_ralen >= raend) {
/*
 * Add ONE page to max_ahead in order to try to have about the same maximum
 * IO size as synchronous read-ahead (MAX_READAHEAD + PAGE_SIZE).
 * Compute the position of the last page we have tried to read, in order to
 * begin reading ahead at the very next page.
 */
                raend -= PAGE_SIZE;
                if (raend < inode->i_size)
                        max_ahead = filp->f_ramax + PAGE_SIZE;

                if (max_ahead) {
                        filp->f_rawin = filp->f_ralen;
                        filp->f_ralen = 0;
                        reada_ok      = 2;
                }
        }
/*
 * Try to read ahead pages.
 * We hope that ll_rw_blk() plug/unplug, request coalescing and sorting, and
 * the scheduler will work well enough for us to avoid overly bad actual IO
 * requests.
 */
        ahead = 0;
        while (ahead < max_ahead) {
                ahead += PAGE_SIZE;
                page_cache = try_to_read_ahead(inode, raend + ahead, page_cache);
        }
/*
 * If we tried to read ahead some pages:
 * If we tried to read ahead asynchronously,
 *   try to force an unplug of the device in order to start the asynchronous
 *   read IO request.
 * Update the read-ahead context:
 *   store the length of the current read-ahead window and
 *   double the current max read-ahead size.
 * That heuristic avoids doing large IO for files that are not really
 * accessed sequentially.
 */
        if (ahead) {
                if (reada_ok == 2) {
                        run_task_queue(&tq_disk);
                }

                filp->f_ralen += ahead;
                filp->f_rawin += filp->f_ralen;
                filp->f_raend = raend + ahead + PAGE_SIZE;

                filp->f_ramax += filp->f_ramax;

                if (filp->f_ramax > MAX_READAHEAD)
                        filp->f_ramax = MAX_READAHEAD;

#ifdef PROFILE_READAHEAD
                profile_readahead((reada_ok == 2), filp);
#endif
        }

        return page_cache;
}


/*
 * This is a generic file read routine, and uses the
 * inode->i_op->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the gotos actually try to clarify some
 * of the logic when it comes to error handling etc.
 */

int generic_file_read(struct inode * inode, struct file * filp, char * buf, int count)
{
        int error, read;
        unsigned long pos, ppos, page_cache;
        int reada_ok;

        error = 0;
        read = 0;
        page_cache = 0;

        pos = filp->f_pos;
        ppos = pos & PAGE_MASK;

#ifdef MAGIC_ROM_PTR
        /* Logic: if the romptr f_op is available, try to get a pointer into ROM
         * for the data, bypassing the buffer cache entirely. This is only a
         * win if the ROM is reasonably fast, of course.
         *
         * Note that this path only requires that the pointer (and the data
         * it points to) be valid until the memcpy_tofs is complete.
         *
         *      -- Kenneth Albanowski
         */

        if (filp->f_op->romptr) {
                struct vm_area_struct vma;
                vma.vm_start = 0;
                vma.vm_offset = pos;
                vma.vm_flags = VM_READ;
                if (!filp->f_op->romptr(inode, filp, &vma)) {
                        if (count > inode->i_size - pos)
                                count = inode->i_size - pos;
                        memcpy_tofs(buf, (void*)vma.vm_start, count);
                        filp->f_pos += count;
                        return count;
                }
        }
#endif /* MAGIC_ROM_PTR */

/*
 * If the current position is outside the previous read-ahead window,
 * we reset the current read-ahead context and set the read-ahead max to zero
 * (it will be set to just the needed value later);
 * otherwise, we assume that the file accesses are sequential enough to
 * continue reading ahead.
 */
        if (ppos > filp->f_raend || ppos + filp->f_rawin < filp->f_raend) {
                reada_ok = 0;
                filp->f_raend = 0;
                filp->f_ralen = 0;
                filp->f_ramax = 0;
                filp->f_rawin = 0;
        } else {
                reada_ok = 1;
        }
/*
 * Adjust the current value of the read-ahead max.
 * If the read operation stays within the first half page, force no read-ahead.
 * Otherwise, try to increase the read-ahead max just enough to do the read request.
 * Then use at least MIN_READAHEAD if read-ahead is ok,
 * and at most MAX_READAHEAD in all cases.
 */
        if (pos + count <= (PAGE_SIZE >> 1)) {
                filp->f_ramax = 0;
        } else {
                unsigned long needed;

                needed = ((pos + count) & PAGE_MASK) - ppos;

                if (filp->f_ramax < needed)
                        filp->f_ramax = needed;

                if (reada_ok && filp->f_ramax < MIN_READAHEAD)
                                filp->f_ramax = MIN_READAHEAD;
                if (filp->f_ramax > MAX_READAHEAD)
                        filp->f_ramax = MAX_READAHEAD;
        }

        for (;;) {
                struct page *page, **hash;

                if (pos >= inode->i_size)
                        break;

                /*
                 * Try to find the data in the page cache..
                 */
                hash = page_hash(inode, pos & PAGE_MASK);
                page = __find_page(inode, pos & PAGE_MASK, *hash);
                if (!page)
                        goto no_cached_page;

found_page:
/*
 * Try to read ahead only if the current page is filled or being filled.
 * Otherwise, if we were reading ahead, decrease the max read-ahead size to
 * the minimum value.
 * In this context, that seems to happen only on a read error or if the
 * page has been rewritten.
 */
                if (PageUptodate(page) || PageLocked(page))
                        page_cache = generic_file_readahead(reada_ok, filp, inode, pos & PAGE_MASK, page, page_cache);
                else if (reada_ok && filp->f_ramax > MIN_READAHEAD)
                                filp->f_ramax = MIN_READAHEAD;

                wait_on_page(page);

                if (!PageUptodate(page))
                        goto page_read_error;

success:
                /*
                 * Ok, we have the page, it's up-to-date and ok,
                 * so now we can finally copy it to user space...
                 */
        {
                unsigned long offset, nr;
                offset = pos & ~PAGE_MASK;
                nr = PAGE_SIZE - offset;
                if (nr > count)
                        nr = count;

                if (nr > inode->i_size - pos)
                        nr = inode->i_size - pos;
                memcpy_tofs(buf, (void *) (page_address(page) + offset), nr);
                release_page(page);
                buf += nr;
                pos += nr;
                read += nr;
                count -= nr;
                if (count) {
                        /*
                         * to prevent hogging the CPU on well-cached systems,
                         * schedule if needed, it's safe to do it here:
                         */
                        if (need_resched)
                                schedule();
                        continue;
                }
                break;
        }

no_cached_page:
                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
                if (!page_cache) {
                        page_cache = __get_free_page(GFP_KERNEL);
                        /*
                         * That could have slept, so go around to the
                         * very beginning..
                         */
                        if (page_cache)
                                continue;
                        error = -ENOMEM;
                        break;
                }

                /*
                 * Ok, add the new page to the hash-queues...
                 */
                page = mem_map + MAP_NR(page_cache);
                page_cache = 0;
                add_to_page_cache(page, inode, pos & PAGE_MASK, hash);

                /*
                 * Error handling is tricky. If we get a read error,
                 * the cached page stays in the cache (but uptodate=0),
                 * and the next process that accesses it will try to
                 * re-read it. This is needed for NFS etc, where the
                 * identity of the reader can decide if we can read the
                 * page or not..
                 */
/*
 * We have to read the page.
 * If we were reading ahead, we had previously tried to read this page;
 * that means that the page has probably been removed from the cache before
 * the application process needed it, or has been rewritten.
 * Decrease the max read-ahead size to the minimum value in that situation.
 */
                if (reada_ok && filp->f_ramax > MIN_READAHEAD)
                        filp->f_ramax = MIN_READAHEAD;

                error = inode->i_op->readpage(inode, page);
                if (!error)
                        goto found_page;
                release_page(page);
                break;

page_read_error:
                /*
                 * We found the page, but it wasn't up-to-date.
                 * Try to re-read it _once_. We do this synchronously,
                 * because this happens only if there were errors.
                 */
                error = inode->i_op->readpage(inode, page);
                if (!error) {
                        wait_on_page(page);
                        if (PageUptodate(page) && !PageError(page))
                                goto success;
                        error = -EIO; /* Some unspecified error occurred.. */
                }
                release_page(page);
                break;
        }

        filp->f_pos = pos;
        filp->f_reada = 1;
        if (page_cache)
                free_page(page_cache);
        UPDATE_ATIME(inode)
        if (!read)
                read = error;
        return read;
}

int shrink_mmap(int priority, int dma, int free_buf)
{
        static int clock = 0;
        struct page * page;
        unsigned long limit = MAP_NR(high_memory);
        struct buffer_head *tmp, *bh;
        int count_max, count_min;

        count_max = (limit<<1) >> (priority>>1);
        count_min = (limit<<1) >> (priority);

        page = mem_map + clock;

        do {
                count_max--;
                if (page->inode || page->buffers)
                        count_min--;

                if (PageLocked(page))
                        goto next;
                if (dma && !PageDMA(page))
                        goto next;
                /* First of all, regenerate the page's referenced bit
                   from any buffers in the page */
                bh = page->buffers;
                if (bh) {
                        tmp = bh;
                        do {
                                if (buffer_touched(tmp)) {
                                        clear_bit(BH_Touched, &tmp->b_state);
                                        set_bit(PG_referenced, &page->flags);
                                }
                                tmp = tmp->b_this_page;
                        } while (tmp != bh);
                }

                /* We can't throw away shared pages, but we do mark
                   them as referenced.  This relies on the fact that
                   no page is currently in both the page cache and the
                   buffer cache; we'd have to modify the following
                   test to allow for that case. */

                switch (page->count) {
                        case 1:
                                /* If it has been referenced recently, don't free it */
                                if (clear_bit(PG_referenced, &page->flags)) {
                                        /* age this page, it was potentially used */
                                        if (priority < 4)
                                                age_page(page);
                                        break;
                                }

                                /* is it a page cache page? */
                                if (page->inode) {
                                        remove_page_from_hash_queue(page);
                                        remove_page_from_inode_queue(page);
                                        __free_page(page);
                                        return 1;
                                }

                                /* is it a buffer cache page? */
                                if (free_buf && bh && try_to_free_buffer(bh, &bh, 6))
                                        return 1;
                                break;

                        default:
                                /* more than one user: we can't throw it away */
                                set_bit(PG_referenced, &page->flags);
                                /* fall through */
                        case 0:
                                /* nothing */
                                break;
                }
next:
                page++;
                clock++;
                if (clock >= limit) {
                        clock = 0;
                        page = mem_map;
                }
        } while (count_max > 0 && count_min > 0);
        return 0;
}

/*
 * There is no MMU on uClinux targets, so there are no paged file
 * mappings to synchronize: msync() is accepted but is a no-op.
 */
asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
{
        return 0;
}

/*
 * Generic (page-cache backed) file mmap is not supported without an
 * MMU; callers get -ENOSYS.
 */
int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
        return -ENOSYS;
}
