trunk/linux_sd_driver/fs/jfs/jfs_metapage.c (OpenCores test_project Subversion repository, rev 62)

/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
        uint    pagealloc;      /* # of page allocations */
        uint    pagefree;       /* # of page frees */
        uint    lockwait;       /* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
        clear_bit(META_locked, &mp->flag);
        wake_up(&mp->wait);
}

static inline void __lock_metapage(struct metapage *mp)
{
        DECLARE_WAITQUEUE(wait, current);
        INCREMENT(mpStat.lockwait);
        add_wait_queue_exclusive(&mp->wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (metapage_locked(mp)) {
                        unlock_page(mp->page);
                        io_schedule();
                        lock_page(mp->page);
                }
        } while (trylock_metapage(mp));
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
        if (trylock_metapage(mp))
                __lock_metapage(mp);
}
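
/*
 * Locking sketch (added for clarity, not from the original source):
 * META_locked is a sleeping lock layered on top of the page lock.
 * Callers take the page lock first, then the metapage lock;
 * __lock_metapage() drops the page lock around io_schedule() so that
 * writeback on the page can make progress while the waiter sleeps, then
 * re-takes it before retrying.  A caller would look roughly like:
 *
 *      lock_page(mp->page);
 *      lock_metapage(mp);      // may sleep in __lock_metapage()
 *      ... examine or modify mp->data ...
 *      unlock_metapage(mp);
 *      unlock_page(mp->page);
 */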

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
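
/*
 * Worked example (added): a JFS metapage is PSIZE bytes (4 KiB,
 * L2PSIZE == 12).  With 4 KiB kernel pages, PAGE_CACHE_SIZE >> L2PSIZE
 * is 1, so each page cache page holds exactly one metapage and
 * page_private() can point straight at it.  With 64 KiB pages (e.g.
 * some ppc64 configurations), MPS_PER_PAGE is 16, and the meta_anchor
 * below is needed to track up to 16 metapages sharing one page.
 */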

#if MPS_PER_PAGE > 1

struct meta_anchor {
        int mp_count;
        atomic_t io_count;
        struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
        if (!PagePrivate(page))
                return NULL;
        return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
        struct meta_anchor *a;
        int index;
        int l2mp_blocks;        /* log2 blocks per metapage */

        if (PagePrivate(page))
                a = mp_anchor(page);
        else {
                a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
                if (!a)
                        return -ENOMEM;
                set_page_private(page, (unsigned long)a);
                SetPagePrivate(page);
                kmap(page);
        }

        if (mp) {
                l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
                index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
                a->mp_count++;
                a->mp[index] = mp;
        }

        return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
        struct meta_anchor *a = mp_anchor(page);
        int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
        int index;

        index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

        BUG_ON(a->mp[index] != mp);

        a->mp[index] = NULL;
        if (--a->mp_count == 0) {
                kfree(a);
                set_page_private(page, 0);
                ClearPagePrivate(page);
                kunmap(page);
        }
}

static inline void inc_io(struct page *page)
{
        atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
        if (atomic_dec_and_test(&mp_anchor(page)->io_count))
                handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, uint offset)
{
        return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
        if (mp) {
                set_page_private(page, (unsigned long)mp);
                SetPagePrivate(page);
                kmap(page);
        }
        return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
        set_page_private(page, 0);
        ClearPagePrivate(page);
        kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif
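
/*
 * Note (added): in the multi-metapage case, io_count counts bios in
 * flight against the page; the completion handler (last_read_complete
 * or last_write_complete) only runs when the last bio finishes.  In the
 * single-metapage case there is at most one bio per page, so inc_io()
 * compiles away and dec_io() invokes the handler directly.
 */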

static void init_once(struct kmem_cache *cachep, void *foo)
{
        struct metapage *mp = (struct metapage *)foo;

        mp->lid = 0;
        mp->lsn = 0;
        mp->flag = 0;
        mp->data = NULL;
        mp->clsn = 0;
        mp->log = NULL;
        set_bit(META_free, &mp->flag);
        init_waitqueue_head(&mp->wait);
}

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
        return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
        mp->flag = 0;
        set_bit(META_free, &mp->flag);

        mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
        /*
         * Allocate the metapage structures
         */
        metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
                                           0, 0, init_once);
        if (metapage_cache == NULL)
                return -ENOMEM;

        metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
                                                    metapage_cache);

        if (metapage_mempool == NULL) {
                kmem_cache_destroy(metapage_cache);
                return -ENOMEM;
        }

        return 0;
}

void metapage_exit(void)
{
        mempool_destroy(metapage_mempool);
        kmem_cache_destroy(metapage_cache);
}
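
/*
 * Note (added): backing the slab cache with a mempool of
 * METAPOOL_MIN_PAGES (32) pre-reserved objects guarantees that
 * alloc_metapage(GFP_NOFS) can always make forward progress during
 * writeback, even under memory pressure, at the cost of pinning a small
 * reserve.  metapage_init() is expected to run once at JFS module
 * initialization, with metapage_exit() as its counterpart on unload.
 */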

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
        if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
            test_bit(META_io, &mp->flag))
                return;
        remove_metapage(page, mp);
        INCREMENT(mpStat.pagefree);
        free_metapage(mp);
}

/*
 * Metapage address space operations
 */

static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
                                    unsigned int *len)
{
        int rc = 0;
        int xflag;
        s64 xaddr;
        sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                               inode->i_blkbits;

        if (lblock >= file_blocks)
                return 0;
        if (lblock + *len > file_blocks)
                *len = file_blocks - lblock;

        if (inode->i_ino) {
                rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
                if ((rc == 0) && *len)
                        lblock = (sector_t)xaddr;
                else
                        lblock = 0;
        } /* else no mapping */

        return lblock;
}
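
/*
 * Note (added): metapage_get_blocks() maps a logical block to a
 * physical block and clamps *len to the length of the contiguous
 * extent.  An inode with i_ino == 0 (the block-device-backed "direct"
 * inode, as used elsewhere in this file) needs no extent lookup: its
 * logical blocks already are physical block numbers, so lblock is
 * returned unchanged.  A return value of 0 means no usable mapping.
 */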

static void last_read_complete(struct page *page)
{
        if (!PageError(page))
                SetPageUptodate(page);
        unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio, int err)
{
        struct page *page = bio->bi_private;

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk(KERN_ERR "metapage_read_end_io: I/O error\n");
                SetPageError(page);
        }

        dec_io(page, last_read_complete);
        bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
        struct jfs_log *log = mp->log;
        unsigned long flags;
        /*
         * This can race.  Recheck that log hasn't been set to null, and
         * after acquiring logsync lock, recheck lsn
         */
        if (!log)
                return;

        LOGSYNC_LOCK(log, flags);
        if (mp->lsn) {
                mp->log = NULL;
                mp->lsn = 0;
                mp->clsn = 0;
                log->count--;
                list_del(&mp->synclist);
        }
        LOGSYNC_UNLOCK(log, flags);
}
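
/*
 * Note (added): a metapage with a nonzero lsn sits on its log's
 * synclist so the journal knows which metadata must still reach disk
 * before the log can wrap.  remove_from_logsync() takes it off that
 * list once the data is safely written (or discarded).  The unlocked
 * !log test is only an optimization; the authoritative recheck of
 * mp->lsn happens under LOGSYNC_LOCK.
 */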

static void last_write_complete(struct page *page)
{
        struct metapage *mp;
        unsigned int offset;

        for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);
                if (mp && test_bit(META_io, &mp->flag)) {
                        if (mp->lsn)
                                remove_from_logsync(mp);
                        clear_bit(META_io, &mp->flag);
                }
                /*
                 * I'd like to call drop_metapage here, but I don't think it's
                 * safe unless I have the page locked
                 */
        }
        end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio, int err)
{
        struct page *page = bio->bi_private;

        BUG_ON(!PagePrivate(page));

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
                printk(KERN_ERR "metapage_write_end_io: I/O error\n");
                SetPageError(page);
        }
        dec_io(page, last_write_complete);
        bio_put(bio);
}

static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio = NULL;
        unsigned int block_offset;      /* block offset of mp within page */
        struct inode *inode = page->mapping->host;
        unsigned int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
        unsigned int len;
        unsigned int xlen;
        struct metapage *mp;
        int redirty = 0;
        sector_t lblock;
        sector_t pblock;
        sector_t next_block = 0;
        sector_t page_start;
        unsigned long bio_bytes = 0;
        unsigned long bio_offset = 0;
        unsigned int offset;

        page_start = (sector_t)page->index <<
                     (PAGE_CACHE_SHIFT - inode->i_blkbits);
        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);

                if (!mp || !test_bit(META_dirty, &mp->flag))
                        continue;

                if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
                        redirty = 1;
                        /*
                         * Make sure this page isn't blocked indefinitely.
                         * If the journal isn't undergoing I/O, push it
                         */
                        if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
                                jfs_flush_journal(mp->log, 0);
                        continue;
                }

                clear_bit(META_dirty, &mp->flag);
                block_offset = offset >> inode->i_blkbits;
                lblock = page_start + block_offset;
                if (bio) {
                        if (xlen && lblock == next_block) {
                                /* Contiguous, in memory & on disk */
                                len = min(xlen, blocks_per_mp);
                                xlen -= len;
                                bio_bytes += len << inode->i_blkbits;
                                set_bit(META_io, &mp->flag);
                                continue;
                        }
                        /* Not contiguous */
                        if (bio_add_page(bio, page, bio_bytes, bio_offset) <
                            bio_bytes)
                                goto add_failed;
                        /*
                         * Increment counter before submitting i/o to keep
                         * count from hitting zero before we're through
                         */
                        inc_io(page);
                        if (!bio->bi_size)
                                goto dump_bio;
                        submit_bio(WRITE, bio);
                        bio = NULL;
                } else {
                        set_page_writeback(page);
                        inc_io(page);
                }
                xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
                pblock = metapage_get_blocks(inode, lblock, &xlen);
                if (!pblock) {
                        /* Need better error handling */
                        printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
                        dec_io(page, last_write_complete);
                        continue;
                }
                set_bit(META_io, &mp->flag);
                len = min(xlen, (uint) JFS_SBI(inode->i_sb)->nbperpage);

                bio = bio_alloc(GFP_NOFS, 1);
                bio->bi_bdev = inode->i_sb->s_bdev;
                bio->bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = page;

                /* Don't call bio_add_page yet, we may add to this vec */
                bio_offset = offset;
                bio_bytes = len << inode->i_blkbits;

                xlen -= len;
                next_block = lblock + len;
        }
        if (bio) {
                if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
                        goto add_failed;
                if (!bio->bi_size)
                        goto dump_bio;

                submit_bio(WRITE, bio);
        }
        if (redirty)
                redirty_page_for_writepage(wbc, page);

        unlock_page(page);

        return 0;
add_failed:
        /* We should never reach here, since we're only adding one vec */
        printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
        goto skip;
dump_bio:
        print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
                       4, bio, sizeof(*bio), 0);
skip:
        bio_put(bio);
        unlock_page(page);
        dec_io(page, last_write_complete);

        return -EIO;
}
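
/*
 * Writeback flow (added summary): metapage_writepage() walks the dirty
 * metapages in the page, coalescing runs that are contiguous both in
 * memory and on disk into a single bio, and starts a new bio
 * (submitting the previous one) whenever the on-disk extent breaks.
 * Metapages pinned by an uncommitted transaction (nohomeok) are skipped
 * and the page is redirtied so they are retried after the journal
 * commits.
 */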

static int metapage_readpage(struct file *fp, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct bio *bio = NULL;
        unsigned int block_offset;
        unsigned int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
        sector_t page_start;    /* address of page in fs blocks */
        sector_t pblock;
        unsigned int xlen;
        unsigned int len;
        unsigned int offset;

        BUG_ON(!PageLocked(page));
        page_start = (sector_t)page->index <<
                     (PAGE_CACHE_SHIFT - inode->i_blkbits);

        block_offset = 0;
        while (block_offset < blocks_per_page) {
                xlen = blocks_per_page - block_offset;
                pblock = metapage_get_blocks(inode, page_start + block_offset,
                                             &xlen);
                if (pblock) {
                        if (!PagePrivate(page))
                                insert_metapage(page, NULL);
                        inc_io(page);
                        if (bio)
                                submit_bio(READ, bio);

                        bio = bio_alloc(GFP_NOFS, 1);
                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio->bi_sector = pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = page;
                        len = xlen << inode->i_blkbits;
                        offset = block_offset << inode->i_blkbits;
                        if (bio_add_page(bio, page, len, offset) < len)
                                goto add_failed;
                        block_offset += xlen;
                } else
                        block_offset++;
        }
        if (bio)
                submit_bio(READ, bio);
        else
                unlock_page(page);

        return 0;

add_failed:
        printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
        bio_put(bio);
        dec_io(page, last_read_complete);
        return -EIO;
}
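
/*
 * Read flow (added summary): each physically contiguous extent in the
 * page gets its own single-vec bio; unmapped blocks are skipped one at
 * a time.  io_count is bumped per bio, so last_read_complete() (which
 * marks the page up to date and unlocks it) only runs once the final
 * bio completes.
 */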

static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct metapage *mp;
        int ret = 1;
        unsigned int offset;

        for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                mp = page_to_mp(page, offset);

                if (!mp)
                        continue;

                jfs_info("metapage_releasepage: mp = 0x%p", mp);
                if (mp->count || mp->nohomeok ||
                    test_bit(META_dirty, &mp->flag)) {
                        jfs_info("count = %ld, nohomeok = %d", mp->count,
                                 mp->nohomeok);
                        ret = 0;
                        continue;
                }
                if (mp->lsn)
                        remove_from_logsync(mp);
                remove_metapage(page, mp);
                INCREMENT(mpStat.pagefree);
                free_metapage(mp);
        }
        return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
        BUG_ON(offset);

        BUG_ON(PageWriteback(page));

        metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
        .readpage       = metapage_readpage,
        .writepage      = metapage_writepage,
        .sync_page      = block_sync_page,
        .releasepage    = metapage_releasepage,
        .invalidatepage = metapage_invalidatepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
};
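
/*
 * Note (added): these address_space_operations are installed on the
 * mappings that hold JFS metadata (the per-filesystem direct inode and
 * the map inodes), so ordinary page cache reclaim and writeback drive
 * metapage I/O through the functions above rather than through
 * buffer_heads.
 */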

struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                                unsigned int size, int absolute,
                                unsigned long new)
{
        int l2BlocksPerPage;
        int l2bsize;
        struct address_space *mapping;
        struct metapage *mp = NULL;
        struct page *page;
        unsigned long page_index;
        unsigned long page_offset;

        jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
                 inode->i_ino, lblock, absolute);

        l2bsize = inode->i_blkbits;
        l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
        page_index = lblock >> l2BlocksPerPage;
        page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
        if ((page_offset + size) > PAGE_CACHE_SIZE) {
                jfs_err("MetaData crosses page boundary!!");
                jfs_err("lblock = %lx, size  = %d", lblock, size);
                dump_stack();
                return NULL;
        }
        if (absolute)
                mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
        else {
                /*
                 * If an nfs client tries to read an inode that is larger
                 * than any existing inodes, we may try to read past the
                 * end of the inode map
                 */
                if ((lblock << inode->i_blkbits) >= inode->i_size)
                        return NULL;
                mapping = inode->i_mapping;
        }

        if (new && (PSIZE == PAGE_CACHE_SIZE)) {
                page = grab_cache_page(mapping, page_index);
                if (!page) {
                        jfs_err("grab_cache_page failed!");
                        return NULL;
                }
                SetPageUptodate(page);
        } else {
                page = read_mapping_page(mapping, page_index, NULL);
                if (IS_ERR(page) || !PageUptodate(page)) {
                        jfs_err("read_mapping_page failed!");
                        return NULL;
                }
                lock_page(page);
        }

        mp = page_to_mp(page, page_offset);
        if (mp) {
                if (mp->logical_size != size) {
                        jfs_error(inode->i_sb,
                                  "__get_metapage: mp->logical_size != size");
                        jfs_err("logical_size = %d, size = %d",
                                mp->logical_size, size);
                        dump_stack();
                        goto unlock;
                }
                mp->count++;
                lock_metapage(mp);
                if (test_bit(META_discard, &mp->flag)) {
                        if (!new) {
                                jfs_error(inode->i_sb,
                                          "__get_metapage: using a "
                                          "discarded metapage");
                                discard_metapage(mp);
                                goto unlock;
                        }
                        clear_bit(META_discard, &mp->flag);
                }
        } else {
                INCREMENT(mpStat.pagealloc);
                mp = alloc_metapage(GFP_NOFS);
                mp->page = page;
                mp->flag = 0;
                mp->xflag = COMMIT_PAGE;
                mp->count = 1;
                mp->nohomeok = 0;
                mp->logical_size = size;
                mp->data = page_address(page) + page_offset;
                mp->index = lblock;
                if (unlikely(insert_metapage(page, mp))) {
                        free_metapage(mp);
                        goto unlock;
                }
                lock_metapage(mp);
        }

        if (new) {
                jfs_info("zeroing mp = 0x%p", mp);
                memset(mp->data, 0, PSIZE);
        }

        unlock_page(page);
        jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
        return mp;

unlock:
        unlock_page(page);
        return NULL;
}
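
/*
 * Usage sketch (added; the wrapper names come from jfs_metapage.h and
 * are reproduced here from memory, so treat them as illustrative):
 *
 *      // read an existing metadata page at an absolute disk block
 *      struct metapage *mp = read_metapage(ipbmap, blkno, PSIZE, 1);
 *      if (mp) {
 *              ... examine or modify mp->data ...
 *              release_metapage(mp);   // every get must be paired
 *      }
 *
 * The "new" flag (used by get_metapage) skips the disk read and
 * zero-fills the buffer, which is why a grab_cache_page() page can be
 * marked up to date without any I/O.
 */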

void grab_metapage(struct metapage *mp)
{
        jfs_info("grab_metapage: mp = 0x%p", mp);
        page_cache_get(mp->page);
        lock_page(mp->page);
        mp->count++;
        lock_metapage(mp);
        unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
        struct page *page = mp->page;
        jfs_info("force_metapage: mp = 0x%p", mp);
        set_bit(META_forcewrite, &mp->flag);
        clear_bit(META_sync, &mp->flag);
        page_cache_get(page);
        lock_page(page);
        set_page_dirty(page);
        write_one_page(page, 1);
        clear_bit(META_forcewrite, &mp->flag);
        page_cache_release(page);
}

void hold_metapage(struct metapage *mp)
{
        lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
        if (mp->count || mp->nohomeok) {
                /* Someone else will release this */
                unlock_page(mp->page);
                return;
        }
        page_cache_get(mp->page);
        mp->count++;
        lock_metapage(mp);
        unlock_page(mp->page);
        release_metapage(mp);
}

void release_metapage(struct metapage *mp)
{
        struct page *page = mp->page;
        jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

        BUG_ON(!page);

        lock_page(page);
        unlock_metapage(mp);

        assert(mp->count);
        if (--mp->count || mp->nohomeok) {
                unlock_page(page);
                page_cache_release(page);
                return;
        }

        if (test_bit(META_dirty, &mp->flag)) {
                set_page_dirty(page);
                if (test_bit(META_sync, &mp->flag)) {
                        clear_bit(META_sync, &mp->flag);
                        write_one_page(page, 1);
                        lock_page(page); /* write_one_page unlocks the page */
                }
        } else if (mp->lsn)     /* discard_metapage doesn't remove it */
                remove_from_logsync(mp);

        /* Try to keep metapages from using up too much memory */
        drop_metapage(page, mp);

        unlock_page(page);
        page_cache_release(page);
}
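
/*
 * Lifetime rules (added summary): mp->count is a reference count taken
 * and dropped under the page lock.  release_metapage() drops it, pushes
 * a META_sync page out synchronously via write_one_page(), and frees
 * the metapage through drop_metapage() once nothing pins it.
 * force_metapage() overrides the nohomeok hold by setting
 * META_forcewrite for a single synchronous write.
 */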

void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
        sector_t lblock;
        int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
        int BlocksPerPage = 1 << l2BlocksPerPage;
        /* All callers are interested in block device's mapping */
        struct address_space *mapping =
                JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
        struct metapage *mp;
        struct page *page;
        unsigned int offset;

        /*
         * Mark metapages to discard.  They will eventually be
         * released, but should not be written.
         */
        for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
             lblock += BlocksPerPage) {
                page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
                if (!page)
                        continue;
                for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
                        mp = page_to_mp(page, offset);
                        if (!mp)
                                continue;
                        if (mp->index < addr)
                                continue;
                        if (mp->index >= addr + len)
                                break;

                        clear_bit(META_dirty, &mp->flag);
                        set_bit(META_discard, &mp->flag);
                        if (mp->lsn)
                                remove_from_logsync(mp);
                }
                unlock_page(page);
                page_cache_release(page);
        }
}
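
/*
 * Note (added): __invalidate_metapages() is used when previously
 * journaled disk blocks are freed or reused; cached metapages covering
 * [addr, addr + len) are flagged META_discard so their stale contents
 * are neither written back nor handed out again (see the META_discard
 * check in __get_metapage()).
 */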

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
                    int *eof, void *data)
{
        int len = 0;
        off_t begin;

        len += sprintf(buffer,
                       "JFS Metapage statistics\n"
                       "=======================\n"
                       "page allocations = %d\n"
                       "page frees = %d\n"
                       "lock waits = %d\n",
                       mpStat.pagealloc,
                       mpStat.pagefree,
                       mpStat.lockwait);

        begin = offset;
        *start = buffer + begin;
        len -= begin;

        if (len > length)
                len = length;
        else
                *eof = 1;

        if (len < 0)
                len = 0;

        return len;
}
#endif
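
/*
 * Note (added): jfs_mpstat_read() follows the legacy procfs read_proc
 * convention: format everything into the caller's buffer, set *start
 * and *eof, and return the number of bytes available from the requested
 * offset, clamped to the requested length.
 */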
