OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk

File: or1k/trunk/linux/linux-2.4/fs/jfs/jfs_metapage.c (rev 1765)
/*
 *   Copyright (C) International Business Machines Corp., 2000-2003
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/init.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

extern struct task_struct *jfsCommitTask;
static spinlock_t meta_lock = SPIN_LOCK_UNLOCKED;
static wait_queue_head_t meta_wait;

#ifdef CONFIG_JFS_STATISTICS
struct {
        uint    pagealloc;      /* # of page allocations */
        uint    pagefree;       /* # of page frees */
        uint    lockwait;       /* # of sleeping lock_metapage() calls */
        uint    allocwait;      /* # of sleeping alloc_metapage() calls */
} mpStat;
#endif


#define HASH_BITS 10            /* This makes hash_table 1 4K page */
#define HASH_SIZE (1 << HASH_BITS)
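/*
 * Sizing note: HASH_SIZE is 1 << 10 = 1024 buckets.  With 4-byte pointers
 * the table is exactly one 4K page, which is what the comment above refers
 * to; metapage_init() computes hash_order so enough pages are allocated to
 * hold HASH_SIZE pointers whatever the pointer size.
 */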
static struct metapage **hash_table = NULL;
static unsigned long hash_order;

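/*
 * Per-metapage locking: the META_locked bit in mp->flag is the lock.
 * trylock_metapage() uses test_and_set_bit(), so it returns the old bit
 * value: zero means the lock was acquired, non-zero means someone else
 * already holds it.  Contenders queue exclusively on mp->wait in
 * __lock_metapage(), which drops meta_lock around schedule() and retakes
 * it before retrying.  These helpers are called with meta_lock held (see
 * the "needs meta_lock" note below).
 */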
static inline int metapage_locked(struct metapage *mp)
{
        return test_bit(META_locked, &mp->flag);
}

static inline int trylock_metapage(struct metapage *mp)
{
        return test_and_set_bit(META_locked, &mp->flag);
}

static inline void unlock_metapage(struct metapage *mp)
{
        clear_bit(META_locked, &mp->flag);
        wake_up(&mp->wait);
}

static void __lock_metapage(struct metapage *mp)
{
        DECLARE_WAITQUEUE(wait, current);

        INCREMENT(mpStat.lockwait);

        add_wait_queue_exclusive(&mp->wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (metapage_locked(mp)) {
                        spin_unlock(&meta_lock);
                        schedule();
                        spin_lock(&meta_lock);
                }
        } while (trylock_metapage(mp));
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&mp->wait, &wait);
}

/* needs meta_lock */
static inline void lock_metapage(struct metapage *mp)
{
        if (trylock_metapage(mp))
                __lock_metapage(mp);
}

/*
 * metapage pool is based on Linux 2.5's mempool
 *
 * Tap into reserved structures in critical paths where waiting on a
 * memory allocation could cause deadlock
 */
#define METAPOOL_MIN_PAGES 32
static struct metapage *reserved_metapages[METAPOOL_MIN_PAGES];
static int num_reserved = 0;
kmem_cache_t *metapage_cache;

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
        struct metapage *mp = (struct metapage *)foo;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                mp->lid = 0;
                mp->lsn = 0;
                mp->flag = 0;
                mp->data = NULL;
                mp->clsn = 0;
                mp->log = NULL;
                set_bit(META_free, &mp->flag);
                init_waitqueue_head(&mp->wait);
        }
}

static void empty_reserved(void)
{
        while (num_reserved--)
                kmem_cache_free(metapage_cache,
                                reserved_metapages[num_reserved]);
}

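/*
 * alloc_metapage() is called with meta_lock held.  It tries, in order:
 * an atomic slab allocation (never sleeps), the reserved_metapages pool
 * when the caller passed no_wait, and finally a blocking GFP_NOFS
 * allocation.  Only the last path releases meta_lock, and it reports that
 * through *dropped_lock so the caller knows it must re-search the hash
 * chain for a metapage that may have been inserted while the lock was
 * dropped (see __get_metapage()).
 */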
static struct metapage *alloc_metapage(int *dropped_lock, int no_wait)
{
        struct metapage *new;

        *dropped_lock = 0;

        /*
         * Always try an atomic alloc first, to avoid dropping the
         * spinlock
         */
        new = kmem_cache_alloc(metapage_cache, GFP_ATOMIC);
        if (new)
                return new;

        if (no_wait && num_reserved)
                return reserved_metapages[--num_reserved];

        *dropped_lock = 1;
        spin_unlock(&meta_lock);
        new = kmem_cache_alloc(metapage_cache, GFP_NOFS);
        spin_lock(&meta_lock);
        return new;
}

static void __free_metapage(struct metapage *mp)
{
        mp->flag = 0;
        set_bit(META_free, &mp->flag);

        if (num_reserved < METAPOOL_MIN_PAGES)
                reserved_metapages[num_reserved++] = mp;
        else
                kmem_cache_free(metapage_cache, mp);
}

static inline void free_metapage(struct metapage * mp)
{
        spin_lock(&meta_lock);
        __free_metapage(mp);
        spin_unlock(&meta_lock);
}

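/*
 * metapage_init() creates the "jfs_mp" slab cache (init_once() marks each
 * new object META_free and initializes its wait queue), prefills the
 * reserved pool with METAPOOL_MIN_PAGES entries, then allocates the hash
 * table: hash_order is the smallest order for which
 * (PAGE_SIZE << hash_order) / sizeof(void *) reaches HASH_SIZE entries.
 */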
int __init metapage_init(void)
{
        struct metapage *mp;

        /*
         * Initialize wait queue
         */
        init_waitqueue_head(&meta_wait);

        /*
         * Allocate the metapage structures
         */
        metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
                                           0, 0, init_once, NULL);
        if (metapage_cache == NULL)
                return -ENOMEM;

        while (num_reserved < METAPOOL_MIN_PAGES) {
                mp = kmem_cache_alloc(metapage_cache, GFP_NOFS);
                if (mp)
                        reserved_metapages[num_reserved++] = mp;
                else {
                        empty_reserved();
                        kmem_cache_destroy(metapage_cache);
                        return -ENOMEM;
                }
        }
        /*
         * Now the hash list
         */
        for (hash_order = 0;
             ((PAGE_SIZE << hash_order) / sizeof(void *)) < HASH_SIZE;
             hash_order++);
        hash_table =
            (struct metapage **) __get_free_pages(GFP_KERNEL, hash_order);
        assert(hash_table);
        memset(hash_table, 0, PAGE_SIZE << hash_order);

        return 0;
}

void metapage_exit(void)
{
        empty_reserved();
        kmem_cache_destroy(metapage_cache);
}

/*
 * Basically same hash as in pagemap.h, but using our hash table
 */
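/*
 * The i macro divides the mapping's address by the largest power of two
 * that divides sizeof(struct inode), the same trick pagemap.h uses, so
 * address bits that never vary between inode-embedded mappings do not
 * waste hash bits.  s(x) folds the bits above HASH_BITS back into the low
 * bits; s(i + index) is then masked with HASH_SIZE - 1 to pick a bucket.
 */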
static struct metapage **meta_hash(struct address_space *mapping,
                                   unsigned long index)
{
#define i (((unsigned long)mapping)/ \
           (sizeof(struct inode) & ~(sizeof(struct inode) -1 )))
#define s(x) ((x) + ((x) >> HASH_BITS))
        return hash_table + (s(i + index) & (HASH_SIZE - 1));
#undef i
#undef s
}

static struct metapage *search_hash(struct metapage ** hash_ptr,
                                    struct address_space *mapping,
                                    unsigned long index)
{
        struct metapage *ptr;

        for (ptr = *hash_ptr; ptr; ptr = ptr->hash_next) {
                if ((ptr->mapping == mapping) && (ptr->index == index))
                        return ptr;
        }

        return NULL;
}

static void add_to_hash(struct metapage * mp, struct metapage ** hash_ptr)
{
        if (*hash_ptr)
                (*hash_ptr)->hash_prev = mp;

        mp->hash_prev = NULL;
        mp->hash_next = *hash_ptr;
        *hash_ptr = mp;
}

static void remove_from_hash(struct metapage * mp, struct metapage ** hash_ptr)
{
        if (mp->hash_prev)
                mp->hash_prev->hash_next = mp->hash_next;
        else {
                assert(*hash_ptr == mp);
                *hash_ptr = mp->hash_next;
        }

        if (mp->hash_next)
                mp->hash_next->hash_prev = mp->hash_prev;
}

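/*
 * __get_metapage() is the lookup/allocation entry point.  The metadata
 * block lblock lives either in the inode's own mapping or, when absolute
 * is set, in the block device's mapping; size must not cross a page
 * boundary.  With new != 0 the backing page is grabbed and the data
 * zeroed, otherwise it is read through the mapping's readpage operation.
 * On success the metapage comes back locked, with mp->count raised and
 * mp->data mapped at the right offset inside the page.  A rough caller
 * sketch (real callers typically go through wrapper macros declared in
 * jfs_metapage.h):
 *
 *      mp = __get_metapage(ip, lblock, PSIZE, 1, 0);   read existing block
 *      if (mp) {
 *              ... examine or modify mp->data ...
 *              set_bit(META_dirty, &mp->flag);         if it was modified
 *              release_metapage(mp);
 *      }
 */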
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                                unsigned int size, int absolute,
                                unsigned long new)
{
        int dropped_lock;
        struct metapage **hash_ptr;
        int l2BlocksPerPage;
        int l2bsize;
        int no_wait;
        struct address_space *mapping;
        struct metapage *mp;
        unsigned long page_index;
        unsigned long page_offset;

        jfs_info("__get_metapage: inode = 0x%p, lblock = 0x%lx", inode, lblock);

        if (absolute)
                mapping = inode->i_sb->s_bdev->bd_inode->i_mapping;
        else
                mapping = inode->i_mapping;

        spin_lock(&meta_lock);

        hash_ptr = meta_hash(mapping, lblock);

        mp = search_hash(hash_ptr, mapping, lblock);
        if (mp) {
              page_found:
                if (test_bit(META_discard, &mp->flag)) {
                        if (!new) {
                                spin_unlock(&meta_lock);
                                jfs_error(inode->i_sb,
                                          "__get_metapage: using a "
                                          "discarded metapage");
                                return NULL;
                        }
                        clear_bit(META_discard, &mp->flag);
                }
                mp->count++;
                jfs_info("__get_metapage: found 0x%p, in hash", mp);
                if (mp->logical_size != size) {
                        spin_unlock(&meta_lock);
                        jfs_error(inode->i_sb,
                                  "__get_metapage: mp->logical_size != size");
                        return NULL;
                }
                lock_metapage(mp);
                spin_unlock(&meta_lock);
        } else {
                l2bsize = inode->i_blkbits;
                l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
                page_index = lblock >> l2BlocksPerPage;
                page_offset = (lblock - (page_index << l2BlocksPerPage)) <<
                    l2bsize;
                if ((page_offset + size) > PAGE_CACHE_SIZE) {
                        spin_unlock(&meta_lock);
                        jfs_err("MetaData crosses page boundary!!");
                        return NULL;
                }

                /*
                 * Locks held on aggregate inode pages are usually
                 * not held long, and they are taken in critical code
                 * paths (committing dirty inodes, txCommit thread)
                 *
                 * Attempt to get metapage without blocking, tapping into
                 * reserves if necessary.
                 */
                if (JFS_IP(inode)->fileset == AGGREGATE_I)
                        no_wait = 1;
                else
                        no_wait = 0;

                mp = alloc_metapage(&dropped_lock, no_wait);
                if (dropped_lock) {
                        /* alloc_metapage blocked, we need to search the hash
                         * again.
                         */
                        struct metapage *mp2;
                        mp2 = search_hash(hash_ptr, mapping, lblock);
                        if (mp2) {
                                __free_metapage(mp);
                                mp = mp2;
                                goto page_found;
                        }
                }
                mp->flag = 0;
                lock_metapage(mp);
                if (absolute)
                        set_bit(META_absolute, &mp->flag);
                mp->xflag = COMMIT_PAGE;
                mp->count = 1;
                atomic_set(&mp->nohomeok,0);
                mp->mapping = mapping;
                mp->index = lblock;
                mp->page = 0;
                mp->logical_size = size;
                add_to_hash(mp, hash_ptr);
                spin_unlock(&meta_lock);

                if (new) {
                        jfs_info("__get_metapage: Calling grab_cache_page");
                        mp->page = grab_cache_page(mapping, page_index);
                        if (!mp->page) {
                                jfs_err("grab_cache_page failed!");
                                goto freeit;
                        } else {
                                INCREMENT(mpStat.pagealloc);
                                UnlockPage(mp->page);
                        }
                } else {
                        jfs_info("__get_metapage: Calling read_cache_page");
                        mp->page = read_cache_page(mapping, lblock,
                                    (filler_t *)mapping->a_ops->readpage, NULL);
                        if (IS_ERR(mp->page)) {
                                jfs_err("read_cache_page failed!");
                                goto freeit;
                        } else
                                INCREMENT(mpStat.pagealloc);
                }
                mp->data = kmap(mp->page) + page_offset;
        }

        if (new)
                memset(mp->data, 0, PSIZE);

        jfs_info("__get_metapage: returning = 0x%p", mp);
        return mp;

freeit:
        spin_lock(&meta_lock);
        remove_from_hash(mp, hash_ptr);
        __free_metapage(mp);
        spin_unlock(&meta_lock);
        return NULL;
}

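/*
 * hold_metapage() takes an additional reference on an already-known
 * metapage.  With force set the caller refuses to sleep: if the metapage
 * is already locked, META_forced is recorded instead, and release_metapage()
 * later drops that reference without unlocking anything.
 */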
void hold_metapage(struct metapage * mp, int force)
{
        spin_lock(&meta_lock);

        mp->count++;

        if (force) {
                ASSERT (!(test_bit(META_forced, &mp->flag)));
                if (trylock_metapage(mp))
                        set_bit(META_forced, &mp->flag);
        } else
                lock_metapage(mp);

        spin_unlock(&meta_lock);
}

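/*
 * __write_metapage() pushes the metapage's byte range to disk through the
 * mapping's prepare_write/commit_write address_space operations and clears
 * META_dirty.  Metapages flagged META_discard are stale and are simply
 * marked clean without being written.
 */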
static void __write_metapage(struct metapage * mp)
{
        int l2bsize = mp->mapping->host->i_blkbits;
        int l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
        unsigned long page_index;
        unsigned long page_offset;
        int rc;

        jfs_info("__write_metapage: mp = 0x%p", mp);

        if (test_bit(META_discard, &mp->flag)) {
                /*
                 * This metadata is no longer valid
                 */
                clear_bit(META_dirty, &mp->flag);
                return;
        }

        page_index = mp->page->index;
        page_offset =
            (mp->index - (page_index << l2BlocksPerPage)) << l2bsize;

        lock_page(mp->page);
        rc = mp->mapping->a_ops->prepare_write(NULL, mp->page, page_offset,
                                               page_offset +
                                               mp->logical_size);
        if (rc) {
                jfs_err("prepare_write returned %d!", rc);
                ClearPageUptodate(mp->page);
                UnlockPage(mp->page);
                kunmap(mp->page);
                clear_bit(META_dirty, &mp->flag);
                return;
        }
        rc = mp->mapping->a_ops->commit_write(NULL, mp->page, page_offset,
                                              page_offset +
                                              mp->logical_size);
        if (rc) {
                jfs_err("commit_write returned %d", rc);
        }

        UnlockPage(mp->page);
        clear_bit(META_dirty, &mp->flag);

        jfs_info("__write_metapage done");
}

static inline void sync_metapage(struct metapage *mp)
{
        struct page *page = mp->page;

        page_cache_get(page);
        lock_page(page);

        /* we're done with this page - no need to check for errors */
        if (page->buffers) {
                writeout_one_page(page);
                waitfor_one_page(page);
        }

        UnlockPage(page);
        page_cache_release(page);
}

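/*
 * release_metapage() drops the reference taken by __get_metapage() or
 * hold_metapage().  While other references (or a non-zero nohomeok count)
 * remain, it only unlocks the metapage.  On the final release it unhashes
 * the metapage, writes it back if dirty, syncs the page if META_sync is
 * set, flushes discarded pages, unlinks the metapage from the log's
 * synclist if it carries an lsn, and finally returns the structure to the
 * reserved pool or the slab cache.
 */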
void release_metapage(struct metapage * mp)
{
        struct jfs_log *log;

        jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

        spin_lock(&meta_lock);
        if (test_bit(META_forced, &mp->flag)) {
                clear_bit(META_forced, &mp->flag);
                mp->count--;
                spin_unlock(&meta_lock);
                return;
        }

        assert(mp->count);
        if (--mp->count || atomic_read(&mp->nohomeok)) {
                unlock_metapage(mp);
                spin_unlock(&meta_lock);
        } else {
                remove_from_hash(mp, meta_hash(mp->mapping, mp->index));
                spin_unlock(&meta_lock);

                if (mp->page) {
                        kunmap(mp->page);
                        mp->data = 0;
                        if (test_bit(META_dirty, &mp->flag))
                                __write_metapage(mp);
                        if (test_bit(META_sync, &mp->flag)) {
                                sync_metapage(mp);
                                clear_bit(META_sync, &mp->flag);
                        }

                        if (test_bit(META_discard, &mp->flag)) {
                                lock_page(mp->page);
                                block_flushpage(mp->page, 0);
                                UnlockPage(mp->page);
                        }

                        page_cache_release(mp->page);
                        INCREMENT(mpStat.pagefree);
                }

                if (mp->lsn) {
                        /*
                         * Remove metapage from logsynclist.
                         */
                        log = mp->log;
                        LOGSYNC_LOCK(log);
                        mp->log = 0;
                        mp->lsn = 0;
                        mp->clsn = 0;
                        log->count--;
                        list_del(&mp->synclist);
                        LOGSYNC_UNLOCK(log);
                }

                free_metapage(mp);
        }
}

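/*
 * __invalidate_metapages() walks the block range [addr, addr + len) of the
 * block device mapping.  Blocks cached as metapages are flagged
 * META_discard so they will be dropped rather than written back when
 * released; blocks that only live in the page cache are flushed directly.
 */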
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
        struct metapage **hash_ptr;
        unsigned long lblock;
        int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
        /* All callers are interested in block device's mapping */
        struct address_space *mapping = ip->i_sb->s_bdev->bd_inode->i_mapping;
        struct metapage *mp;
        struct page *page;

        /*
         * First, mark metapages to discard.  They will eventually be
         * released, but should not be written.
         */
        for (lblock = addr; lblock < addr + len;
             lblock += 1 << l2BlocksPerPage) {
                hash_ptr = meta_hash(mapping, lblock);
                spin_lock(&meta_lock);
                mp = search_hash(hash_ptr, mapping, lblock);
                if (mp) {
                        set_bit(META_discard, &mp->flag);
                        spin_unlock(&meta_lock);
                } else {
                        spin_unlock(&meta_lock);
                        page = find_lock_page(mapping, lblock>>l2BlocksPerPage);
                        if (page) {
                                block_flushpage(page, 0);
                                UnlockPage(page);
                        }
                }
        }
}

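/*
 * jfs_mpstat_read() formats the mpStat counters into the caller's buffer
 * following the usual 2.4 read_proc conventions
 * (buffer/start/offset/length/eof); it is intended to be registered as a
 * /proc read handler elsewhere in JFS when CONFIG_JFS_STATISTICS is set.
 */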
#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
                    int *eof, void *data)
{
        int len = 0;
        off_t begin;

        len += sprintf(buffer,
                       "JFS Metapage statistics\n"
                       "=======================\n"
                       "page allocations = %d\n"
                       "page frees = %d\n"
                       "lock waits = %d\n"
                       "allocation waits = %d\n",
                       mpStat.pagealloc,
                       mpStat.pagefree,
                       mpStat.lockwait,
                       mpStat.allocwait);

        begin = offset;
        *start = buffer + begin;
        len -= begin;

        if (len > length)
                len = length;
        else
                *eof = 1;

        if (len < 0)
                len = 0;

        return len;
}
#endif
