OpenCores, Subversion repository or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/linux/linux-2.4/fs/xfs/xfs_iget.c (rev 1765; all lines last changed in rev 1275 by phoenix)
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"

#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_utils.h"

/*
 * Initialize the inode hash table for the newly mounted file system.
 *
 * mp -- this is the mount point structure for the file system being
 *       initialized
 */
void
xfs_ihash_init(xfs_mount_t *mp)
{
        int     i;

        mp->m_ihsize = XFS_BUCKETS(mp);
        mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize
                                      * sizeof(xfs_ihash_t), KM_SLEEP);
        ASSERT(mp->m_ihash != NULL);
        for (i = 0; i < mp->m_ihsize; i++) {
                rwlock_init(&(mp->m_ihash[i].ih_lock));
        }
}

/*
 * Free up structures allocated by xfs_ihash_init, at unmount time.
 */
void
xfs_ihash_free(xfs_mount_t *mp)
{
        kmem_free(mp->m_ihash, mp->m_ihsize*sizeof(xfs_ihash_t));
        mp->m_ihash = NULL;
}
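
/*
 * Illustrative pairing (editor's sketch, not part of the original
 * source): xfs_ihash_init() runs once per mount and xfs_ihash_free()
 * undoes it at unmount.
 *
 *      xfs_ihash_init(mp);     .. at mount: allocate table, init bucket locks ..
 *      ... lookups hash into a bucket via XFS_IHASH(mp, ino) ...
 *      xfs_ihash_free(mp);     .. at unmount: release the table ..
 */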

/*
 * Initialize the inode cluster hash table for the newly mounted file system.
 *
 * mp -- this is the mount point structure for the file system being
 *       initialized
 */
void
xfs_chash_init(xfs_mount_t *mp)
{
        int     i;

        /*
         * m_chash size is based on m_ihash
         * with a minimum of 37 entries
         */
        mp->m_chsize = (XFS_BUCKETS(mp)) /
                         (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
        if (mp->m_chsize < 37) {
                mp->m_chsize = 37;
        }
        mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
                                                 * sizeof(xfs_chash_t),
                                                 KM_SLEEP);
        ASSERT(mp->m_chash != NULL);

        for (i = 0; i < mp->m_chsize; i++) {
                spinlock_init(&mp->m_chash[i].ch_lock,"xfshash");
        }
}

/*
 * Free up structures allocated by xfs_chash_init, at unmount time.
 */
void
xfs_chash_free(xfs_mount_t *mp)
{
        int     i;

        for (i = 0; i < mp->m_chsize; i++) {
                spinlock_destroy(&mp->m_chash[i].ch_lock);
        }

        kmem_free(mp->m_chash, mp->m_chsize*sizeof(xfs_chash_t));
        mp->m_chash = NULL;
}
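
/*
 * Worked sizing example for xfs_chash_init() (hypothetical numbers,
 * illustration only): if XFS_BUCKETS(mp) were 2048 and a cluster held
 * XFS_INODE_CLUSTER_SIZE(mp) >> sb_inodelog == 16 inodes, then
 * m_chsize = 2048 / 16 = 128; any result below 37 is clamped to 37.
 */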

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the hash table for the file system
 * represented by the mount point parameter mp.  Each bucket of
 * the hash table is guarded by an individual semaphore.
 *
 * If the inode is found in the hash table, its corresponding vnode
 * is obtained with a call to vn_get().  This call takes care of
 * coordination with the reclamation of the inode and vnode.  Note
 * that the vmap structure is filled in while holding the hash lock.
 * This gives us the state of the inode/vnode when we found it and
 * is used for coordination in vn_get().
 *
 * If it is not in core, read it in from the file system's device and
 * add the inode into the hash table.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *        if known (as by bulkstat), else 0.
 */
STATIC int
xfs_iget_core(
        vnode_t         *vp,
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            lock_flags,
        xfs_inode_t     **ipp,
        xfs_daddr_t     bno)
{
        xfs_ihash_t     *ih;
        xfs_inode_t     *ip;
        xfs_inode_t     *iq;
        vnode_t         *inode_vp;
        ulong           version;
        int             error;
        /* REFERENCED */
        int             newnode;
        xfs_chash_t     *ch;
        xfs_chashlist_t *chl, *chlnew;
        SPLDECL(s);


        ih = XFS_IHASH(mp, ino);

again:
        read_lock(&ih->ih_lock);

        for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
                if (ip->i_ino == ino) {

                        inode_vp = XFS_ITOV_NULL(ip);

                        if (inode_vp == NULL) {
                                /* If IRECLAIM is set this inode is
                                 * on its way out of the system,
                                 * we need to pause and try again.
                                 */
                                if (ip->i_flags & XFS_IRECLAIM) {
                                        read_unlock(&ih->ih_lock);
                                        delay(1);
                                        XFS_STATS_INC(xs_ig_frecycle);

                                        goto again;
                                }

                                vn_trace_exit(vp, "xfs_iget.alloc",
                                        (inst_t *)__return_address);

                                XFS_STATS_INC(xs_ig_found);

                                ip->i_flags &= ~XFS_IRECLAIMABLE;
                                read_unlock(&ih->ih_lock);

                                XFS_MOUNT_ILOCK(mp);
                                list_del_init(&ip->i_reclaim);
                                XFS_MOUNT_IUNLOCK(mp);

                                goto finish_inode;

                        } else if (vp != inode_vp) {
                                struct inode *inode = LINVFS_GET_IP(inode_vp);

                                /* The inode is being torn down, pause and
                                 * try again.
                                 */
                                if (inode->i_state & (I_FREEING | I_CLEAR)) {
                                        read_unlock(&ih->ih_lock);
                                        delay(1);
                                        XFS_STATS_INC(xs_ig_frecycle);

                                        goto again;
                                }
/* Chances are the other vnode (the one in the inode) is being torn
 * down right now, and we landed on top of it. Question is, what do
 * we do? Unhook the old inode and hook up the new one?
 */
                                cmn_err(CE_PANIC,
                        "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
                                                inode_vp, vp);
                        }

                        read_unlock(&ih->ih_lock);

                        XFS_STATS_INC(xs_ig_found);

finish_inode:
                        if (lock_flags != 0) {
                                xfs_ilock(ip, lock_flags);
                        }

                        newnode = (ip->i_d.di_mode == 0);
                        if (newnode) {
                                xfs_iocore_inode_reinit(ip);
                        }
                        ip->i_flags &= ~XFS_ISTALE;

                        vn_trace_exit(vp, "xfs_iget.found",
                                                (inst_t *)__return_address);
                        goto return_ip;
                }
        }

        /*
         * Inode cache miss: save the hash chain version stamp and unlock
         * the chain, so we don't deadlock in vn_alloc.
         */
        XFS_STATS_INC(xs_ig_missed);

        version = ih->ih_version;

        read_unlock(&ih->ih_lock);

        /*
         * Read the disk inode attributes into a new inode structure and get
         * a new vnode for it. This should also initialize i_ino and i_mount.
         */
        error = xfs_iread(mp, tp, ino, &ip, bno);
        if (error) {
                return error;
        }

        vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);

        xfs_inode_lock_init(ip, vp);
        xfs_iocore_inode_init(ip);

        if (lock_flags != 0) {
                xfs_ilock(ip, lock_flags);
        }

        /*
         * Put ip on its hash chain, unless someone else hashed a duplicate
         * after we released the hash lock.
         */
        write_lock(&ih->ih_lock);

        if (ih->ih_version != version) {
                for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
                        if (iq->i_ino == ino) {
                                write_unlock(&ih->ih_lock);
                                xfs_idestroy(ip);

                                XFS_STATS_INC(xs_ig_dup);
                                goto again;
                        }
                }
        }

        /*
         * These values _must_ be set before releasing ihlock!
         */
        ip->i_hash = ih;
        if ((iq = ih->ih_next)) {
                iq->i_prevp = &ip->i_next;
        }
        ip->i_next = iq;
        ip->i_prevp = &ih->ih_next;
        ih->ih_next = ip;
        ip->i_udquot = ip->i_gdquot = NULL;
        ih->ih_version++;

        write_unlock(&ih->ih_lock);

        /*
         * put ip on its cluster's hash chain
         */
        ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
               ip->i_cnext == NULL);

        chlnew = NULL;
        ch = XFS_CHASH(mp, ip->i_blkno);
 chlredo:
        s = mutex_spinlock(&ch->ch_lock);
        for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
                if (chl->chl_blkno == ip->i_blkno) {

                        /* insert this inode into the doubly-linked list
                         * where chl points */
                        if ((iq = chl->chl_ip)) {
                                ip->i_cprev = iq->i_cprev;
                                iq->i_cprev->i_cnext = ip;
                                iq->i_cprev = ip;
                                ip->i_cnext = iq;
                        } else {
                                ip->i_cnext = ip;
                                ip->i_cprev = ip;
                        }
                        chl->chl_ip = ip;
                        ip->i_chash = chl;
                        break;
                }
        }

        /* no hash list found for this block; add a new hash list */
        if (chl == NULL)  {
                if (chlnew == NULL) {
                        mutex_spinunlock(&ch->ch_lock, s);
                        ASSERT(xfs_chashlist_zone != NULL);
                        chlnew = (xfs_chashlist_t *)
                                        kmem_zone_alloc(xfs_chashlist_zone,
                                                KM_SLEEP);
                        ASSERT(chlnew != NULL);
                        goto chlredo;
                } else {
                        ip->i_cnext = ip;
                        ip->i_cprev = ip;
                        ip->i_chash = chlnew;
                        chlnew->chl_ip = ip;
                        chlnew->chl_blkno = ip->i_blkno;
                        chlnew->chl_next = ch->ch_list;
                        ch->ch_list = chlnew;
                        chlnew = NULL;
                }
        } else {
                if (chlnew != NULL) {
                        kmem_zone_free(xfs_chashlist_zone, chlnew);
                }
        }

        mutex_spinunlock(&ch->ch_lock, s);


        /*
         * Link ip to its mount and thread it on the mount's inode list.
         */
        XFS_MOUNT_ILOCK(mp);
        if ((iq = mp->m_inodes)) {
                ASSERT(iq->i_mprev->i_mnext == iq);
                ip->i_mprev = iq->i_mprev;
                iq->i_mprev->i_mnext = ip;
                iq->i_mprev = ip;
                ip->i_mnext = iq;
        } else {
                ip->i_mnext = ip;
                ip->i_mprev = ip;
        }
        mp->m_inodes = ip;

        XFS_MOUNT_IUNLOCK(mp);

        newnode = 1;

 return_ip:
        ASSERT(ip->i_df.if_ext_max ==
               XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

        ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
               ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

        *ipp = ip;

        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
        VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);

        return 0;
}


/*
 * The 'normal' internal xfs_iget; if needed it will
 * 'allocate', or 'get', the vnode.
 */
int
xfs_iget(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            lock_flags,
        xfs_inode_t     **ipp,
        xfs_daddr_t     bno)
{
        struct inode    *inode;
        vnode_t         *vp = NULL;
        int             error;

retry:
        XFS_STATS_INC(xs_ig_attempts);

        if ((inode = VFS_GET_INODE(XFS_MTOVFS(mp), ino, 0))) {
                bhv_desc_t      *bdp;
                xfs_inode_t     *ip;
                int             newnode;

                vp = LINVFS_GET_VP(inode);
                if (inode->i_state & I_NEW) {
inode_allocate:
                        vn_initialize(inode);
                        error = xfs_iget_core(vp, mp, tp, ino,
                                                lock_flags, ipp, bno);
                        if (error) {
                                remove_inode_hash(inode);
                                make_bad_inode(inode);
                                if (inode->i_state & I_NEW)
                                        unlock_new_inode(inode);
                                iput(inode);
                        }
                } else {
                        /* These are true if the inode is in inactive or
                         * reclaim. The linux inode is about to go away,
                         * wait for that path to finish, and try again.
                         */
                        if (vp->v_flag & (VINACT | VRECLM)) {
                                vn_wait(vp);
                                iput(inode);
                                goto retry;
                        }

                        if (is_bad_inode(inode)) {
                                iput(inode);
                                return EIO;
                        }

                        bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
                        if (bdp == NULL) {
                                XFS_STATS_INC(xs_ig_dup);
                                goto inode_allocate;
                        }
                        ip = XFS_BHVTOI(bdp);
                        if (lock_flags != 0)
                                xfs_ilock(ip, lock_flags);
                        newnode = (ip->i_d.di_mode == 0);
                        if (newnode)
                                xfs_iocore_inode_reinit(ip);
                        XFS_STATS_INC(xs_ig_found);
                        *ipp = ip;
                        error = 0;
                }
        } else
                error = ENOMEM; /* If we got no inode we are out of memory */

        return error;
}
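
/*
 * Typical call pattern (illustrative sketch, not from the original
 * file): look the inode up with the inode lock held shared, use it,
 * then drop the reference with xfs_iput() using the same lock flags.
 * tp may be NULL when there is no transaction, and bno is 0 when the
 * inode's buffer is not known in advance.
 *
 *      xfs_inode_t     *ip;
 *      int             error;
 *
 *      error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
 *      if (!error) {
 *              ... read-only access to ip ...
 *              xfs_iput(ip, XFS_ILOCK_SHARED);
 *      }
 */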

/*
 * Do the setup for the various locks within the incore inode.
 */
void
xfs_inode_lock_init(
        xfs_inode_t     *ip,
        vnode_t         *vp)
{
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", (long)vp->v_number);
        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
        init_waitqueue_head(&ip->i_ipin_wait);
        atomic_set(&ip->i_pincount, 0);
        init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
}

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t    *mp,
                 xfs_ino_t      ino,
                 xfs_trans_t    *tp)
{
        xfs_ihash_t     *ih;
        xfs_inode_t     *ip;

        ih = XFS_IHASH(mp, ino);
        read_lock(&ih->ih_lock);
        for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
                if (ip->i_ino == ino) {
                        /*
                         * If we find it and tp matches, return it.
                         * Otherwise break from the loop and return
                         * NULL.
                         */
                        if (ip->i_transp == tp) {
                                read_unlock(&ih->ih_lock);
                                return (ip);
                        }
                        break;
                }
        }
        read_unlock(&ih->ih_lock);
        return (NULL);
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *       released.  See the comment on xfs_iunlock() for a list
 *       of valid values.
 */
void
xfs_iput(xfs_inode_t    *ip,
         uint           lock_flags)
{
        vnode_t *vp = XFS_ITOV(ip);

        vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);

        xfs_iunlock(ip, lock_flags);

        VN_RELE(vp);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(xfs_inode_t        *ip,
             uint               lock_flags)
{
        vnode_t         *vp = XFS_ITOV(ip);
        struct inode    *inode = LINVFS_GET_IP(vp);

        vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);

        /* We shouldn't get here without this being true, but just in case */
        if (inode->i_state & I_NEW) {
                remove_inode_hash(inode);
                make_bad_inode(inode);
                unlock_new_inode(inode);
        }
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
        VN_RELE(vp);
}


/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
        vnode_t         *vp;

        /*
         * Remove from old hash list and mount list.
         */
        XFS_STATS_INC(xs_ig_reclaims);

        xfs_iextract(ip);

        /*
         * Here we do a spurious inode lock in order to coordinate with
         * xfs_sync().  This is because xfs_sync() references the inodes
         * in the mount list without taking references on the corresponding
         * vnodes.  We make that OK here by ensuring that we wait until
         * the inode is unlocked in xfs_sync() before we go ahead and
         * free it.  We get both the regular lock and the io lock because
         * the xfs_sync() code may need to drop the regular one but will
         * still hold the io lock.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

        /*
         * Release dquots (and their references) if any. An inode may escape
         * xfs_inactive and get here via vn_alloc->vn_reclaim path.
         */
        XFS_QM_DQDETACH(ip->i_mount, ip);

        /*
         * Pull our behavior descriptor from the vnode chain.
         */
        vp = XFS_ITOV_NULL(ip);
        if (vp) {
                vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
        }

        /*
         * Free all memory associated with the inode.
         */
        xfs_idestroy(ip);
}

/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
        xfs_inode_t     *ip)
{
        xfs_ihash_t     *ih;
        xfs_inode_t     *iq;
        xfs_mount_t     *mp;
        xfs_chash_t     *ch;
        xfs_chashlist_t *chl, *chm;
        SPLDECL(s);

        ih = ip->i_hash;
        write_lock(&ih->ih_lock);
        if ((iq = ip->i_next)) {
                iq->i_prevp = ip->i_prevp;
        }
        *ip->i_prevp = iq;
        write_unlock(&ih->ih_lock);

        /*
         * Remove from cluster hash list
         *   1) delete the chashlist if this is the last inode on the chashlist
         *   2) unchain from list of inodes
         *   3) if chashlist->chl_ip points to this inode, point it at the next
         *      inode instead.
         */
        mp = ip->i_mount;
        ch = XFS_CHASH(mp, ip->i_blkno);
        s = mutex_spinlock(&ch->ch_lock);

        if (ip->i_cnext == ip) {
                /* Last inode on chashlist */
                ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
                ASSERT(ip->i_chash != NULL);
                chm=NULL;
                for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
                        if (chl->chl_blkno == ip->i_blkno) {
                                if (chm == NULL) {
                                        /* first item on the list */
                                        ch->ch_list = chl->chl_next;
                                } else {
                                        chm->chl_next = chl->chl_next;
                                }
                                kmem_zone_free(xfs_chashlist_zone, chl);
                                break;
                        } else {
                                ASSERT(chl->chl_ip != ip);
                                chm = chl;
                        }
                }
                ASSERT_ALWAYS(chl != NULL);
        } else {
                /* delete one inode from a non-empty list */
                iq = ip->i_cnext;
                iq->i_cprev = ip->i_cprev;
                ip->i_cprev->i_cnext = iq;
                if (ip->i_chash->chl_ip == ip) {
                        ip->i_chash->chl_ip = iq;
                }
                ip->i_chash = __return_address;
                ip->i_cprev = __return_address;
                ip->i_cnext = __return_address;
        }
        mutex_spinunlock(&ch->ch_lock, s);

        /*
         * Remove from mount's inode list.
         */
        XFS_MOUNT_ILOCK(mp);
        ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
        iq = ip->i_mnext;
        iq->i_mprev = ip->i_mprev;
        ip->i_mprev->i_mnext = iq;

        /*
         * Fix up the head pointer if it points to the inode being deleted.
         */
        if (mp->m_inodes == ip) {
                if (ip == iq) {
                        mp->m_inodes = NULL;
                } else {
                        mp->m_inodes = iq;
                }
        }

        /* Deal with the deleted inodes list */
        list_del_init(&ip->i_reclaim);

        mp->m_ireclaims++;
        XFS_MOUNT_IUNLOCK(mp);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
        xfs_inode_t     *ip)
{
        uint    lock_mode;

        if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
            ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
                lock_mode = XFS_ILOCK_EXCL;
        } else {
                lock_mode = XFS_ILOCK_SHARED;
        }

        xfs_ilock(ip, lock_mode);

        return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
        xfs_inode_t     *ip,
        unsigned int    lock_mode)
{
        xfs_iunlock(ip, lock_mode);
}
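
/*
 * Usage sketch (illustrative only): the returned mode must be handed
 * back to xfs_iunlock_map_shared(), since the lock may have been
 * taken exclusively in order to read in b-tree extents.
 *
 *      uint    lock_mode;
 *
 *      lock_mode = xfs_ilock_map_shared(ip);
 *      ... read the extent list ...
 *      xfs_iunlock_map_shared(ip, lock_mode);
 */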

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *              XFS_IOLOCK_SHARED,
 *              XFS_IOLOCK_EXCL,
 *              XFS_ILOCK_SHARED,
 *              XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(xfs_inode_t   *ip,
          uint          lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                mrupdate(&ip->i_iolock);
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                mraccess(&ip->i_iolock);
        }
        if (lock_flags & XFS_ILOCK_EXCL) {
                mrupdate(&ip->i_lock);
        } else if (lock_flags & XFS_ILOCK_SHARED) {
                mraccess(&ip->i_lock);
        }
        xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
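
/*
 * Usage sketch (illustrative only): take both locks in one call, IO
 * lock before inode lock as required, and drop them with the same
 * flags.
 *
 *      xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *      ... modify the inode ...
 *      xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */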

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 *
 */
int
xfs_ilock_nowait(xfs_inode_t    *ip,
                 uint           lock_flags)
{
        int     iolocked;
        int     ilocked;

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

        iolocked = 0;
        if (lock_flags & XFS_IOLOCK_EXCL) {
                iolocked = mrtryupdate(&ip->i_iolock);
                if (!iolocked) {
                        return 0;
                }
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                iolocked = mrtryaccess(&ip->i_iolock);
                if (!iolocked) {
                        return 0;
                }
        }
        if (lock_flags & XFS_ILOCK_EXCL) {
                ilocked = mrtryupdate(&ip->i_lock);
                if (!ilocked) {
                        if (iolocked) {
                                mrunlock(&ip->i_iolock);
                        }
                        return 0;
                }
        } else if (lock_flags & XFS_ILOCK_SHARED) {
                ilocked = mrtryaccess(&ip->i_lock);
                if (!ilocked) {
                        if (iolocked) {
                                mrunlock(&ip->i_iolock);
                        }
                        return 0;
                }
        }
        xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
        return 1;
}
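
/*
 * Usage sketch (illustrative only): the trylock variant lets a caller
 * back off instead of sleeping.
 *
 *      if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *              ... got the lock, do the work ...
 *              xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *      } else {
 *              ... skip this inode or retry later ...
 *      }
 */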

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 *
 */
void
xfs_iunlock(xfs_inode_t *ip,
            uint        lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
        ASSERT(lock_flags != 0);

        if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
                ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
                       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
                ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
                       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
                mrunlock(&ip->i_iolock);
        }

        if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
                ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
                       (ismrlocked(&ip->i_lock, MR_ACCESS)));
                ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
                       (ismrlocked(&ip->i_lock, MR_UPDATE)));
                mrunlock(&ip->i_lock);

                /*
                 * Let the AIL know that this item has been unlocked in case
                 * it is in the AIL and anyone is waiting on it.  Don't do
                 * this if the caller has asked us not to.
                 */
                if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
                     ip->i_itemp != NULL) {
                        xfs_trans_unlocked_item(ip->i_mount,
                                                (xfs_log_item_t*)(ip->i_itemp));
                }
        }
        xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(xfs_inode_t    *ip,
                 uint           lock_flags)
{
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

        if (lock_flags & XFS_ILOCK_EXCL) {
                ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
                mrdemote(&ip->i_lock);
        }
        if (lock_flags & XFS_IOLOCK_EXCL) {
                ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
                mrdemote(&ip->i_iolock);
        }
}
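
/*
 * Usage sketch (illustrative; releasing with the SHARED flag after the
 * demotion is an editor's assumption, based on mrdemote() converting
 * the mrlock from update to access mode):
 *
 *      xfs_ilock(ip, XFS_ILOCK_EXCL);
 *      ... work needing exclusive access ...
 *      xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *      ... continue with shared access only ...
 *      xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */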

/*
 * The following three routines simply manage the i_flock
 * semaphore embedded in the inode.  This semaphore synchronizes
 * processes attempting to flush the in-core inode back to disk.
 */
void
xfs_iflock(xfs_inode_t *ip)
{
        psema(&(ip->i_flock), PINOD|PLTWAIT);
}

int
xfs_iflock_nowait(xfs_inode_t *ip)
{
        return (cpsema(&(ip->i_flock)));
}

void
xfs_ifunlock(xfs_inode_t *ip)
{
        ASSERT(valusema(&(ip->i_flock)) <= 0);
        vsema(&(ip->i_flock));
}
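
/*
 * Usage sketch (illustrative only): the flush lock serializes flushing
 * of the in-core inode back to disk.
 *
 *      xfs_iflock(ip);                 .. sleeps until available ..
 *      ... flush the inode ...
 *      xfs_ifunlock(ip);
 *
 *      if (xfs_iflock_nowait(ip)) {    .. non-blocking variant ..
 *              ... flush ...
 *              xfs_ifunlock(ip);
 *      }
 */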
