/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode_item.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"
#include "xfs_refcache.h"

STATIC spinlock_t       xfs_refcache_lock = SPIN_LOCK_UNLOCKED;
STATIC xfs_inode_t      **xfs_refcache;
STATIC int              xfs_refcache_index;
STATIC int              xfs_refcache_busy;
STATIC int              xfs_refcache_count;
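
/*
 * The reference cache is a fixed-size ring of held vnode references.
 * xfs_refcache_index is the next slot to reuse, so inserting into a
 * full cache evicts the oldest entry.  The tunables xfs_refcache_size
 * (current capacity) and xfs_refcache_purge_count (entries purged per
 * sync call) are defined elsewhere; xfs_refcache_size is adjusted via
 * sysctl through xfs_refcache_resize() below.
 */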

/*
 * Insert the given inode into the reference cache.
 */
void
xfs_refcache_insert(
        xfs_inode_t     *ip)
{
        vnode_t         *vp;
        xfs_inode_t     *release_ip;
        xfs_inode_t     **refcache;

        ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE));

        /*
         * If an unmount is busy blowing entries out of the cache,
         * then don't bother.
         */
        if (xfs_refcache_busy) {
                return;
        }

        /*
         * If we tuned the refcache down to zero, don't do anything.
         */
        if (!xfs_refcache_size) {
                return;
        }

        /*
         * The inode is already in the refcache, so don't bother
         * with it.
         */
        if (ip->i_refcache != NULL) {
                return;
        }

        vp = XFS_ITOV(ip);
        /* ASSERT(vp->v_count > 0); */
        VN_HOLD(vp);

        /*
         * We allocate the reference cache on use so that we don't
         * waste the memory on systems not being used as NFS servers.
         */
        if (xfs_refcache == NULL) {
                refcache = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
                                                       sizeof(xfs_inode_t *),
                                                       KM_SLEEP);
        } else {
                refcache = NULL;
        }

        spin_lock(&xfs_refcache_lock);

        /*
         * If we allocated memory for the refcache above and it still
         * needs it, then use the memory we allocated.  Otherwise we'll
         * free the memory below.
         */
        if (refcache != NULL) {
                if (xfs_refcache == NULL) {
                        xfs_refcache = refcache;
                        refcache = NULL;
                }
        }

        /*
         * If an unmount is busy clearing out the cache, don't add new
         * entries to it.
         */
        if (xfs_refcache_busy) {
                spin_unlock(&xfs_refcache_lock);
                VN_RELE(vp);
                /*
                 * If we allocated memory for the refcache above but someone
                 * else beat us to using it, then free the memory now.
                 */
                if (refcache != NULL) {
                        kmem_free(refcache,
                                  XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
                }
                return;
        }
        release_ip = xfs_refcache[xfs_refcache_index];
        if (release_ip != NULL) {
                release_ip->i_refcache = NULL;
                xfs_refcache_count--;
                ASSERT(xfs_refcache_count >= 0);
        }
        xfs_refcache[xfs_refcache_index] = ip;
        ASSERT(ip->i_refcache == NULL);
        ip->i_refcache = &(xfs_refcache[xfs_refcache_index]);
        xfs_refcache_count++;
        ASSERT(xfs_refcache_count <= xfs_refcache_size);
        xfs_refcache_index++;
        if (xfs_refcache_index == xfs_refcache_size) {
                xfs_refcache_index = 0;
        }
        spin_unlock(&xfs_refcache_lock);

        /*
         * Save the pointer to the inode to be released so that we can
         * VN_RELE it once we've dropped our inode locks in xfs_rwunlock().
         * The pointer may be NULL, but that's OK.
         */
        ip->i_release = release_ip;

        /*
         * If we allocated memory for the refcache above but someone
         * else beat us to using it, then free the memory now.
         */
        if (refcache != NULL) {
                kmem_free(refcache,
                          XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
        }
}
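
/*
 * A note on the allocation dance above: kmem_zalloc(..., KM_SLEEP) may
 * sleep, so the cache array cannot be allocated under xfs_refcache_lock.
 * Instead it is preallocated before taking the lock, installed only if
 * xfs_refcache is still NULL once the lock is held, and freed after the
 * lock is dropped if another thread installed its array first.
 */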

/*
 * If the given inode is in the reference cache, purge its entry and
 * release the reference on the vnode.
 */
void
xfs_refcache_purge_ip(
        xfs_inode_t     *ip)
{
        vnode_t *vp;
        int     error;

        /*
         * If we're not pointing to our entry in the cache, then
         * we must not be in the cache.
         */
        if (ip->i_refcache == NULL) {
                return;
        }

        spin_lock(&xfs_refcache_lock);
        if (ip->i_refcache == NULL) {
                spin_unlock(&xfs_refcache_lock);
                return;
        }

        /*
         * Clear both our pointer to the cache entry and its pointer
         * back to us.
         */
        ASSERT(*(ip->i_refcache) == ip);
        *(ip->i_refcache) = NULL;
        ip->i_refcache = NULL;
        xfs_refcache_count--;
        ASSERT(xfs_refcache_count >= 0);
        spin_unlock(&xfs_refcache_lock);

        vp = XFS_ITOV(ip);
        /* ASSERT(vp->v_count > 1); */
        VOP_RELEASE(vp, error);
        VN_RELE(vp);
}
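
/*
 * The unlocked test of ip->i_refcache above is only an optimization;
 * the test is repeated under xfs_refcache_lock because a concurrent
 * purge can clear the field between the two checks.
 */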

/*
 * This is called from the XFS unmount code to purge all entries for the
 * given mount from the cache.  It uses the refcache busy counter to
 * make sure that new entries are not added to the cache as we purge them.
 */
void
xfs_refcache_purge_mp(
        xfs_mount_t     *mp)
{
        vnode_t         *vp;
        int             error, i;
        xfs_inode_t     *ip;

        if (xfs_refcache == NULL) {
                return;
        }

        spin_lock(&xfs_refcache_lock);
        /*
         * Bumping the busy counter keeps new entries from being added
         * to the cache.  We use a counter since multiple unmounts could
         * be in here simultaneously.
         */
        xfs_refcache_busy++;

        for (i = 0; i < xfs_refcache_size; i++) {
                ip = xfs_refcache[i];
                if ((ip != NULL) && (ip->i_mount == mp)) {
                        xfs_refcache[i] = NULL;
                        ip->i_refcache = NULL;
                        xfs_refcache_count--;
                        ASSERT(xfs_refcache_count >= 0);
                        spin_unlock(&xfs_refcache_lock);
                        vp = XFS_ITOV(ip);
                        VOP_RELEASE(vp, error);
                        VN_RELE(vp);
                        spin_lock(&xfs_refcache_lock);
                }
        }

        xfs_refcache_busy--;
        ASSERT(xfs_refcache_busy >= 0);
        spin_unlock(&xfs_refcache_lock);
}
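
/*
 * Note that the spinlock is dropped around each VOP_RELEASE/VN_RELE
 * pair above, since releasing the last reference to a vnode may block;
 * the busy counter keeps new entries from appearing while the lock is
 * down, and each slot is re-read under the lock on the next iteration.
 */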

/*
 * This is called from the XFS sync code to ensure that the refcache
 * is emptied out over time.  We purge a small number of entries with
 * each call.
 */
void
xfs_refcache_purge_some(xfs_mount_t *mp)
{
        int             error, i;
        xfs_inode_t     *ip;
        int             iplist_index;
        xfs_inode_t     **iplist;

        if ((xfs_refcache == NULL) || (xfs_refcache_count == 0)) {
                return;
        }

        iplist_index = 0;
        iplist = (xfs_inode_t **)kmem_zalloc(xfs_refcache_purge_count *
                                          sizeof(xfs_inode_t *), KM_SLEEP);

        spin_lock(&xfs_refcache_lock);

        /*
         * Store any inodes we find in the next several entries
         * into the iplist array to be released after dropping
         * the spinlock.  We always start looking from the currently
         * oldest place in the cache.  We move the refcache index
         * forward as we go so that we are sure to eventually clear
         * out the entire cache when the system goes idle.
         */
        for (i = 0; i < xfs_refcache_purge_count; i++) {
                ip = xfs_refcache[xfs_refcache_index];
                if (ip != NULL) {
                        xfs_refcache[xfs_refcache_index] = NULL;
                        ip->i_refcache = NULL;
                        xfs_refcache_count--;
                        ASSERT(xfs_refcache_count >= 0);
                        iplist[iplist_index] = ip;
                        iplist_index++;
                }
                xfs_refcache_index++;
                if (xfs_refcache_index == xfs_refcache_size) {
                        xfs_refcache_index = 0;
                }
        }

        spin_unlock(&xfs_refcache_lock);

        /*
         * Now drop the inodes we collected.
         */
        for (i = 0; i < iplist_index; i++) {
                VOP_RELEASE(XFS_ITOV(iplist[i]), error);
                VN_RELE(XFS_ITOV(iplist[i]));
        }

        kmem_free(iplist, xfs_refcache_purge_count *
                          sizeof(xfs_inode_t *));
}
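
/*
 * The same drop-the-lock rule applies here as in xfs_refcache_purge_mp(),
 * but instead of cycling the lock per entry, the candidates are batched
 * into iplist under one lock hold and released after it is dropped.
 */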

/*
 * This is called when the refcache is dynamically resized
 * via a sysctl.
 *
 * If the new size is smaller than the old size, purge all
 * entries in slots at or beyond the new size, and move
 * the index if necessary.
 *
 * If the refcache hasn't even been allocated yet, or the
 * new size is not smaller than the old size, just set the
 * value of xfs_refcache_size.
 */
void
xfs_refcache_resize(int xfs_refcache_new_size)
{
        int             i;
        xfs_inode_t     *ip;
        int             iplist_index = 0;
        xfs_inode_t     **iplist;
        int             error;

        /*
         * If the new size is smaller than the current size,
         * purge entries to create a smaller cache, and
         * reposition the index if necessary.
         * Don't bother if there is no refcache yet.
         */
        if (xfs_refcache && (xfs_refcache_new_size < xfs_refcache_size)) {

                iplist = (xfs_inode_t **)kmem_zalloc(XFS_REFCACHE_SIZE_MAX *
                                sizeof(xfs_inode_t *), KM_SLEEP);

                spin_lock(&xfs_refcache_lock);

                for (i = xfs_refcache_new_size; i < xfs_refcache_size; i++) {
                        ip = xfs_refcache[i];
                        if (ip != NULL) {
                                xfs_refcache[i] = NULL;
                                ip->i_refcache = NULL;
                                xfs_refcache_count--;
                                ASSERT(xfs_refcache_count >= 0);
                                iplist[iplist_index] = ip;
                                iplist_index++;
                        }
                }

                xfs_refcache_size = xfs_refcache_new_size;

                /*
                 * Move the index to the beginning of the cache if it's
                 * now past the end.
                 */
                if (xfs_refcache_index >= xfs_refcache_new_size)
                        xfs_refcache_index = 0;

                spin_unlock(&xfs_refcache_lock);

                /*
                 * Now drop the inodes we collected.
                 */
                for (i = 0; i < iplist_index; i++) {
                        VOP_RELEASE(XFS_ITOV(iplist[i]), error);
                        VN_RELE(XFS_ITOV(iplist[i]));
                }

                kmem_free(iplist, XFS_REFCACHE_SIZE_MAX *
                                  sizeof(xfs_inode_t *));
        } else {
                spin_lock(&xfs_refcache_lock);
                xfs_refcache_size = xfs_refcache_new_size;
                spin_unlock(&xfs_refcache_lock);
        }
}
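
/*
 * Growing the cache never reallocates anything: the array is always
 * sized at XFS_REFCACHE_SIZE_MAX entries, and xfs_refcache_size only
 * bounds how much of it the insert and purge paths will touch.
 */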

void
xfs_refcache_iunlock(
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        xfs_inode_t     *release_ip;
        int             error;

        release_ip = ip->i_release;
        ip->i_release = NULL;

        xfs_iunlock(ip, lock_flags);

        if (release_ip != NULL) {
                VOP_RELEASE(XFS_ITOV(release_ip), error);
                VN_RELE(XFS_ITOV(release_ip));
        }
}
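
/*
 * A rough sketch of the intended call sequence (the real caller is the
 * XFS read/write unlock path mentioned in xfs_refcache_insert(); the
 * surrounding code here is illustrative, not verbatim):
 *
 *      xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *      ...serve the NFS write...
 *      xfs_refcache_insert(ip);        holds the vnode, may displace an
 *                                      older inode into ip->i_release
 *      xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL);
 *                                      unlocks first, then VN_RELEs the
 *                                      displaced inode
 */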

void
xfs_refcache_destroy(void)
{
        if (xfs_refcache) {
                kmem_free(xfs_refcache,
                        XFS_REFCACHE_SIZE_MAX * sizeof(xfs_inode_t *));
                xfs_refcache = NULL;
        }
}
