OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

test_project/trunk/linux_sd_driver/fs/nfsd/nfscache.c (rev 79)


/*
 * linux/fs/nfsd/nfscache.c
 *
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>

/* Size of reply cache. Common values are:
 * 4.3BSD:      128
 * 4.4BSD:      256
 * Solaris2:    1024
 * DEC Unix:    512-4096
 */
#define CACHESIZE               1024
#define HASHSIZE                64
#define REQHASH(xid)            (((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1))
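
/*
 * The cache is a fixed pool of up to CACHESIZE entries.  hash_list has
 * HASHSIZE buckets indexed by REQHASH(), which XORs the top byte of the
 * XID into the low bits and masks with HASHSIZE-1.  lru_head chains all
 * entries from least recently used (front) to most recently used (tail).
 */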
static struct hlist_head *      hash_list;
static struct list_head         lru_head;
static int                      cache_disabled = 1;

static int      nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
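
/*
 * Allocate the fixed pool of cache entries and the hash table.  If the
 * hash table cannot be allocated, everything is freed again and the
 * cache stays disabled.
 */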
void
nfsd_cache_init(void)
{
        struct svc_cacherep     *rp;
        int                     i;

        INIT_LIST_HEAD(&lru_head);
        i = CACHESIZE;
        while(i) {
                rp = kmalloc(sizeof(*rp), GFP_KERNEL);
                if (!rp) break;
                list_add(&rp->c_lru, &lru_head);
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                INIT_HLIST_NODE(&rp->c_hash);
                i--;
        }

        if (i)
                printk (KERN_ERR "nfsd: cannot allocate all %d cache entries, only got %d\n",
                        CACHESIZE, CACHESIZE-i);

        hash_list = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
        if (!hash_list) {
                nfsd_cache_shutdown();
                printk (KERN_ERR "nfsd: cannot allocate %Zd bytes for hash list\n",
                        HASHSIZE * sizeof(struct hlist_head));
                return;
        }

        cache_disabled = 0;
}
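
/*
 * Free every cache entry (and any cached reply buffer) as well as the
 * hash table, and disable the cache.
 */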
void
nfsd_cache_shutdown(void)
{
        struct svc_cacherep     *rp;

        while (!list_empty(&lru_head)) {
                rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
                if (rp->c_state == RC_DONE && rp->c_type == RC_REPLBUFF)
                        kfree(rp->c_replvec.iov_base);
                list_del(&rp->c_lru);
                kfree(rp);
        }

        cache_disabled = 1;

        kfree (hash_list);
        hash_list = NULL;
}

/*
 * Move cache entry to end of LRU list
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
        list_move_tail(&rp->c_lru, &lru_head);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
        hlist_del_init(&rp->c_hash);
        hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
}

/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we grab the oldest unlocked entry off the LRU list.
 * Note that no operation within the loop may sleep.
 */
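/*
 * Returns RC_DOIT when the request should be processed normally,
 * RC_DROPIT when a duplicate request is still in progress or was
 * retransmitted too quickly, and RC_REPLY when a cached reply has
 * been copied into rq_res.
 */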
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
{
        struct hlist_node       *hn;
        struct hlist_head       *rh;
        struct svc_cacherep     *rp;
        __be32                  xid = rqstp->rq_xid;
        u32                     proto =  rqstp->rq_prot,
                                vers = rqstp->rq_vers,
                                proc = rqstp->rq_proc;
        unsigned long           age;
        int rtn;

        rqstp->rq_cacherep = NULL;
        if (cache_disabled || type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return RC_DOIT;
        }

        spin_lock(&cache_lock);
        rtn = RC_DOIT;

        rh = &hash_list[REQHASH(xid)];
        hlist_for_each_entry(rp, hn, rh, c_hash) {
                if (rp->c_state != RC_UNUSED &&
                    xid == rp->c_xid && proc == rp->c_proc &&
                    proto == rp->c_prot && vers == rp->c_vers &&
                    time_before(jiffies, rp->c_timestamp + 120*HZ) &&
                    memcmp((char*)&rqstp->rq_addr, (char*)&rp->c_addr, sizeof(rp->c_addr))==0) {
                        nfsdstats.rchits++;
                        goto found_entry;
                }
        }
        nfsdstats.rcmisses++;

        /* This loop shouldn't take more than a few iterations normally */
        {
        int     safe = 0;
        list_for_each_entry(rp, &lru_head, c_lru) {
                if (rp->c_state != RC_INPROG)
                        break;
                if (safe++ > CACHESIZE) {
                        printk("nfsd: loop in repcache LRU list\n");
                        cache_disabled = 1;
                        goto out;
                }
        }
        }

        /* This should not happen */
        if (rp == NULL) {
                static int      complaints;

                printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
                if (++complaints > 5) {
                        printk(KERN_WARNING "nfsd: disabling repcache.\n");
                        cache_disabled = 1;
                }
                goto out;
        }
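
        /* Recycle the oldest reusable LRU entry for this request and mark it in progress */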
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
        memcpy(&rp->c_addr, svc_addr_in(rqstp), sizeof(rp->c_addr));
        rp->c_prot = proto;
        rp->c_vers = vers;
        rp->c_timestamp = jiffies;

        hash_refile(rp);

        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
                kfree(rp->c_replvec.iov_base);
                rp->c_replvec.iov_base = NULL;
        }
        rp->c_type = RC_NOCACHE;
 out:
        spin_unlock(&cache_lock);
        return rtn;

found_entry:
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
        rp->c_timestamp = jiffies;
        lru_put_end(rp);

        rtn = RC_DROPIT;
        /* Request being processed or excessive rexmits */
        if (rp->c_state == RC_INPROG || age < RC_DELAY)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!rqstp->rq_secure && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;       /* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                rp->c_state = RC_UNUSED;
        }

        goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
        struct svc_cacherep *rp;
        struct kvec     *resv = &rqstp->rq_res.head[0], *cachv;
        int             len;

        if (!(rp = rqstp->rq_cacherep) || cache_disabled)
                return;

        len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
        len >>= 2;

        /* Don't cache excessive amounts of data and XDR failures */
        if (!statp || len > (256 >> 2)) {
                rp->c_state = RC_UNUSED;
                return;
        }
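
        /* Cache either the single status word or a private copy of the encoded reply */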
        switch (cachetype) {
        case RC_REPLSTAT:
                if (len != 1)
                        printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
                rp->c_replstat = *statp;
                break;
        case RC_REPLBUFF:
                cachv = &rp->c_replvec;
                cachv->iov_base = kmalloc(len << 2, GFP_KERNEL);
                if (!cachv->iov_base) {
                        spin_lock(&cache_lock);
                        rp->c_state = RC_UNUSED;
                        spin_unlock(&cache_lock);
                        return;
                }
                cachv->iov_len = len << 2;
                memcpy(cachv->iov_base, statp, len << 2);
                break;
        }
        spin_lock(&cache_lock);
        lru_put_end(rp);
        rp->c_secure = rqstp->rq_secure;
        rp->c_type = cachetype;
        rp->c_state = RC_DONE;
        rp->c_timestamp = jiffies;
        spin_unlock(&cache_lock);
        return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
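/* Returns 1 on success, 0 when the cached reply would not fit in the reply buffer. */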
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec     *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}
