/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>


#define NLMDBG_FACILITY         NLMDBG_HOSTCACHE
#define NLM_HOST_MAX            64
#define NLM_HOST_NRHASH         32
#define NLM_ADDRHASH(addr)      (ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND         (60 * HZ)
#define NLM_HOST_EXPIRE         ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT        ((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)
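
/*
 * Since NLM_HOST_NRHASH is a power of two, NLM_ADDRHASH() simply keeps
 * the low five bits of the peer's IPv4 address in host byte order.
 * For example, 10.0.0.5 (0x0a000005) hashes to 0x0a000005 & 31 == 5.
 * Once more than NLM_HOST_MAX entries accumulate, hosts are kept alive
 * longer (300s instead of 120s) and GC passes are scheduled 120s
 * instead of 60s apart.
 */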

static struct hlist_head        nlm_hosts[NLM_HOST_NRHASH];
static unsigned long            next_gc;
static int                      nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);


static void                     nlm_gc_hosts(void);
static struct nsm_handle *      __nsm_find(const struct sockaddr_in *,
                                        const char *, int, int);
static struct nsm_handle *      nsm_find(const struct sockaddr_in *sin,
                                         const char *hostname,
                                         int hostname_len);

/*
 * Common host lookup routine for server & client
 */
static struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
                int proto, int version, const char *hostname,
                int hostname_len, const struct sockaddr_in *ssin)
{
        struct hlist_head *chain;
        struct hlist_node *pos;
        struct nlm_host *host;
        struct nsm_handle *nsm = NULL;
        int             hash;

        dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT
                        ", p=%d, v=%d, my role=%s, name=%.*s)\n",
                        NIPQUAD(ssin->sin_addr.s_addr),
                        NIPQUAD(sin->sin_addr.s_addr), proto, version,
                        server? "server" : "client",
                        hostname_len,
                        hostname? hostname : "<none>");


        hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

        /* Lock hash table */
        mutex_lock(&nlm_host_mutex);

        if (time_after_eq(jiffies, next_gc))
                nlm_gc_hosts();

        /* We may keep several nlm_host objects for a peer, because each
         * nlm_host is identified by
         * (address, protocol, version, server/client)
         * We could probably simplify this a little by putting all those
         * different NLM rpc_clients into one single nlm_host object.
         * This would allow us to have one nlm_host per address.
         */
        chain = &nlm_hosts[hash];
        hlist_for_each_entry(host, pos, chain, h_hash) {
                if (!nlm_cmp_addr(&host->h_addr, sin))
                        continue;

                /* See if we have an NSM handle for this client */
                if (!nsm)
                        nsm = host->h_nsmhandle;

                if (host->h_proto != proto)
                        continue;
                if (host->h_version != version)
                        continue;
                if (host->h_server != server)
                        continue;
                if (!nlm_cmp_addr(&host->h_saddr, ssin))
                        continue;

                /* Move to head of hash chain. */
                hlist_del(&host->h_hash);
                hlist_add_head(&host->h_hash, chain);

                nlm_get_host(host);
                goto out;
        }
        if (nsm)
                atomic_inc(&nsm->sm_count);

        host = NULL;

        /* Sadly, the host isn't in our hash table yet. See if
         * we have an NSM handle for it. If not, create one.
         */
        if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
                goto out;

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host) {
                nsm_release(nsm);
                goto out;
        }
        host->h_name       = nsm->sm_name;
        host->h_addr       = *sin;
        host->h_addr.sin_port = 0;       /* ouch! */
        host->h_saddr      = *ssin;
        host->h_version    = version;
        host->h_proto      = proto;
        host->h_rpcclnt    = NULL;
        mutex_init(&host->h_mutex);
        host->h_nextrebind = jiffies + NLM_HOST_REBIND;
        host->h_expires    = jiffies + NLM_HOST_EXPIRE;
        atomic_set(&host->h_count, 1);
        init_waitqueue_head(&host->h_gracewait);
        init_rwsem(&host->h_rwsem);
        host->h_state      = 0;                  /* pseudo NSM state */
        host->h_nsmstate   = 0;                  /* real NSM state */
        host->h_nsmhandle  = nsm;
        host->h_server     = server;
        hlist_add_head(&host->h_hash, chain);
        INIT_LIST_HEAD(&host->h_lockowners);
        spin_lock_init(&host->h_lock);
        INIT_LIST_HEAD(&host->h_granted);
        INIT_LIST_HEAD(&host->h_reclaim);

        if (++nrhosts > NLM_HOST_MAX)
                next_gc = 0;

out:
        mutex_unlock(&nlm_host_mutex);
        return host;
}
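
/*
 * Both paths out of a successful lookup hand the caller a counted
 * reference: nlm_get_host() bumps h_count on a cache hit, and a
 * freshly allocated host starts with h_count set to 1. Every
 * successful call must therefore be paired with nlm_release_host().
 */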

/*
 * Destroy a host
 */
static void
nlm_destroy_host(struct nlm_host *host)
{
        struct rpc_clnt *clnt;

        BUG_ON(!list_empty(&host->h_lockowners));
        BUG_ON(atomic_read(&host->h_count));

        /*
         * Release NSM handle and unmonitor host.
         */
        nsm_unmonitor(host);

        clnt = host->h_rpcclnt;
        if (clnt != NULL)
                rpc_shutdown_client(clnt);
        kfree(host);
}

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
                        const char *hostname, int hostname_len)
{
        struct sockaddr_in ssin = {0};

        return nlm_lookup_host(0, sin, proto, version,
                               hostname, hostname_len, &ssin);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp,
                        const char *hostname, int hostname_len)
{
        struct sockaddr_in ssin = {0};

        ssin.sin_addr = rqstp->rq_daddr.addr;
        return nlm_lookup_host(1, svc_addr_in(rqstp),
                               rqstp->rq_prot, rqstp->rq_vers,
                               hostname, hostname_len, &ssin);
}
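
/*
 * The two wrappers differ only in the server flag and the source
 * address: the client side leaves ssin zeroed, while the server side
 * pins ssin to the local address the request arrived on, so that the
 * GRANTED callback to the client is sent from the address the client
 * originally contacted.
 */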

/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
        struct rpc_clnt *clnt;

        dprintk("lockd: nlm_bind_host("NIPQUAD_FMT"->"NIPQUAD_FMT")\n",
                        NIPQUAD(host->h_saddr.sin_addr),
                        NIPQUAD(host->h_addr.sin_addr));

        /* Lock host handle */
        mutex_lock(&host->h_mutex);

        /* If we've already created an RPC client, check whether
         * RPC rebind is required
         */
        if ((clnt = host->h_rpcclnt) != NULL) {
                if (time_after_eq(jiffies, host->h_nextrebind)) {
                        rpc_force_rebind(clnt);
                        host->h_nextrebind = jiffies + NLM_HOST_REBIND;
                        dprintk("lockd: next rebind in %ld jiffies\n",
                                        host->h_nextrebind - jiffies);
                }
        } else {
                unsigned long increment = nlmsvc_timeout;
                struct rpc_timeout timeparms = {
                        .to_initval     = increment,
                        .to_increment   = increment,
                        .to_maxval      = increment * 6UL,
                        .to_retries     = 5U,
                };
                struct rpc_create_args args = {
                        .protocol       = host->h_proto,
                        .address        = (struct sockaddr *)&host->h_addr,
                        .addrsize       = sizeof(host->h_addr),
                        .saddress       = (struct sockaddr *)&host->h_saddr,
                        .timeout        = &timeparms,
                        .servername     = host->h_name,
                        .program        = &nlm_program,
                        .version        = host->h_version,
                        .authflavor     = RPC_AUTH_UNIX,
                        .flags          = (RPC_CLNT_CREATE_HARDRTRY |
                                           RPC_CLNT_CREATE_AUTOBIND),
                };

                clnt = rpc_create(&args);
                if (!IS_ERR(clnt))
                        host->h_rpcclnt = clnt;
                else {
                        printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
                        clnt = NULL;
                }
        }

        mutex_unlock(&host->h_mutex);
        return clnt;
}
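
/*
 * Assuming the usual lockd defaults (nlm_timeout of 10 seconds, so
 * nlmsvc_timeout is about 10 * HZ), the timeout ladder above starts
 * at 10s, grows by 10s per retransmission up to the 60s cap, and
 * gives up after 5 retries.
 */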

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
        dprintk("lockd: rebind host %s\n", host->h_name);
        if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
                rpc_force_rebind(host->h_rpcclnt);
                host->h_nextrebind = jiffies + NLM_HOST_REBIND;
        }
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
        if (host) {
                dprintk("lockd: get host %s\n", host->h_name);
                atomic_inc(&host->h_count);
                host->h_expires = jiffies + NLM_HOST_EXPIRE;
        }
        return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
        if (host != NULL) {
                dprintk("lockd: release host %s\n", host->h_name);
                BUG_ON(atomic_read(&host->h_count) < 0);
                if (atomic_dec_and_test(&host->h_count)) {
                        BUG_ON(!list_empty(&host->h_lockowners));
                        BUG_ON(!list_empty(&host->h_granted));
                        BUG_ON(!list_empty(&host->h_reclaim));
                }
        }
}
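
/*
 * Note that nlm_release_host() never frees the host itself, even when
 * the count reaches zero; it only sanity-checks the lists. Actual
 * destruction is deferred to nlm_gc_hosts(), which reaps hosts whose
 * count is zero and whose h_expires deadline has passed.
 */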

/*
 * We were notified that the host indicated by address &sin
 * has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct sockaddr_in *sin,
                                const char *hostname, int hostname_len,
                                u32 new_state)
{
        struct hlist_head *chain;
        struct hlist_node *pos;
        struct nsm_handle *nsm;
        struct nlm_host *host;

        dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
                        hostname, NIPQUAD(sin->sin_addr));

        /* Find the NSM handle for this peer */
        if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
                return;

        /* When reclaiming locks on this peer, make sure that
         * we set up a new notification */
        nsm->sm_monitored = 0;

        /* Mark all hosts tied to this NSM state as having rebooted.
         * We run the loop repeatedly, because we drop the host table
         * lock for this.
         * To avoid processing a host several times, we match the nsmstate.
         */
again:  mutex_lock(&nlm_host_mutex);
        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry(host, pos, chain, h_hash) {
                        if (host->h_nsmhandle == nsm
                         && host->h_nsmstate != new_state) {
                                host->h_nsmstate = new_state;
                                host->h_state++;

                                nlm_get_host(host);
                                mutex_unlock(&nlm_host_mutex);

                                if (host->h_server) {
                                        /* We're the server for this peer;
                                         * just ditch all the locks it held. */
                                        nlmsvc_free_host_resources(host);
                                } else {
                                        /* The peer is the server; initiate
                                         * lock recovery. */
                                        nlmclnt_recovery(host);
                                }

                                nlm_release_host(host);
                                goto again;
                        }
                }
        }

        mutex_unlock(&nlm_host_mutex);
}
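
/*
 * The "goto again" restart is what makes dropping nlm_host_mutex safe
 * here: once the mutex is released to call into the possibly blocking
 * recovery paths, the hash chains may change, so the scan starts over.
 * Hosts already processed are skipped on the rescan because their
 * h_nsmstate now equals new_state.
 */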

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
        struct hlist_head *chain;
        struct hlist_node *pos;
        struct nlm_host *host;

        dprintk("lockd: shutting down host module\n");
        mutex_lock(&nlm_host_mutex);

        /* First, make all hosts eligible for gc */
        dprintk("lockd: nuking all hosts...\n");
        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry(host, pos, chain, h_hash)
                        host->h_expires = jiffies - 1;
        }

        /* Then, perform a garbage collection pass */
        nlm_gc_hosts();
        mutex_unlock(&nlm_host_mutex);

        /* complain if any hosts are left */
        if (nrhosts) {
                printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
                dprintk("lockd: %d hosts left:\n", nrhosts);
                for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                        hlist_for_each_entry(host, pos, chain, h_hash) {
                                dprintk("       %s (cnt %d use %d exp %ld)\n",
                                        host->h_name, atomic_read(&host->h_count),
                                        host->h_inuse, host->h_expires);
                        }
                }
        }
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
        struct hlist_head *chain;
        struct hlist_node *pos, *next;
        struct nlm_host *host;

        dprintk("lockd: host garbage collection\n");
        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry(host, pos, chain, h_hash)
                        host->h_inuse = 0;
        }

        /* Mark all hosts that hold locks, blocks or shares */
        nlmsvc_mark_resources();

        for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
                hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
                        if (atomic_read(&host->h_count) || host->h_inuse
                         || time_before(jiffies, host->h_expires)) {
                                dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
                                        host->h_name, atomic_read(&host->h_count),
                                        host->h_inuse, host->h_expires);
                                continue;
                        }
                        dprintk("lockd: delete host %s\n", host->h_name);
                        hlist_del_init(&host->h_hash);

                        nlm_destroy_host(host);
                        nrhosts--;
                }
        }

        next_gc = jiffies + NLM_HOST_COLLECT;
}
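
/*
 * GC runs lazily: nlm_lookup_host() invokes nlm_gc_hosts() whenever
 * jiffies passes next_gc, and nlm_lookup_host() zeroes next_gc once
 * nrhosts exceeds NLM_HOST_MAX, forcing a pass on the very next
 * lookup. A host survives a pass if it is still referenced, was
 * marked in use by nlmsvc_mark_resources(), or has not yet expired.
 */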


/*
 * Manage NSM handles
 */
static LIST_HEAD(nsm_handles);
static DEFINE_MUTEX(nsm_mutex);

static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
                const char *hostname, int hostname_len,
                int create)
{
        struct nsm_handle *nsm = NULL;
        struct list_head *pos;

        if (!sin)
                return NULL;

        if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
                if (printk_ratelimit()) {
                        printk(KERN_WARNING "Invalid hostname \"%.*s\" "
                                            "in NFS lock request\n",
                                hostname_len, hostname);
                }
                return NULL;
        }

        mutex_lock(&nsm_mutex);
        list_for_each(pos, &nsm_handles) {
                nsm = list_entry(pos, struct nsm_handle, sm_link);

                if (hostname && nsm_use_hostnames) {
                        if (strlen(nsm->sm_name) != hostname_len
                         || memcmp(nsm->sm_name, hostname, hostname_len))
                                continue;
                } else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
                        continue;
                atomic_inc(&nsm->sm_count);
                goto out;
        }

        if (!create) {
                nsm = NULL;
                goto out;
        }

        nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
        if (nsm != NULL) {
                nsm->sm_addr = *sin;
                nsm->sm_name = (char *) (nsm + 1);
                memcpy(nsm->sm_name, hostname, hostname_len);
                nsm->sm_name[hostname_len] = '\0';
                atomic_set(&nsm->sm_count, 1);

                list_add(&nsm->sm_link, &nsm_handles);
        }

out:
        mutex_unlock(&nsm_mutex);
        return nsm;
}
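
/*
 * Two details of __nsm_find() are worth spelling out: hostnames
 * containing '/' are rejected because rpc.statd uses the name to
 * build a file name in its on-disk monitor directory, and sm_name
 * points into the same kzalloc'd block, directly after the
 * nsm_handle struct, so the single kfree() in nsm_release() frees
 * both the handle and the name.
 */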

static struct nsm_handle *
nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
{
        return __nsm_find(sin, hostname, hostname_len, 1);
}

/*
 * Release an NSM handle
 */
void
nsm_release(struct nsm_handle *nsm)
{
        if (!nsm)
                return;
        if (atomic_dec_and_test(&nsm->sm_count)) {
                mutex_lock(&nsm_mutex);
                if (atomic_read(&nsm->sm_count) == 0) {
                        list_del(&nsm->sm_link);
                        kfree(nsm);
                }
                mutex_unlock(&nsm_mutex);
        }
}
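
/*
 * The second sm_count test under nsm_mutex is not redundant: the
 * atomic_dec_and_test() above runs outside the mutex, so a concurrent
 * __nsm_find() may find the handle on the list and re-raise the count
 * before we acquire the lock. Only if the count is still zero with
 * the list locked is it safe to unlink and free the handle.
 */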
