or1k_soc_on_altera_embedded_dev_kit/trunk/linux-2.6/linux-2.6.24/fs/lockd/clntlock.c, rev 19
https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/smp_lock.h>

#define NLMDBG_FACILITY         NLMDBG_CLIENT

/*
 * Local function prototypes
 */
static int                      reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

/*
 * This is the representation of a blocked client lock.
 */
struct nlm_wait {
        struct list_head        b_list;         /* linked list */
        wait_queue_head_t       b_wait;         /* where to wait on */
        struct nlm_host *       b_host;
        struct file_lock *      b_lock;         /* local file lock */
        unsigned short          b_reclaim;      /* got to reclaim lock */
        __be32                  b_status;       /* grant callback status */
};

static LIST_HEAD(nlm_blocked);
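
/*
 * Lifecycle of an nlm_wait entry, as implemented below:
 * nlmclnt_prepare_block() allocates the entry and puts it on nlm_blocked
 * before the lock request goes out, nlmclnt_block() sleeps on b_wait until
 * b_status leaves nlm_lck_blocked, nlmclnt_grant() matches an incoming
 * GRANTED callback against the list and wakes the sleeper, and
 * nlmclnt_finish_block() unlinks and frees the entry.
 */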

/*
 * Queue up a lock for blocking so that the GRANTED request can see it
 */
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
{
        struct nlm_wait *block;

        block = kmalloc(sizeof(*block), GFP_KERNEL);
        if (block != NULL) {
                block->b_host = host;
                block->b_lock = fl;
                init_waitqueue_head(&block->b_wait);
                block->b_status = nlm_lck_blocked;
                list_add(&block->b_list, &nlm_blocked);
        }
        return block;
}
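
/*
 * Undo nlmclnt_prepare_block(): unlink the wait entry from nlm_blocked
 * and free it.  A NULL block is a no-op.
 */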
void nlmclnt_finish_block(struct nlm_wait *block)
{
        if (block == NULL)
                return;
        list_del(&block->b_list);
        kfree(block);
}

/*
 * Block on a lock
 */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
        long ret;

        /* A broken server might ask us to block even if we didn't
         * request it. Just say no!
         */
        if (block == NULL)
                return -EAGAIN;

        /* Go to sleep waiting for GRANT callback. Some servers seem
         * to lose callbacks, however, so we're going to poll from
         * time to time just to make sure.
         *
         * For now, the retry frequency is pretty high; normally
         * a 1 minute timeout would do. See the comment before
         * nlmclnt_lock for an explanation.
         */
        ret = wait_event_interruptible_timeout(block->b_wait,
                        block->b_status != nlm_lck_blocked,
                        timeout);
        if (ret < 0)
                return -ERESTARTSYS;
        req->a_res.status = block->b_status;
        return 0;
}
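
/*
 * Illustrative sketch (not part of the original file): how the three
 * helpers above are typically combined by the NLM client lock path.
 * example_blocking_lock() and my_send_lock_request() are hypothetical
 * names used only for this sketch; the real caller lives in
 * fs/lockd/clntproc.c.
 */
#if 0
static int example_blocking_lock(struct nlm_host *host, struct nlm_rqst *req,
                                 struct file_lock *fl)
{
        struct nlm_wait *block;
        int status;

        /* Register the wait entry before sending the request, so a
         * GRANTED callback that races with the reply is not lost. */
        block = nlmclnt_prepare_block(host, fl);
        for (;;) {
                status = my_send_lock_request(req, fl); /* hypothetical RPC call */
                if (status < 0)
                        break;
                if (req->a_res.status != nlm_lck_blocked)
                        break;
                /* Server said "blocked": sleep until nlmclnt_grant()
                 * updates b_status, or until the poll timeout expires. */
                status = nlmclnt_block(block, req, 30 * HZ);
                if (status < 0)
                        break;
        }
        nlmclnt_finish_block(block);
        return status;
}
#endif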

/*
 * The server lockd has called us back to tell us the lock was granted
 */
__be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
{
        const struct file_lock *fl = &lock->fl;
        const struct nfs_fh *fh = &lock->fh;
        struct nlm_wait *block;
        __be32 res = nlm_lck_denied;

        /*
         * Look up blocked request based on arguments.
         * Warning: must not use cookie to match it!
         */
        list_for_each_entry(block, &nlm_blocked, b_list) {
                struct file_lock *fl_blocked = block->b_lock;

                if (fl_blocked->fl_start != fl->fl_start)
                        continue;
                if (fl_blocked->fl_end != fl->fl_end)
                        continue;
                /*
                 * Careful! The NLM server will return the 32-bit "pid" that
                 * we put on the wire: in this case the lockowner "pid".
                 */
                if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
                        continue;
                if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
                        continue;
                if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode), fh) != 0)
                        continue;
                /* Alright, we found a lock. Set the return status
                 * and wake up the caller
                 */
                block->b_status = nlm_granted;
                wake_up(&block->b_wait);
                res = nlm_granted;
        }
        return res;
}

/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host)
{
        if (!host->h_reclaiming++) {
                nlm_get_host(host);
                __module_get(THIS_MODULE);
                if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0)
                        module_put(THIS_MODULE);
        }
}
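
/*
 * Reclaimer thread body: spawned by nlmclnt_recovery() after a server
 * reboot.  It re-establishes every lock previously granted by 'host'
 * and then wakes any process still blocked on a lock from that host.
 */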
static int
reclaimer(void *ptr)
{
        struct nlm_host   *host = (struct nlm_host *) ptr;
        struct nlm_wait   *block;
        struct file_lock *fl, *next;
        u32 nsmstate;

        daemonize("%s-reclaim", host->h_name);
        allow_signal(SIGKILL);

        down_write(&host->h_rwsem);

        /* This one ensures that our parent doesn't terminate while the
         * reclaim is in progress */
        lock_kernel();
        lockd_up(0); /* note: this cannot fail as lockd is already running */

        dprintk("lockd: reclaiming locks for host %s\n", host->h_name);

restart:
        nsmstate = host->h_nsmstate;

        /* Force a portmap getport - the peer's lockd will
         * most likely end up on a different port.
         */
        host->h_nextrebind = jiffies;
        nlm_rebind_host(host);

        /* First, reclaim all locks that have been granted. */
        list_splice_init(&host->h_granted, &host->h_reclaim);
        list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
                list_del_init(&fl->fl_u.nfs_fl.list);

                /* Why are we leaking memory here? --okir */
                if (signalled())
                        continue;
                if (nlmclnt_reclaim(host, fl) != 0)
                        continue;
                list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
                if (host->h_nsmstate != nsmstate) {
                        /* Argh! The server rebooted again! */
                        goto restart;
                }
        }

        host->h_reclaiming = 0;
        up_write(&host->h_rwsem);
        dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);

        /* Now, wake up all processes that sleep on a blocked lock */
        list_for_each_entry(block, &nlm_blocked, b_list) {
                if (block->b_host == host) {
                        block->b_status = nlm_lck_denied_grace_period;
                        wake_up(&block->b_wait);
                }
        }

        /* Release host handle after use */
        nlm_release_host(host);
        lockd_down();
        unlock_kernel();
        module_put_and_exit(0);
}
