OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /or1k/trunk/linux/linux-2.4/fs/lockd
    from Rev 1275 to Rev 1765
    Reverse comparison

Rev 1275 → Rev 1765

/svcshare.c
0,0 → 1,111
/*
* linux/fs/lockd/svcshare.c
*
* Management of DOS shares.
*
* Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/slab.h>
 
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
 
/* True iff SHARE was created by the owner identified by netobj OH. */
static inline int
nlm_cmp_owner(struct nlm_share *share, struct xdr_netobj *oh)
{
	if (share->s_owner.len != oh->len)
		return 0;
	return memcmp(share->s_owner.data, oh->data, oh->len) == 0;
}
 
/*
 * Create or update a DOS share reservation for (host, owner) on FILE.
 *
 * If this host/owner pair already holds a share, just refresh its
 * access/deny modes.  Otherwise the request is checked against every
 * existing share for a deny-mode conflict before a new nlm_share is
 * allocated, with the owner handle stored inline after the struct.
 *
 * Returns nlm_granted, nlm_lck_denied on a mode conflict, or
 * nlm_lck_denied_nolocks if allocation fails.
 */
u32
nlmsvc_share_file(struct nlm_host *host, struct nlm_file *file,
					struct nlm_args *argp)
{
	struct nlm_share	*share;
	struct xdr_netobj	*oh = &argp->lock.oh;
	u8			*ohdata;

	for (share = file->f_shares; share; share = share->s_next) {
		if (share->s_host == host && nlm_cmp_owner(share, oh))
			goto update;
		/* Requested access clashes with an existing deny mode,
		 * or requested deny mode clashes with existing access. */
		if ((argp->fsm_access & share->s_mode)
		 || (argp->fsm_mode & share->s_access))
			return nlm_lck_denied;
	}

	/* One allocation holds the struct plus the owner handle bytes;
	 * no cast needed on kmalloc's void * return value. */
	share = kmalloc(sizeof(*share) + oh->len, GFP_KERNEL);
	if (share == NULL)
		return nlm_lck_denied_nolocks;

	/* Copy owner handle into the trailing bytes */
	ohdata = (u8 *) (share + 1);
	memcpy(ohdata, oh->data, oh->len);

	share->s_file		= file;
	share->s_host		= host;
	share->s_owner.data	= ohdata;
	share->s_owner.len	= oh->len;
	share->s_next		= file->f_shares;
	file->f_shares		= share;

update:
	share->s_access = argp->fsm_access;
	share->s_mode   = argp->fsm_mode;
	return nlm_granted;
}
 
/*
 * Delete a share.
 * Removes the share held by (host, owner) on FILE, if any.
 */
u32
nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
struct nlm_args *argp)
{
struct nlm_share *share, **shpp;
struct xdr_netobj *oh = &argp->lock.oh;

/* Walk the singly linked list through a pointer-to-pointer so the
 * matching entry can be unlinked without a separate 'prev'. */
for (shpp = &file->f_shares; (share = *shpp); shpp = &share->s_next) {
if (share->s_host == host && nlm_cmp_owner(share, oh)) {
*shpp = share->s_next;
kfree(share);
return nlm_granted;
}
}

/* X/Open spec says return success even if there was no
* corresponding share. */
return nlm_granted;
}
 
/*
 * Traverse all shares for a given file (and host).
 * NLM_ACT_CHECK is handled by nlmsvc_inspect_file.
 *
 * NLM_ACT_MARK flags each share's host as in use (for GC);
 * NLM_ACT_UNLOCK drops all shares held by 'host' (or by any host
 * when 'host' is NULL).  Always returns 0.
 */
int
nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action)
{
struct nlm_share *share, **shpp;

shpp = &file->f_shares;
while ((share = *shpp) != NULL) {
if (action == NLM_ACT_MARK)
share->s_host->h_inuse = 1;
else if (action == NLM_ACT_UNLOCK) {
if (host == NULL || host == share->s_host) {
/* Unlink and free; do not advance shpp, the next
 * entry has moved into *shpp. */
*shpp = share->s_next;
kfree(share);
continue;
}
}
shpp = &share->s_next;
}

return 0;
}
/svcproc.c
0,0 → 1,616
/*
* linux/fs/lockd/svcproc.c
*
* Lockd server procedures. We don't implement the NLM_*_RES
* procedures because we don't use the async procedures.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfsd/nfsd.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
#include <linux/lockd/sm_inter.h>
 
 
#define NLMDBG_FACILITY NLMDBG_CLIENT
 
static u32 nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
static void nlmsvc_callback_exit(struct rpc_task *);
 
#ifdef CONFIG_LOCKD_V4
/*
 * Map an NLMv4 status code onto its NLM v1-v3 equivalent for replies
 * to pre-v4 clients.  v4 replies pass through untouched.
 */
static u32
cast_to_nlm(u32 status, u32 vers)
{
	/* Note: status is assumed to be in network byte order !!! */
	if (vers == 4)
		return status;

	switch (status) {
	case nlm_granted:
	case nlm_lck_denied:
	case nlm_lck_denied_nolocks:
	case nlm_lck_blocked:
	case nlm_lck_denied_grace_period:
		/* Valid in every protocol version */
		return status;
	case nlm4_deadlock:
		return nlm_lck_denied;
	default:
		/* v4-only status with no older equivalent */
		return nlm_lck_denied_nolocks;
	}
}
#define cast_status(status) (cast_to_nlm(status, rqstp->rq_vers))
#else
#define cast_status(status) (status)
#endif
 
/*
* Obtain client and file from arguments
*/
static u32
nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
struct nlm_host **hostp, struct nlm_file **filp)
{
struct nlm_host *host = NULL;
struct nlm_file *file = NULL;
struct nlm_lock *lock = &argp->lock;
u32 error;
 
/* nfsd callbacks must have been installed for this procedure */
if (!nlmsvc_ops)
return nlm_lck_denied_nolocks;
 
/* Obtain handle for client host */
if (rqstp->rq_client == NULL) {
printk(KERN_NOTICE
"lockd: unauthenticated request from (%08x:%d)\n",
ntohl(rqstp->rq_addr.sin_addr.s_addr),
ntohs(rqstp->rq_addr.sin_port));
return nlm_lck_denied_nolocks;
}
 
/* Obtain host handle */
if (!(host = nlmsvc_lookup_host(rqstp))
|| (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
goto no_locks;
*hostp = host;
 
/* Obtain file pointer. Not used by FREE_ALL call. */
if (filp != NULL) {
if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
goto no_locks;
*filp = file;
 
/* Set up the missing parts of the file_lock structure */
lock->fl.fl_file = &file->f_file;
lock->fl.fl_owner = (fl_owner_t) host;
}
 
return 0;
 
no_locks:
if (host)
nlm_release_host(host);
return nlm_lck_denied_nolocks;
}
 
/*
 * NULL: Test for presence of service
 */
static int
nlmsvc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	/* Ping procedure: nothing to do beyond acknowledging the call. */
	dprintk("lockd: NULL called\n");
	return rpc_success;
}
 
/*
 * TEST: Check for conflicting lock
 */
static int
nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_file *file;

dprintk("lockd: TEST called\n");
/* Echo the client's cookie so it can match our reply. */
resp->cookie = argp->cookie;

/* Don't accept test requests during grace period */
if (nlmsvc_grace_period) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}

/* Obtain client and file (both referenced on success) */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return rpc_success;

/* Now check for conflicting locks */
resp->status = cast_status(nlmsvc_testlock(file, &argp->lock, &resp->lock));

dprintk("lockd: TEST status %d vers %d\n",
ntohl(resp->status), rqstp->rq_vers);
/* Drop the references taken by nlmsvc_retrieve_args(). */
nlm_release_host(host);
nlm_release_file(file);
return rpc_success;
}
 
/* LOCK: acquire (or queue, if argp->block) a lock on the given file. */
static int
nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_file *file;

dprintk("lockd: LOCK called\n");

resp->cookie = argp->cookie;

/* Don't accept new lock requests during grace period;
 * reclaims of pre-crash locks are still allowed. */
if (nlmsvc_grace_period && !argp->reclaim) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}

/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return rpc_success;

#if 0
/* If supplied state doesn't match current state, we assume it's
* an old request that time-warped somehow. Any error return would
* do in this case because it's irrelevant anyway.
*
* NB: We don't retrieve the remote host's state yet.
*/
if (host->h_nsmstate && host->h_nsmstate != argp->state) {
resp->status = nlm_lck_denied_nolocks;
} else
#endif

/* Now try to lock the file.
 * NOTE: with the block above compiled in, this statement becomes
 * the 'else' body - do not insert code between #endif and here. */
resp->status = cast_status(nlmsvc_lock(rqstp, file, &argp->lock,
argp->block, &argp->cookie));

dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
nlm_release_host(host);
nlm_release_file(file);
return rpc_success;
}
 
/* CANCEL: abort a blocked lock request previously queued by LOCK. */
static int
nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_file *file;

dprintk("lockd: CANCEL called\n");

resp->cookie = argp->cookie;

/* Don't accept requests during grace period */
if (nlmsvc_grace_period) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}

/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return rpc_success;

/* Try to cancel request. */
resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));

dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
nlm_release_host(host);
nlm_release_file(file);
return rpc_success;
}
 
/*
 * UNLOCK: release a lock
 */
static int
nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_file *file;

dprintk("lockd: UNLOCK called\n");

resp->cookie = argp->cookie;

/* Don't accept new lock requests during grace period */
if (nlmsvc_grace_period) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}

/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return rpc_success;

/* Now try to remove the lock */
resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));

dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
/* Drop the references taken by nlmsvc_retrieve_args(). */
nlm_release_host(host);
nlm_release_file(file);
return rpc_success;
}
 
/*
 * GRANTED: A server calls us to tell that a process' lock request
 * was granted
 */
static int
nlmsvc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
				struct nlm_res *resp)
{
	dprintk("lockd: GRANTED called\n");

	/* Echo the cookie, then hand the grant to the client side. */
	resp->cookie = argp->cookie;
	resp->status = nlmclnt_grant(&argp->lock);

	dprintk("lockd: GRANTED status %d\n", ntohl(resp->status));
	return rpc_success;
}
 
/*
 * `Async' versions of the above service routines. They aren't really,
 * because we send the callback before the reply proper. I hope this
 * doesn't break any clients.
 */
static int
nlmsvc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
				void *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: TEST_MSG called\n");

	/* Zero the stack result before the handler fills it in. */
	memset(&res, 0, sizeof(res));

	stat = nlmsvc_proc_test(rqstp, argp, &res);
	if (stat == 0)
		stat = nlmsvc_callback(rqstp, NLMPROC_TEST_RES, &res);
	return stat;
}
 
/* Async LOCK: run the sync handler, then push the result via callback. */
static int
nlmsvc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
				void *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: LOCK_MSG called\n");

	/* Zero the stack result like TEST_MSG does, so no uninitialized
	 * bytes can ever reach the callback encoder. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlmsvc_proc_lock(rqstp, argp, &res)) == 0)
		stat = nlmsvc_callback(rqstp, NLMPROC_LOCK_RES, &res);
	return stat;
}
 
/* Async CANCEL: run the sync handler, then push the result via callback. */
static int
nlmsvc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
				void *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: CANCEL_MSG called\n");

	/* Zero the stack result for parity with TEST_MSG. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlmsvc_proc_cancel(rqstp, argp, &res)) == 0)
		stat = nlmsvc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
	return stat;
}
 
/* Async UNLOCK: run the sync handler, then push the result via callback. */
static int
nlmsvc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
				void *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: UNLOCK_MSG called\n");

	/* Zero the stack result for parity with TEST_MSG. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlmsvc_proc_unlock(rqstp, argp, &res)) == 0)
		stat = nlmsvc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
	return stat;
}
 
/* Async GRANTED: run the sync handler, then push the result via callback. */
static int
nlmsvc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
				void *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: GRANTED_MSG called\n");

	/* Zero the stack result for parity with TEST_MSG. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlmsvc_proc_granted(rqstp, argp, &res)) == 0)
		stat = nlmsvc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
	return stat;
}
 
/*
 * SHARE: create a DOS share or alter existing share.
 */
static int
nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_file *file;

dprintk("lockd: SHARE called\n");

resp->cookie = argp->cookie;

/* Don't accept new lock requests during grace period */
if (nlmsvc_grace_period && !argp->reclaim) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}

/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return rpc_success;

/* Now try to create the share */
resp->status = cast_status(nlmsvc_share_file(host, file, argp));

dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
/* Drop the references taken by nlmsvc_retrieve_args(). */
nlm_release_host(host);
nlm_release_file(file);
return rpc_success;
}
 
/*
 * UNSHARE: Release a DOS share.
 */
static int
nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_file *file;

dprintk("lockd: UNSHARE called\n");

resp->cookie = argp->cookie;

/* Don't accept requests during grace period */
if (nlmsvc_grace_period) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}

/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
return rpc_success;

/* Now try to unshare the file */
resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));

dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
/* Drop the references taken by nlmsvc_retrieve_args(). */
nlm_release_host(host);
nlm_release_file(file);
return rpc_success;
}
 
/*
 * NM_LOCK: Create an unmonitored lock
 */
static int
nlmsvc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				struct nlm_res *resp)
{
	dprintk("lockd: NM_LOCK called\n");

	/* An unmonitored lock is an ordinary LOCK with monitoring off. */
	argp->monitor = 0;
	return nlmsvc_proc_lock(rqstp, argp, resp);
}
 
/*
 * FREE_ALL: Release all locks and shares held by client
 */
static int
nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
struct nlm_host *host;

/* Obtain client (no file: filp == NULL skips the file lookup) */
if (nlmsvc_retrieve_args(rqstp, argp, &host, NULL))
return rpc_success;

/* Errors are deliberately ignored: reply is void either way. */
nlmsvc_free_host_resources(host);
nlm_release_host(host);
return rpc_success;
}
 
/*
 * SM_NOTIFY: private callback from statd (not part of official NLM proto)
 *
 * statd tells us a peer rebooted: reclaim our client-side locks on
 * that server, and (if we are also an NFS server) drop everything
 * that peer held here.
 */
static int
nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
				void *resp)
{
	struct sockaddr_in	saddr = rqstp->rq_addr;
	int			vers = rqstp->rq_vers;
	int			prot = rqstp->rq_prot;
	struct nlm_host		*host;

	dprintk("lockd: SM_NOTIFY called\n");

	/* Only accept the callback from the local statd: loopback
	 * source address on a privileged port. */
	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
	 || ntohs(saddr.sin_port) >= 1024) {
		printk(KERN_WARNING
			"lockd: rejected NSM callback from %08x:%d\n",
			ntohl(rqstp->rq_addr.sin_addr.s_addr),
			ntohs(rqstp->rq_addr.sin_port));
		return rpc_system_err;
	}

	/* Obtain the host pointer for this NFS server and try to
	 * reclaim all locks we hold on this server.
	 */
	saddr.sin_addr.s_addr = argp->addr;
	if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
		nlmclnt_recovery(host, argp->state);
		nlm_release_host(host);
	}

	/* If we run on an NFS server, delete all locks held by the client */
	if (nlmsvc_ops != NULL) {
		struct svc_client	*clnt;
		saddr.sin_addr.s_addr = argp->addr;
		if ((clnt = nlmsvc_ops->exp_getclient(&saddr)) != NULL
		 && (host = nlm_lookup_host(clnt, &saddr, 0, 0)) != NULL) {
			nlmsvc_free_host_resources(host);
			/* Release only the reference taken right above.
			 * The old code released 'host' unconditionally,
			 * re-dropping the already-released client-side
			 * host when either lookup failed. */
			nlm_release_host(host);
		}
	}

	return rpc_success;
}
 
/*
* client sent a GRANTED_RES, let's remove the associated block
*/
static int
nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
void *resp)
{
if (!nlmsvc_ops)
return rpc_success;
 
dprintk("lockd: GRANTED_RES called\n");
 
nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status);
return rpc_success;
}
 
/*
 * This is the generic lockd callback for async RPC calls
 *
 * Allocates an nlm_rqst, takes a reference on the calling host and
 * fires off an async *_RES call; nlmsvc_callback_exit() frees both.
 */
static u32
nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
{
struct nlm_host *host;
struct nlm_rqst *call;

if (!(call = nlmclnt_alloc_call()))
return rpc_system_err;

host = nlmclnt_lookup_host(&rqstp->rq_addr,
rqstp->rq_prot, rqstp->rq_vers);
if (!host) {
kfree(call);
return rpc_system_err;
}

call->a_flags = RPC_TASK_ASYNC;
call->a_host = host;
/* NOTE(review): copies an nlm_res into the a_args member -
 * presumably the encoder for *_RES procedures only reads the
 * res-shaped prefix; confirm against the nlm_rqst layout. */
memcpy(&call->a_args, resp, sizeof(*resp));

if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
goto error;

return rpc_success;
error:
/* On submit failure, undo the reference and the allocation. */
nlm_release_host(host);
kfree(call);
return rpc_system_err;
}
 
/* Completion handler for the async *_RES callback: log failures,
 * then drop the host reference and free the call structure. */
static void
nlmsvc_callback_exit(struct rpc_task *task)
{
	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;

	if (task->tk_status < 0)
		dprintk("lockd: %4d callback failed (errno = %d)\n",
			task->tk_pid, -task->tk_status);

	nlm_release_host(call->a_host);
	kfree(call);
}
 
/*
* NLM Server procedures.
*/
 
/* We never reply to *_RES procedures and ignore their bodies. */
#define nlmsvc_encode_norep nlmsvc_encode_void
#define nlmsvc_decode_norep nlmsvc_decode_void
#define nlmsvc_decode_testres nlmsvc_decode_void
#define nlmsvc_decode_lockres nlmsvc_decode_void
#define nlmsvc_decode_unlockres nlmsvc_decode_void
#define nlmsvc_decode_cancelres nlmsvc_decode_void
#define nlmsvc_decode_grantedres nlmsvc_decode_void

/* Unused slots and async-result procedures map onto the NULL handler. */
#define nlmsvc_proc_none nlmsvc_proc_null
#define nlmsvc_proc_test_res nlmsvc_proc_null
#define nlmsvc_proc_lock_res nlmsvc_proc_null
#define nlmsvc_proc_cancel_res nlmsvc_proc_null
#define nlmsvc_proc_unlock_res nlmsvc_proc_null

/* Placeholder argument/result type for procedures that carry none. */
struct nlm_void { int dummy; };

/* Build one svc_procedure table entry: handler, XDR decode/encode,
 * argument/result struct sizes, and max reply size in 32-bit words. */
#define PROC(name, xargt, xrest, argt, rest, respsize) \
{ (svc_procfunc) nlmsvc_proc_##name, \
(kxdrproc_t) nlmsvc_decode_##xargt, \
(kxdrproc_t) nlmsvc_encode_##xrest, \
NULL, \
sizeof(struct nlm_##argt), \
sizeof(struct nlm_##rest), \
0, \
0, \
respsize, \
}

/* Reply size contributions, in 32-bit XDR words */
#define Ck (1+8) /* cookie */
#define St 1 /* status */
#define No (1+1024/4) /* Net Obj */
#define Rg 2 /* range - offset + size */
/* NLM v1-v3 dispatch table, indexed by procedure number.  Slot order
 * must match the protocol; 'none' entries fill reserved numbers. */
struct svc_procedure nlmsvc_procedures[] = {
PROC(null, void, void, void, void, 1),
PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg),
PROC(lock, lockargs, res, args, res, Ck+St),
PROC(cancel, cancargs, res, args, res, Ck+St),
PROC(unlock, unlockargs, res, args, res, Ck+St),
PROC(granted, testargs, res, args, res, Ck+St),
PROC(test_msg, testargs, norep, args, void, 1),
PROC(lock_msg, lockargs, norep, args, void, 1),
PROC(cancel_msg, cancargs, norep, args, void, 1),
PROC(unlock_msg, unlockargs, norep, args, void, 1),
PROC(granted_msg, testargs, norep, args, void, 1),
PROC(test_res, testres, norep, res, void, 1),
PROC(lock_res, lockres, norep, res, void, 1),
PROC(cancel_res, cancelres, norep, res, void, 1),
PROC(unlock_res, unlockres, norep, res, void, 1),
PROC(granted_res, res, norep, res, void, 1),
/* statd callback */
PROC(sm_notify, reboot, void, reboot, void, 1),
PROC(none, void, void, void, void, 1),
PROC(none, void, void, void, void, 1),
PROC(none, void, void, void, void, 1),
PROC(share, shareargs, shareres, args, res, Ck+St+1),
PROC(unshare, shareargs, shareres, args, res, Ck+St+1),
PROC(nm_lock, lockargs, res, args, res, Ck+St),
PROC(free_all, notify, void, args, void, 0),

};
/xdr4.c
0,0 → 1,612
/*
* linux/fs/lockd/xdr4.c
*
* XDR support for lockd and the lock client.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
* Copyright (C) 1999, Trond Myklebust <trond.myklebust@fys.uio.no>
*/
 
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <linux/nfs.h>
 
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/stats.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
 
#define NLMDBG_FACILITY NLMDBG_XDR
 
/* Convert a 64-bit NLM offset to the kernel's loff_t.
 * A plain cast suffices since loff_t is 64 bits wide. */
static inline loff_t
s64_to_loff_t(__s64 offset)
{
return (loff_t)offset;
}
 
 
/* Clamp a local file offset into the range NLM4 can represent. */
static inline s64
loff_t_to_s64(loff_t offset)
{
	if (offset > NLM4_OFFSET_MAX)
		return NLM4_OFFSET_MAX;
	if (offset < -NLM4_OFFSET_MAX)
		return -NLM4_OFFSET_MAX;
	return offset;
}
 
/*
 * XDR functions for basic NLM types
 */
/* Decode an opaque cookie: length word, then padded data.
 * Returns the advanced buffer pointer, or NULL on a bad length. */
static u32 *
nlm4_decode_cookie(u32 *p, struct nlm_cookie *c)
{
unsigned int len;

len = ntohl(*p++);
if(len==0)
{
c->len=4;
memset(c->data, 0, 4); /* hockeypux brain damage */
}
else if(len<=NLM_MAXCOOKIELEN)
{
c->len=len;
memcpy(c->data, p, len);
/* advance past the data, rounded up to a 4-byte boundary */
p+=(len+3)>>2;
}
else
{
printk(KERN_NOTICE
"lockd: bad cookie size %d (only cookies under %d bytes are supported.)\n", len, NLM_MAXCOOKIELEN);
return NULL;
}
return p;
}
 
/* Encode an opaque cookie: length word, then data padded to 4 bytes. */
static u32 *
nlm4_encode_cookie(u32 *p, struct nlm_cookie *c)
{
	*p++ = htonl(c->len);
	memcpy(p, c->data, c->len);
	return p + ((c->len + 3) >> 2);
}
 
/* Decode a variable-length NFSv3-style file handle.
 * Returns the advanced pointer, or NULL if the size is too large. */
static u32 *
nlm4_decode_fh(u32 *p, struct nfs_fh *f)
{
/* zero first so short handles compare equal regardless of padding */
memset(f->data, 0, sizeof(f->data));
f->size = ntohl(*p++);
if (f->size > NFS_MAXFHSIZE) {
printk(KERN_NOTICE
"lockd: bad fhandle size %d (should be <=%d)\n",
f->size, NFS_MAXFHSIZE);
return NULL;
}
memcpy(f->data, p, f->size);
return p + XDR_QUADLEN(f->size);
}
 
/* Encode a file handle: length word, then data padded with zeroes. */
static u32 *
nlm4_encode_fh(u32 *p, struct nfs_fh *f)
{
*p++ = htonl(f->size);
/* zero the final word first so padding bytes never leak stack data */
if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
memcpy(p, f->data, f->size);
return p + XDR_QUADLEN(f->size);
}
 
/*
 * Encode and decode owner handle
 *
 * Owner handles are plain XDR netobjs; delegate to the generic helper.
 */
static u32 *
nlm4_decode_oh(u32 *p, struct xdr_netobj *oh)
{
	return xdr_decode_netobj(p, oh);
}
 
/* Encode an owner handle as a plain XDR netobj. */
static u32 *
nlm4_encode_oh(u32 *p, struct xdr_netobj *oh)
{
	return xdr_encode_netobj(p, oh);
}
 
/* Decode an nlm4_lock: caller name, fh, owner handle, pid, offset+length.
 * Fills in lock->fl; returns the advanced pointer or NULL on error. */
static u32 *
nlm4_decode_lock(u32 *p, struct nlm_lock *lock)
{
struct file_lock *fl = &lock->fl;
__s64 len, start, end;

if (!(p = xdr_decode_string_inplace(p, &lock->caller,
&lock->len, NLM_MAXSTRLEN))
|| !(p = nlm4_decode_fh(p, &lock->fh))
|| !(p = nlm4_decode_oh(p, &lock->oh)))
return NULL;

locks_init_lock(fl);
fl->fl_owner = current->files;
fl->fl_pid = ntohl(*p++);
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK; /* as good as anything else */
p = xdr_decode_hyper(p, &start);
p = xdr_decode_hyper(p, &len);
/* inclusive last byte; may wrap negative if start+len overflows */
end = start + len - 1;

fl->fl_start = s64_to_loff_t(start);

/* len == 0 means "to end of file"; a wrapped end is treated the same */
if (len == 0 || end < 0)
fl->fl_end = OFFSET_MAX;
else
fl->fl_end = s64_to_loff_t(end);
return p;
}
 
/*
 * Encode a lock as part of an NLM call
 *
 * Emits caller name, fh, owner handle, pid and the (offset, length)
 * pair; fl_end == OFFSET_MAX encodes as length 0 ("to end of file").
 * Returns NULL if the range cannot be represented in NLM4.
 */
static u32 *
nlm4_encode_lock(u32 *p, struct nlm_lock *lock)
{
struct file_lock *fl = &lock->fl;
__s64 start, len;

if (!(p = xdr_encode_string(p, lock->caller))
|| !(p = nlm4_encode_fh(p, &lock->fh))
|| !(p = nlm4_encode_oh(p, &lock->oh)))
return NULL;

if (fl->fl_start > NLM4_OFFSET_MAX
|| (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
return NULL;

*p++ = htonl(fl->fl_pid);

start = loff_t_to_s64(fl->fl_start);
if (fl->fl_end == OFFSET_MAX)
len = 0;
else
len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);

p = xdr_encode_hyper(p, start);
p = xdr_encode_hyper(p, len);

return p;
}
 
/*
 * Encode result of a TEST/TEST_MSG call
 *
 * Emits cookie and status; when the status is nlm_lck_denied it also
 * encodes the conflicting holder (exclusive flag, pid, owner handle,
 * offset and length).  Returns the advanced pointer or NULL on error.
 */
static u32 *
nlm4_encode_testres(u32 *p, struct nlm_res *resp)
{
	s64		start, len;

	dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp);
	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
		return NULL;	/* was '0' - this function returns a pointer */
	*p++ = resp->status;	/* already in network byte order */

	if (resp->status == nlm_lck_denied) {
		struct file_lock	*fl = &resp->lock.fl;

		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
		*p++ = htonl(fl->fl_pid);

		/* Encode owner handle. */
		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
			return NULL;

		start = loff_t_to_s64(fl->fl_start);
		if (fl->fl_end == OFFSET_MAX)
			len = 0;	/* lock runs to end of file */
		else
			len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
		p = xdr_encode_hyper(p, start);
		p = xdr_encode_hyper(p, len);
		dprintk("xdr: encode_testres (status %d pid %d type %d start %Ld end %Ld)\n",
			resp->status, fl->fl_pid, fl->fl_type,
			(long long)fl->fl_start, (long long)fl->fl_end);
	}

	dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp);
	return p;
}
 
 
/*
 * Check buffer bounds after decoding arguments
 *
 * True iff decoding did not run past the end of the received data.
 */
static int
xdr_argsize_check(struct svc_rqst *rqstp, u32 *p)
{
	struct svc_buf	*buf = &rqstp->rq_argbuf;

	return (p - buf->base) <= buf->buflen;
}
 
/* Record the reply length and verify it still fits the buffer. */
static int
xdr_ressize_check(struct svc_rqst *rqstp, u32 *p)
{
	struct svc_buf	*buf = &rqstp->rq_resbuf;

	buf->len = p - buf->base;
	return buf->len <= buf->buflen;
}
 
/*
 * First, the server side XDR functions
 */
/* Decode TEST arguments: cookie, exclusive flag, lock description. */
int
nlm4svc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
u32 exclusive;

if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
return 0;

exclusive = ntohl(*p++);
if (!(p = nlm4_decode_lock(p, &argp->lock)))
return 0;
/* decode_lock defaults to F_RDLCK; upgrade for exclusive requests */
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;

return xdr_argsize_check(rqstp, p);
}
 
/* Encode a TEST result and validate the reply buffer bounds. */
int
nlm4svc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm4_encode_testres(p, resp);
	if (p == NULL)
		return 0;
	return xdr_ressize_check(rqstp, p);
}
 
/* Decode LOCK arguments: cookie, block flag, exclusive flag, lock,
 * reclaim flag and NSM state. */
int
nlm4svc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
u32 exclusive;

if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
return 0;
argp->block = ntohl(*p++);
exclusive = ntohl(*p++);
if (!(p = nlm4_decode_lock(p, &argp->lock)))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
argp->reclaim = ntohl(*p++);
argp->state = ntohl(*p++);
argp->monitor = 1; /* monitor client by default */

return xdr_argsize_check(rqstp, p);
}
 
/* Decode CANCEL arguments: cookie, block flag, exclusive flag, lock. */
int
nlm4svc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
u32 exclusive;

if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
return 0;
argp->block = ntohl(*p++);
exclusive = ntohl(*p++);
if (!(p = nlm4_decode_lock(p, &argp->lock)))
return 0;
if (exclusive)
argp->lock.fl.fl_type = F_WRLCK;
return xdr_argsize_check(rqstp, p);
}
 
/* Decode UNLOCK arguments: cookie plus the lock description;
 * an unlock is represented internally as a lock of type F_UNLCK. */
int
nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	if (!(p = nlm4_decode_cookie(p, &argp->cookie)))
		return 0;
	if (!(p = nlm4_decode_lock(p, &argp->lock)))
		return 0;
	argp->lock.fl.fl_type = F_UNLCK;
	return xdr_argsize_check(rqstp, p);
}
 
/* Decode SHARE/UNSHARE arguments: cookie, caller, fh, owner handle,
 * then the fsm mode and access words. */
int
nlm4svc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;

memset(lock, 0, sizeof(*lock));
locks_init_lock(&lock->fl);
/* shares carry no pid; use an impossible one */
lock->fl.fl_pid = ~(u32) 0;

if (!(p = nlm4_decode_cookie(p, &argp->cookie))
|| !(p = xdr_decode_string_inplace(p, &lock->caller,
&lock->len, NLM_MAXSTRLEN))
|| !(p = nlm4_decode_fh(p, &lock->fh))
|| !(p = nlm4_decode_oh(p, &lock->oh)))
return 0;
argp->fsm_mode = ntohl(*p++);
argp->fsm_access = ntohl(*p++);
return xdr_argsize_check(rqstp, p);
}
 
/* Encode a SHARE/UNSHARE result: cookie, status, dummy sequence. */
int
nlm4svc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm4_encode_cookie(p, &resp->cookie);
	if (p == NULL)
		return 0;
	*p++ = resp->status;	/* already in network byte order */
	*p++ = xdr_zero;	/* sequence argument */
	return xdr_ressize_check(rqstp, p);
}
 
/* Encode a generic NLM result: cookie followed by status. */
int
nlm4svc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm4_encode_cookie(p, &resp->cookie);
	if (p == NULL)
		return 0;
	*p++ = resp->status;	/* already in network byte order */
	return xdr_ressize_check(rqstp, p);
}
 
/* Decode FREE_ALL arguments: caller name plus an NSM state word. */
int
nlm4svc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	p = xdr_decode_string_inplace(p, &lock->caller,
				      &lock->len, NLM_MAXSTRLEN);
	if (p == NULL)
		return 0;
	argp->state = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}
 
/* Decode an SM_NOTIFY from statd: monitored host name, new state,
 * and the peer address. */
int
nlm4svc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
{
if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
return 0;
argp->state = ntohl(*p++);
/* Preserve the address in network byte order */
argp->addr = *p++;
return xdr_argsize_check(rqstp, p);
}
 
/* Decode an incoming *_RES body: cookie and host-order status. */
int
nlm4svc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm4_decode_cookie(p, &resp->cookie);
	if (p == NULL)
		return 0;
	resp->status = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}
 
/* Decode a void argument: nothing to read, just bounds-check. */
int
nlm4svc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
{
return xdr_argsize_check(rqstp, p);
}
 
/* Encode a void reply: nothing to write, just record the length. */
int
nlm4svc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
{
return xdr_ressize_check(rqstp, p);
}
 
/*
 * Now, the client side XDR functions
 */
/* Encode a void call body: just finalize the send buffer length. */
static int
nlm4clt_encode_void(struct rpc_rqst *req, u32 *p, void *ptr)
{
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
 
/* Decode a void reply: nothing to do. */
static int
nlm4clt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
{
return 0;
}
 
/* Encode TEST arguments: cookie, exclusive flag, lock.
 * Returns 0 or -EIO on encoding failure. */
static int
nlm4clt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;

if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
return -EIO;
*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
if (!(p = nlm4_encode_lock(p, lock)))
return -EIO;
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
 
/* Decode a TEST reply.  On NLM_LCK_DENIED the conflicting holder
 * (exclusive flag, pid, owner handle, range) follows the status. */
static int
nlm4clt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
return -EIO;
resp->status = ntohl(*p++);
if (resp->status == NLM_LCK_DENIED) {
struct file_lock *fl = &resp->lock.fl;
u32 excl;
s64 start, end, len;

memset(&resp->lock, 0, sizeof(resp->lock));
locks_init_lock(fl);
excl = ntohl(*p++);
fl->fl_pid = ntohl(*p++);
if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
return -EIO;

fl->fl_flags = FL_POSIX;
fl->fl_type = excl? F_WRLCK : F_RDLCK;
p = xdr_decode_hyper(p, &start);
p = xdr_decode_hyper(p, &len);
/* inclusive last byte; may wrap negative on overflow */
end = start + len - 1;

fl->fl_start = s64_to_loff_t(start);
/* len == 0 means the holder's lock runs to end of file */
if (len == 0 || end < 0)
fl->fl_end = OFFSET_MAX;
else
fl->fl_end = s64_to_loff_t(end);
}
return 0;
}
 
 
/* Encode LOCK arguments: cookie, block flag, exclusive flag, lock,
 * reclaim flag and NSM state.  Returns 0 or -EIO. */
static int
nlm4clt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;

if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
return -EIO;
*p++ = argp->block? xdr_one : xdr_zero;
*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
if (!(p = nlm4_encode_lock(p, lock)))
return -EIO;
*p++ = argp->reclaim? xdr_one : xdr_zero;
*p++ = htonl(argp->state);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
 
/* Encode CANCEL arguments: cookie, block flag, exclusive flag, lock. */
static int
nlm4clt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
struct nlm_lock *lock = &argp->lock;

if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
return -EIO;
*p++ = argp->block? xdr_one : xdr_zero;
*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
if (!(p = nlm4_encode_lock(p, lock)))
return -EIO;
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
 
/* Encode UNLOCK arguments: cookie followed by the lock description. */
static int
nlm4clt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	p = nlm4_encode_cookie(p, &argp->cookie);
	if (p == NULL)
		return -EIO;
	p = nlm4_encode_lock(p, lock);
	if (p == NULL)
		return -EIO;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Encode a generic result body: cookie plus network-order status. */
static int
nlm4clt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	p = nlm4_encode_cookie(p, &resp->cookie);
	if (p == NULL)
		return -EIO;
	*p++ = resp->status;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Encode a TEST result body via the shared testres encoder. */
static int
nlm4clt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	p = nlm4_encode_testres(p, resp);
	if (p == NULL)
		return -EIO;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Decode a generic reply: cookie plus host-order status. */
static int
nlm4clt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	p = nlm4_decode_cookie(p, &resp->cookie);
	if (p == NULL)
		return -EIO;
	resp->status = ntohl(*p++);
	return 0;
}
 
/*
 * Buffer requirements for NLM, in 32-bit XDR words.
 * Each body is parenthesized so the sizes compose safely inside
 * larger expressions (e.g. MAX(...) << 2 in the PROC macro below).
 */
#define NLM4_void_sz		(0)
#define NLM4_cookie_sz		(1+XDR_QUADLEN(NLM_MAXCOOKIELEN))
#define NLM4_caller_sz		(1+XDR_QUADLEN(NLM_MAXSTRLEN))
#define NLM4_netobj_sz		(1+XDR_QUADLEN(XDR_MAX_NETOBJ))
/* #define NLM4_owner_sz 1+XDR_QUADLEN(NLM4_MAXOWNER) */
#define NLM4_fhandle_sz		(1+XDR_QUADLEN(NFS3_FHSIZE))
#define NLM4_lock_sz		(5+NLM4_caller_sz+NLM4_netobj_sz+NLM4_fhandle_sz)
#define NLM4_holder_sz		(6+NLM4_netobj_sz)

#define NLM4_testargs_sz	(NLM4_cookie_sz+1+NLM4_lock_sz)
#define NLM4_lockargs_sz	(NLM4_cookie_sz+4+NLM4_lock_sz)
#define NLM4_cancargs_sz	(NLM4_cookie_sz+2+NLM4_lock_sz)
#define NLM4_unlockargs_sz	(NLM4_cookie_sz+NLM4_lock_sz)

#define NLM4_testres_sz		(NLM4_cookie_sz+1+NLM4_holder_sz)
#define NLM4_res_sz		(NLM4_cookie_sz+1)
#define NLM4_norep_sz		(0)

#ifndef MAX
# define MAX(a,b) (((a) > (b))? (a) : (b))
#endif

/*
 * For NLM, a void procedure really returns nothing
 */
#define nlm4clt_decode_norep NULL

/* Build one rpc_procinfo entry; buffer size is the larger of the
 * argument and result sizes, converted from words to bytes. */
#define PROC(proc, argtype, restype) \
{ .p_procname = "nlm4_" #proc, \
  .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \
  .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \
  .p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2 \
}
 
/* NLMv4 client procedure table, indexed by procedure number.
 * 'undef' slots cover reserved numbers and (unless share support is
 * compiled in) the DOS-share procedures. */
static struct rpc_procinfo nlm4_procedures[] = {
PROC(null, void, void),
PROC(test, testargs, testres),
PROC(lock, lockargs, res),
PROC(canc, cancargs, res),
PROC(unlock, unlockargs, res),
PROC(granted, testargs, res),
PROC(test_msg, testargs, norep),
PROC(lock_msg, lockargs, norep),
PROC(canc_msg, cancargs, norep),
PROC(unlock_msg, unlockargs, norep),
PROC(granted_msg, testargs, norep),
PROC(test_res, testres, norep),
PROC(lock_res, res, norep),
PROC(canc_res, res, norep),
PROC(unlock_res, res, norep),
PROC(granted_res, res, norep),
PROC(undef, void, void),
PROC(undef, void, void),
PROC(undef, void, void),
PROC(undef, void, void),
#ifdef NLMCLNT_SUPPORT_SHARES
PROC(share, shareargs, shareres),
PROC(unshare, shareargs, shareres),
PROC(nm_lock, lockargs, res),
PROC(free_all, notify, void),
#else
PROC(undef, void, void),
PROC(undef, void, void),
PROC(undef, void, void),
PROC(undef, void, void),
#endif
};
 
/* RPC version descriptor: version 4, 24 procedures, table above. */
struct rpc_version nlm_version4 = {
4, 24, nlm4_procedures,
};
/svc4proc.c
0,0 → 1,588
/*
* linux/fs/lockd/svc4proc.c
*
* Lockd server procedures. We don't implement the NLM_*_RES
* procedures because we don't use the async procedures.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfsd/nfsd.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
#include <linux/lockd/sm_inter.h>
 
 
#define NLMDBG_FACILITY NLMDBG_CLIENT
 
static u32 nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
static void nlm4svc_callback_exit(struct rpc_task *);
 
/*
 * Obtain client and file from arguments
 *
 * Resolves the calling host (starting statd monitoring for it if the
 * client asked us to) and, unless @filp is NULL (FREE_ALL), looks up
 * the target file and fills in the file/owner fields of the lock.
 * On success both references are handed to the caller, who must release
 * them; on any failure a host reference taken here is dropped again.
 *
 * Returns 0 on success, or an NLM status suitable for the RPC reply
 * (nlm_lck_denied_nolocks, or the error from nlm_lookup_file).
 */
static u32
nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
			struct nlm_host **hostp, struct nlm_file **filp)
{
	struct nlm_host		*host = NULL;
	struct nlm_file		*file = NULL;
	struct nlm_lock		*lock = &argp->lock;
	u32			error = 0;

	/* nfsd callbacks must have been installed for this procedure */
	if (!nlmsvc_ops)
		return nlm_lck_denied_nolocks;

	/* Obtain handle for client host */
	if (rqstp->rq_client == NULL) {
		printk(KERN_NOTICE
			"lockd: unauthenticated request from (%08x:%d)\n",
			ntohl(rqstp->rq_addr.sin_addr.s_addr),
			ntohs(rqstp->rq_addr.sin_port));
		return nlm_lck_denied_nolocks;
	}

	/* Obtain host handle; monitor it via statd if requested and not
	 * already monitored. */
	if (!(host = nlmsvc_lookup_host(rqstp))
	 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0))
		goto no_locks;
	*hostp = host;

	/* Obtain file pointer. Not used by FREE_ALL call. */
	if (filp != NULL) {
		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
			goto no_locks;
		*filp = file;

		/* Set up the missing parts of the file_lock structure */
		lock->fl.fl_file  = &file->f_file;
		lock->fl.fl_owner = (fl_owner_t) host;
	}

	return 0;

no_locks:
	if (host)
		nlm_release_host(host);
	if (error)
		return error;
	return nlm_lck_denied_nolocks;
}
 
/*
 * NULL: Test for presence of service.  A no-op ping; always succeeds.
 */
static int
nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	dprintk("lockd: NULL          called\n");
	return rpc_success;
}
 
/*
 * TEST: Check for conflicting lock.  The NLM status (and, on a
 * conflict, the holder of the conflicting lock) goes into *resp;
 * the RPC itself always replies rpc_success.
 */
static int
nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
				          struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: TEST4 called\n");
	resp->cookie = argp->cookie;

	/* Don't accept test requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now check for conflicting locks */
	resp->status = nlmsvc_testlock(file, &argp->lock, &resp->lock);

	dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
	/* Drop the references taken by nlm4svc_retrieve_args(). */
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
 
/*
 * LOCK: try to establish a lock.  When argp->block is set and the lock
 * conflicts, nlmsvc_lock() queues a block and returns nlm_lck_blocked;
 * the client is called back later via GRANTED_MSG.
 */
static int
nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				          struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: LOCK called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period,
	 * except reclaims of previously held locks. */
	if (nlmsvc_grace_period && !argp->reclaim) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

#if 0
	/* If supplied state doesn't match current state, we assume it's
	 * an old request that time-warped somehow. Any error return would
	 * do in this case because it's irrelevant anyway.
	 *
	 * NB: We don't retrieve the remote host's state yet.
	 */
	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
		resp->status = nlm_lck_denied_nolocks;
	} else
#endif

	/* Now try to lock the file */
	resp->status = nlmsvc_lock(rqstp, file, &argp->lock,
					       argp->block, &argp->cookie);

	dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
	/* Drop the references taken by nlm4svc_retrieve_args(). */
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
 
/*
 * CANCEL: withdraw a previously blocked lock request.
 */
static int
nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: CANCEL called\n");

	resp->cookie = argp->cookie;

	/* Don't accept requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Try to cancel request. */
	resp->status = nlmsvc_cancel_blocked(file, &argp->lock);

	dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
	/* Drop the references taken by nlm4svc_retrieve_args(). */
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
 
/*
 * UNLOCK: release a lock
 */
static int
nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: UNLOCK called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now try to remove the lock */
	resp->status = nlmsvc_unlock(file, &argp->lock);

	dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
	/* Drop the references taken by nlm4svc_retrieve_args(). */
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
 
/*
 * GRANTED: a remote lockd notifies us that one of our blocked lock
 * requests has been granted; hand the notification to the client-side
 * NLM code for matching against its blocked locks.
 */
static int
nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
					     struct nlm_res  *resp)
{
	dprintk("lockd: GRANTED called\n");

	/* Echo the cookie so the peer can match our reply. */
	resp->cookie = argp->cookie;
	resp->status = nlmclnt_grant(&argp->lock);

	dprintk("lockd: GRANTED status %d\n", ntohl(resp->status));
	return rpc_success;
}
 
/*
 * `Async' versions of the above service routines. They aren't really,
 * because we send the callback before the reply proper. I hope this
 * doesn't break any clients.
 */
static int
nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					      void	      *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: TEST_MSG called\n");

	/* Zero the result so the callback never transmits stack garbage. */
	memset(&res, 0, sizeof(res));

	/* Run the synchronous TEST, then report the outcome through an
	 * NLM_TEST_RES callback instead of the RPC reply. */
	if ((stat = nlm4svc_proc_test(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_TEST_RES, &res);
	return stat;
}
 
/*
 * LOCK_MSG: asynchronous variant of LOCK; the result is reported via an
 * NLM_LOCK_RES callback instead of the RPC reply.
 */
static int
nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					      void	      *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: LOCK_MSG called\n");

	/* Zero the result (as nlm4svc_proc_test_msg does) so that the
	 * callback's memcpy of *resp never picks up uninitialized stack
	 * data for the fields nlm4svc_proc_lock leaves untouched. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_lock(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, &res);
	return stat;
}
 
/*
 * CANCEL_MSG: asynchronous variant of CANCEL; the result is reported via
 * an NLM_CANCEL_RES callback instead of the RPC reply.
 */
static int
nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
						void	        *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: CANCEL_MSG called\n");

	/* Zero the result (as nlm4svc_proc_test_msg does) so that the
	 * callback never copies uninitialized stack data. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_cancel(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, &res);
	return stat;
}
 
/*
 * UNLOCK_MSG: asynchronous variant of UNLOCK; the result is reported via
 * an NLM_UNLOCK_RES callback instead of the RPC reply.
 */
static int
nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
						void	        *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: UNLOCK_MSG called\n");

	/* Zero the result (as nlm4svc_proc_test_msg does) so that the
	 * callback never copies uninitialized stack data. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_unlock(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, &res);
	return stat;
}
 
/*
 * GRANTED_MSG: asynchronous variant of GRANTED; the result is reported
 * via an NLM_GRANTED_RES callback instead of the RPC reply.
 */
static int
nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
						 void	         *resp)
{
	struct nlm_res	res;
	u32		stat;

	dprintk("lockd: GRANTED_MSG called\n");

	/* Zero the result (as nlm4svc_proc_test_msg does) so that the
	 * callback never copies uninitialized stack data. */
	memset(&res, 0, sizeof(res));

	if ((stat = nlm4svc_proc_granted(rqstp, argp, &res)) == 0)
		stat = nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, &res);
	return stat;
}
 
/*
 * SHARE: create a DOS share or alter existing share.
 */
static int
nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
				           struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: SHARE called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period,
	 * except reclaims. */
	if (nlmsvc_grace_period && !argp->reclaim) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now try to create the share */
	resp->status = nlmsvc_share_file(host, file, argp);

	dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
	/* Drop the references taken by nlm4svc_retrieve_args(). */
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
 
/*
 * UNSHARE: Release a DOS share.
 */
static int
nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
				             struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: UNSHARE called\n");

	resp->cookie = argp->cookie;

	/* Don't accept requests during grace period */
	if (nlmsvc_grace_period) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return rpc_success;

	/* Now try to remove the share */
	resp->status = nlmsvc_unshare_file(host, file, argp);

	dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
	/* Drop the references taken by nlm4svc_retrieve_args(). */
	nlm_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
 
/*
 * NM_LOCK: Create an unmonitored lock
 *
 * Identical to LOCK except that the host is not monitored by statd,
 * so the lock will not be recovered after a client reboot.
 */
static int
nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				             struct nlm_res  *resp)
{
	dprintk("lockd: NM_LOCK called\n");

	argp->monitor = 0;		/* just clean the monitor flag */
	return nlm4svc_proc_lock(rqstp, argp, resp);
}
 
/*
 * FREE_ALL: Release all locks and shares held by client
 *
 * Passes filp == NULL to nlm4svc_retrieve_args since no particular
 * file is involved.  Always replies rpc_success (the call has no
 * meaningful result).
 */
static int
nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
					      void            *resp)
{
	struct nlm_host	*host;

	/* Obtain client */
	if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL))
		return rpc_success;

	nlmsvc_free_host_resources(host);
	nlm_release_host(host);
	return rpc_success;
}
 
/*
 * SM_NOTIFY: private callback from statd (not part of official NLM proto)
 *
 * statd tells us that a monitored peer has rebooted.  We reclaim the
 * locks we hold on that peer as a client and, if we are also an NFS
 * server, drop everything the peer held here.
 */
static int
nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
					      void	        *resp)
{
	struct sockaddr_in	saddr = rqstp->rq_addr;
	int			vers = rqstp->rq_vers;
	int			prot = rqstp->rq_prot;
	struct nlm_host		*host;

	dprintk("lockd: SM_NOTIFY called\n");
	/* Accept the callback only from a privileged port on loopback,
	 * i.e. from our local statd. */
	if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
	 || ntohs(saddr.sin_port) >= 1024) {
		printk(KERN_WARNING
			"lockd: rejected NSM callback from %08x:%d\n",
			ntohl(rqstp->rq_addr.sin_addr.s_addr),
			ntohs(rqstp->rq_addr.sin_port));
		return rpc_system_err;
	}

	/* Obtain the host pointer for this NFS server and try to
	 * reclaim all locks we hold on this server.
	 */
	saddr.sin_addr.s_addr = argp->addr;
	if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
		nlmclnt_recovery(host, argp->state);
		nlm_release_host(host);
	}

	/* If we run on an NFS server, delete all locks held by the client */
	if (nlmsvc_ops != NULL) {
		struct svc_client	*clnt;
		saddr.sin_addr.s_addr = argp->addr;
		if ((clnt = nlmsvc_ops->exp_getclient(&saddr)) != NULL
		 && (host = nlm_lookup_host(clnt, &saddr, 0, 0)) != NULL) {
			nlmsvc_free_host_resources(host);
			/* Release only inside this branch: if either lookup
			 * failed, 'host' still holds the client-side pointer
			 * released above and must not be dropped again. */
			nlm_release_host(host);
		}
	}

	return rpc_success;
}
 
/*
 * client sent a GRANTED_RES, let's remove the associated block
 */
static int
nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
					         void          *resp)
{
	/* Without the nfsd callbacks installed there are no blocks
	 * to clean up. */
	if (!nlmsvc_ops)
		return rpc_success;

	dprintk("lockd: GRANTED_RES called\n");

	/* Find the block by cookie and dispose of it according to the
	 * status the client reported. */
	nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status);
	return rpc_success;
}
 
 
 
/*
 * This is the generic lockd callback for async RPC calls
 *
 * Allocates an nlm_rqst, pins the peer host, copies the reply of the
 * original procedure into the callback's argument slot, and fires an
 * asynchronous RPC.  On success the host reference and the call are
 * released later by nlm4svc_callback_exit(); on failure they are
 * released here.
 */
static u32
nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
{
	struct nlm_host	*host;
	struct nlm_rqst	*call;

	if (!(call = nlmclnt_alloc_call()))
		return rpc_system_err;

	host = nlmclnt_lookup_host(&rqstp->rq_addr,
				rqstp->rq_prot, rqstp->rq_vers);
	if (!host) {
		kfree(call);
		return rpc_system_err;
	}

	call->a_flags = RPC_TASK_ASYNC;
	call->a_host  = host;
	/* The reply of the original procedure becomes the argument of
	 * the *_RES callback. */
	memcpy(&call->a_args, resp, sizeof(*resp));

	if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
		goto error;

	return rpc_success;
 error:
	kfree(call);
	nlm_release_host(host);
	return rpc_system_err;
}
 
/*
 * Completion handler for the async callbacks issued above: logs any
 * RPC failure, then releases the host reference and the call structure
 * taken by nlm4svc_callback().
 */
static void
nlm4svc_callback_exit(struct rpc_task *task)
{
	struct nlm_rqst	*call = (struct nlm_rqst *) task->tk_calldata;

	if (task->tk_status < 0) {
		dprintk("lockd: %4d callback failed (errno = %d)\n",
			task->tk_pid, -task->tk_status);
	}
	nlm_release_host(call->a_host);
	kfree(call);
}
 
/*
 * NLM Server procedures.
 */

/* The server ignores the bodies of *_RES calls, so decode them as void;
 * void replies are encoded/decoded the same way. */
#define nlm4svc_encode_norep	nlm4svc_encode_void
#define nlm4svc_decode_norep	nlm4svc_decode_void
#define nlm4svc_decode_testres	nlm4svc_decode_void
#define nlm4svc_decode_lockres	nlm4svc_decode_void
#define nlm4svc_decode_unlockres	nlm4svc_decode_void
#define nlm4svc_decode_cancelres	nlm4svc_decode_void
#define nlm4svc_decode_grantedres	nlm4svc_decode_void

/* Most *_RES procedures (and unassigned slots) need no action; route
 * them to the NULL handler. */
#define nlm4svc_proc_none	nlm4svc_proc_null
#define nlm4svc_proc_test_res	nlm4svc_proc_null
#define nlm4svc_proc_lock_res	nlm4svc_proc_null
#define nlm4svc_proc_cancel_res	nlm4svc_proc_null
#define nlm4svc_proc_unlock_res	nlm4svc_proc_null

/* Placeholder argument/result structure for void procedures. */
struct nlm_void			{ int dummy; };

/* Fill one svc_procedure slot: handler, XDR decode/encode, release fn
 * (none), argument/result struct sizes, counters, reply size. */
#define PROC(name, xargt, xrest, argt, rest, respsize)	\
 { (svc_procfunc) nlm4svc_proc_##name,	\
   (kxdrproc_t) nlm4svc_decode_##xargt,	\
   (kxdrproc_t) nlm4svc_encode_##xrest,	\
   NULL,				\
   sizeof(struct nlm_##argt),		\
   sizeof(struct nlm_##rest),		\
   0,					\
   0,					\
   respsize,				\
 }
/* Reply size building blocks, in 32-bit XDR words: */
#define	Ck	(1+8)		/* cookie */
#define	No	(1+1024/4)	/* netobj */
#define	St	1		/* status */
#define	Rg	4		/* range (offset + length) */
/* Server-side NLM version 4 dispatch table, indexed by procedure number.
 * Columns: handler, decode type, encode type, argument struct, result
 * struct, reply size estimate (in XDR words). */
struct svc_procedure	nlmsvc_procedures4[] = {
  PROC(null,		void,		void,		void,	void, 1),
  PROC(test,		testargs,	testres,	args,	res, Ck+St+2+No+Rg),
  PROC(lock,		lockargs,	res,		args,	res, Ck+St),
  PROC(cancel,		cancargs,	res,		args,	res, Ck+St),
  PROC(unlock,		unlockargs,	res,		args,	res, Ck+St),
  PROC(granted,		testargs,	res,		args,	res, Ck+St),
  PROC(test_msg,	testargs,	norep,		args,	void, 1),
  PROC(lock_msg,	lockargs,	norep,		args,	void, 1),
  PROC(cancel_msg,	cancargs,	norep,		args,	void, 1),
  PROC(unlock_msg,	unlockargs,	norep,		args,	void, 1),
  PROC(granted_msg,	testargs,	norep,		args,	void, 1),
  PROC(test_res,	testres,	norep,		res,	void, 1),
  PROC(lock_res,	lockres,	norep,		res,	void, 1),
  PROC(cancel_res,	cancelres,	norep,		res,	void, 1),
  PROC(unlock_res,	unlockres,	norep,		res,	void, 1),
  PROC(granted_res,	res,		norep,		res,	void, 1),
  /* statd callback */
  PROC(sm_notify,	reboot,		void,		reboot,	void, 1),
  PROC(none,		void,		void,		void,	void, 0),
  PROC(none,		void,		void,		void,	void, 0),
  PROC(none,		void,		void,		void,	void, 0),
  PROC(share,		shareargs,	shareres,	args,	res, Ck+St+1),
  PROC(unshare,		shareargs,	shareres,	args,	res, Ck+St+1),
  PROC(nm_lock,		lockargs,	res,		args,	res, Ck+St),
  PROC(free_all,	notify,		void,		args,	void, 1),

};
/svclock.c
0,0 → 1,686
/*
* linux/fs/lockd/svclock.c
*
* Handling of server-side locks, mostly of the blocked variety.
* This is the ugliest part of lockd because we tread on very thin ice.
* GRANT and CANCEL calls may get stuck, meet in mid-flight, etc.
* IMNSHO introducing the grant callback into the NLM protocol was one
* of the worst ideas Sun ever had. Except maybe for the idea of doing
* NFS file locking at all.
*
* I'm trying hard to avoid race conditions by protecting most accesses
* to a file's list of blocked locks through a semaphore. The global
* list of blocked locks is not protected in this fashion however.
* Therefore, some functions (such as the RPC callback for the async grant
* call) move blocked locks towards the head of the list *while some other
* process might be traversing it*. This should not be a problem in
* practice, because this will only cause functions traversing the list
* to visit some blocks twice.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
 
#define NLMDBG_FACILITY NLMDBG_SVCLOCK
 
#ifdef CONFIG_LOCKD_V4
#define nlm_deadlock nlm4_deadlock
#else
#define nlm_deadlock nlm_lck_denied
#endif
 
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static int nlmsvc_remove_block(struct nlm_block *block);
static void nlmsvc_grant_callback(struct rpc_task *task);
static void nlmsvc_notify_blocked(struct file_lock *);
 
/*
 * The list of blocked locks to retry, singly linked through b_next.
 * nlmsvc_insert_block() keeps it sorted by b_when, with NLM_NEVER
 * entries at the tail.
 */
static struct nlm_block *	nlm_blocked;
 
/*
 * Insert a blocked lock into the global list
 *
 * @when is a relative timeout in jiffies (converted to absolute here)
 * or NLM_NEVER.  The list stays sorted by expiry with NLM_NEVER entries
 * at the tail; an already-queued block is unlinked first.
 */
static void
nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
	struct nlm_block **bp, *b;

	dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
	if (block->b_queued)
		nlmsvc_remove_block(block);
	bp = &nlm_blocked;
	if (when != NLM_NEVER) {
		/* Keep the absolute expiry distinguishable from the
		 * NLM_NEVER sentinel. */
		if ((when += jiffies) == NLM_NEVER)
			when ++;
		while ((b = *bp) && time_before_eq(b->b_when,when) && b->b_when != NLM_NEVER)
			bp = &b->b_next;
	} else
		while ((b = *bp))
			bp = &b->b_next;

	block->b_queued = 1;
	block->b_when = when;
	block->b_next = b;
	*bp = block;
}
 
/*
* Remove a block from the global list
*/
static int
nlmsvc_remove_block(struct nlm_block *block)
{
struct nlm_block **bp, *b;
 
if (!block->b_queued)
return 1;
for (bp = &nlm_blocked; (b = *bp); bp = &b->b_next) {
if (b == block) {
*bp = block->b_next;
block->b_queued = 0;
return 1;
}
}
 
return 0;
}
 
/*
 * Find a block for a given lock and optionally remove it from
 * the list.
 *
 * Matching is by file and by lock identity (nlm_compare_locks).  With
 * @remove set, a matching block is unlinked from the global list and
 * its b_queued flag cleared before it is returned.
 */
static struct nlm_block *
nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock, int remove)
{
	struct nlm_block	**head, *block;
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
				file, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end, lock->fl.fl_type);
	for (head = &nlm_blocked; (block = *head); head = &block->b_next) {
		fl = &block->b_call.a_args.lock.fl;
		dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
				block->b_file, fl->fl_pid,
				(long long)fl->fl_start,
				(long long)fl->fl_end, fl->fl_type,
				nlmdbg_cookie2a(&block->b_call.a_args.cookie));
		if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
			if (remove) {
				*head = block->b_next;
				block->b_queued = 0;
			}
			return block;
		}
	}

	return NULL;
}
 
/* Compare two NLM cookies; 1 if length and contents match, else 0. */
static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
{
	return a->len == b->len && memcmp(a->data, b->data, a->len) == 0;
}
 
/*
 * Find a block with a given NLM cookie.
 *
 * A block matches only if both the cookie and the peer address agree,
 * since cookies are only unique per client.
 */
static inline struct nlm_block *
nlmsvc_find_block(struct nlm_cookie *cookie, struct sockaddr_in *sin)
{
	struct nlm_block *block;

	for (block = nlm_blocked; block; block = block->b_next) {
		dprintk("cookie: head of blocked queue %p, block %p\n",
			nlm_blocked, block);
		if (nlm_cookie_match(&block->b_call.a_args.cookie,cookie)
				&& nlm_cmp_addr(sin, &block->b_host->h_addr))
			break;
	}

	return block;
}
 
/*
 * Create a block and initialize it.
 *
 * Note: we explicitly set the cookie of the grant reply to that of
 * the blocked lock request. The spec explicitly mentions that the client
 * should _not_ rely on the callback containing the same cookie as the
 * request, but (as I found out later) that's because some implementations
 * do just this. Never mind the standards comittees, they support our
 * logging industries.
 *
 * Returns the new block with its callback host/arguments set up and the
 * block linked into the file's f_blocks list, or NULL on failure (all
 * references taken here are released again on the failure paths).
 */
static inline struct nlm_block *
nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
				struct nlm_lock *lock, struct nlm_cookie *cookie)
{
	struct nlm_block	*block;
	struct nlm_host		*host;
	struct nlm_rqst		*call;

	/* Create host handle for callback */
	host = nlmclnt_lookup_host(&rqstp->rq_addr,
				rqstp->rq_prot, rqstp->rq_vers);
	if (host == NULL)
		return NULL;

	/* Allocate memory for block, and initialize arguments */
	if (!(block = (struct nlm_block *) kmalloc(sizeof(*block), GFP_KERNEL)))
		goto failed;
	memset(block, 0, sizeof(*block));
	locks_init_lock(&block->b_call.a_args.lock.fl);
	locks_init_lock(&block->b_call.a_res.lock.fl);

	block->b_host = nlmsvc_lookup_host(rqstp);
	if (block->b_host == NULL) {
		goto failed_free;
	}

	if (!nlmclnt_setgrantargs(&block->b_call, lock))
		goto failed_free;

	/* Set notifier function for VFS, and init args */
	block->b_call.a_args.lock.fl.fl_notify = nlmsvc_notify_blocked;
	block->b_call.a_args.cookie = *cookie;	/* see above */

	dprintk("lockd: created block %p...\n", block);

	/* Create and initialize the block */
	block->b_daemon = rqstp->rq_server;
	block->b_file   = file;

	/* Add to file's list of blocks */
	block->b_fnext  = file->f_blocks;
	file->f_blocks  = block;

	/* Set up RPC arguments for callback */
	call = &block->b_call;
	call->a_host    = host;
	call->a_flags   = RPC_TASK_ASYNC;

	return block;

failed_free:
	/* Drop the server-side host reference if we got far enough to
	 * take it (b_host is NULL otherwise thanks to the memset). */
	if (block->b_host)
		nlm_release_host(block->b_host);
	kfree(block);
failed:
	nlm_release_host(host);
	return NULL;
}
 
/*
 * Delete a block. If the lock was cancelled or the grant callback
 * failed, unlock is set to 1.
 * It is the caller's responsibility to check whether the file
 * can be closed hereafter.
 */
static void
nlmsvc_delete_block(struct nlm_block *block, int unlock)
{
	struct file_lock	*fl = &block->b_call.a_args.lock.fl;
	struct nlm_file		*file = block->b_file;
	struct nlm_block	**bp;

	dprintk("lockd: deleting block %p...\n", block);

	/* Remove block from list */
	nlmsvc_remove_block(block);

	/* If granted, unlock it, else remove from inode block list */
	if (unlock && block->b_granted) {
		dprintk("lockd: deleting granted lock\n");
		fl->fl_type = F_UNLCK;
		posix_lock_file(&block->b_file->f_file, fl, 0);
		block->b_granted = 0;
	} else {
		dprintk("lockd: unblocking blocked lock\n");
		posix_unblock_lock(fl);
	}

	/* If the block is in the middle of a GRANT callback,
	 * don't kill it yet.  Park it with b_done set so that
	 * nlmsvc_retry_blocked() disposes of it after the callback
	 * completes. */
	if (block->b_incall) {
		nlmsvc_insert_block(block, NLM_NEVER);
		block->b_done = 1;
		return;
	}

	/* Remove block from file's list of blocks */
	for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) {
		if (*bp == block) {
			*bp = block->b_fnext;
			break;
		}
	}

	nlm_release_host(block->b_host);
	nlmclnt_freegrantargs(&block->b_call);
	kfree(block);
}
 
/*
 * Loop over all blocks and perform the action specified.
 * (NLM_ACT_CHECK handled by nlmsvc_inspect_file).
 *
 * NLM_ACT_MARK flags each blocking host as in use (for host GC);
 * NLM_ACT_UNLOCK deletes the blocks of @host, or of all hosts when
 * @host is NULL.  Always returns 0.
 */
int
nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
{
	struct nlm_block	*block, *next;

	down(&file->f_sema);
	/* Grab b_fnext before a potential delete frees the block. */
	for (block = file->f_blocks; block; block = next) {
		next = block->b_fnext;
		if (action == NLM_ACT_MARK)
			block->b_host->h_inuse = 1;
		else if (action == NLM_ACT_UNLOCK) {
			if (host == NULL || host == block->b_host)
				nlmsvc_delete_block(block, 1);
		}
	}
	up(&file->f_sema);
	return 0;
}
 
/*
 * Attempt to establish a lock, and if it can't be granted, block it
 * if required.
 *
 * Returns nlm_granted, nlm_lck_denied, nlm_deadlock,
 * nlm_lck_denied_nolocks, or (when @wait is set and the lock conflicts)
 * nlm_lck_blocked after queueing a block so the client can be called
 * back once the conflicting lock goes away.
 */
u32
nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
			struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
{
	struct file_lock	*conflock;
	struct nlm_block	*block;
	int			error;

	dprintk("lockd: nlmsvc_lock(%04x/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n",
				file->f_file.f_dentry->d_inode->i_dev,
				file->f_file.f_dentry->d_inode->i_ino,
				lock->fl.fl_type, lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end,
				wait);


	/* Get existing block (in case client is busy-waiting) */
	block = nlmsvc_lookup_block(file, lock, 0);

	lock->fl.fl_flags |= FL_LOCKD;

again:
	/* Lock file against concurrent access */
	down(&file->f_sema);

	if (!(conflock = posix_test_lock(&file->f_file, &lock->fl))) {
		/* No conflict: take the lock for real and discard any
		 * stale block from an earlier blocked attempt. */
		error = posix_lock_file(&file->f_file, &lock->fl, 0);

		if (block)
			nlmsvc_delete_block(block, 0);
		up(&file->f_sema);

		dprintk("lockd: posix_lock_file returned %d\n", -error);
		switch(-error) {
		case 0:
			return nlm_granted;
		case EDEADLK:
			return nlm_deadlock;
		case EAGAIN:
			return nlm_lck_denied;
		default:			/* includes ENOLCK */
			return nlm_lck_denied_nolocks;
		}
	}

	/* Conflict found and the client doesn't want to wait. */
	if (!wait) {
		up(&file->f_sema);
		return nlm_lck_denied;
	}

	if (posix_locks_deadlock(&lock->fl, conflock)) {
		up(&file->f_sema);
		return nlm_deadlock;
	}

	/* If we don't have a block, create and initialize it. Then
	 * retry because we may have slept in kmalloc. */
	/* We have to release f_sema as nlmsvc_create_block may try to
	 * claim it while doing host garbage collection */
	if (block == NULL) {
		up(&file->f_sema);
		dprintk("lockd: blocking on this lock (allocating).\n");
		if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie)))
			return nlm_lck_denied_nolocks;
		goto again;
	}

	/* Append to list of blocked */
	nlmsvc_insert_block(block, NLM_NEVER);

	if (list_empty(&block->b_call.a_args.lock.fl.fl_block)) {
		/* Now add block to block list of the conflicting lock
		   if we haven't done so. */
		dprintk("lockd: blocking on this lock.\n");
		posix_block_lock(conflock, &block->b_call.a_args.lock.fl);
	}

	up(&file->f_sema);
	return nlm_lck_blocked;
}
 
/*
 * Test for presence of a conflicting lock.
 *
 * On conflict, fills *conflock with the conflicting lock (owner handle
 * omitted) and returns nlm_lck_denied; otherwise returns nlm_granted.
 */
u32
nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
				       struct nlm_lock *conflock)
{
	struct file_lock	*fl;

	dprintk("lockd: nlmsvc_testlock(%04x/%ld, ty=%d, %Ld-%Ld)\n",
				file->f_file.f_dentry->d_inode->i_dev,
				file->f_file.f_dentry->d_inode->i_ino,
				lock->fl.fl_type,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	if ((fl = posix_test_lock(&file->f_file, &lock->fl)) != NULL) {
		dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
			fl->fl_type, (long long)fl->fl_start,
			(long long)fl->fl_end);
		conflock->caller = "somehost";	/* FIXME */
		conflock->oh.len = 0;		/* don't return OH info */
		conflock->fl = *fl;
		return nlm_lck_denied;
	}

	return nlm_granted;
}
 
/*
 * Remove a lock.
 * This implies a CANCEL call: We send a GRANT_MSG, the client replies
 * with a GRANT_RES call which gets lost, and calls UNLOCK immediately
 * afterwards. In this case the block will still be there, and hence
 * must be removed.
 */
u32
nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
{
	int	err;

	dprintk("lockd: nlmsvc_unlock(%04x/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file.f_dentry->d_inode->i_dev,
				file->f_file.f_dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* Get rid of any pending block for this lock first (see the
	 * comment above about lost GRANT_RES replies). */
	nlmsvc_cancel_blocked(file, lock);

	/* Release the lock itself via an F_UNLCK request. */
	lock->fl.fl_type = F_UNLCK;
	err = posix_lock_file(&file->f_file, &lock->fl, 0);

	if (err < 0)
		return nlm_lck_denied_nolocks;
	return nlm_granted;
}
 
/*
 * Cancel a previously blocked request.
 *
 * A cancel request always overrides any grant that may currently
 * be in progress.
 * The calling procedure must check whether the file can be closed.
 */
u32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
	struct nlm_block	*blk;

	dprintk("lockd: nlmsvc_cancel(%04x/%ld, pi=%d, %Ld-%Ld)\n",
				file->f_file.f_dentry->d_inode->i_dev,
				file->f_file.f_dentry->d_inode->i_ino,
				lock->fl.fl_pid,
				(long long)lock->fl.fl_start,
				(long long)lock->fl.fl_end);

	/* Search (and unlink) the block under the file semaphore, then
	 * tear it down; cancellation always succeeds. */
	down(&file->f_sema);
	blk = nlmsvc_lookup_block(file, lock, 1);
	if (blk != NULL)
		nlmsvc_delete_block(blk, 1);
	up(&file->f_sema);

	return nlm_granted;
}
 
/*
 * Unblock a blocked lock request. This is a callback invoked from the
 * VFS layer when a lock on which we blocked is removed.
 *
 * This function doesn't grant the blocked lock instantly, but rather moves
 * the block to the head of nlm_blocked where it can be picked up by lockd.
 */
static void
nlmsvc_notify_blocked(struct file_lock *fl)
{
	struct nlm_block	**bp, *block;

	dprintk("lockd: VFS unblock notification for block %p\n", fl);
	posix_unblock_lock(fl);
	for (bp = &nlm_blocked; (block = *bp); bp = &block->b_next) {
		if (nlm_compare_locks(&block->b_call.a_args.lock.fl, fl)) {
			/* Requeue for immediate retry and wake lockd. */
			nlmsvc_insert_block(block, 0);
			svc_wake_up(block->b_daemon);
			return;
		}
	}

	printk(KERN_WARNING "lockd: notification for unknown block!\n");
}
 
/*
 * Try to claim a lock that was previously blocked.
 *
 * Note that we use both the RPC_GRANTED_MSG call _and_ an async
 * RPC thread when notifying the client. This seems like overkill...
 * Here's why:
 *  -	we don't want to use a synchronous RPC thread, otherwise
 *	we might find ourselves hanging on a dead portmapper.
 *  -	Some lockd implementations (e.g. HP) don't react to
 *	RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls.
 */
static void
nlmsvc_grant_blocked(struct nlm_block *block)
{
	struct nlm_file		*file = block->b_file;
	struct nlm_lock		*lock = &block->b_call.a_args.lock;
	struct file_lock	*conflock;
	int			error;

	dprintk("lockd: grant blocked lock %p\n", block);

	/* First thing is lock the file */
	down(&file->f_sema);

	/* Unlink block request from list */
	nlmsvc_remove_block(block);

	/* If b_granted is true this means we've been here before.
	 * Just retry the grant callback, possibly refreshing the RPC
	 * binding */
	if (block->b_granted) {
		nlm_rebind_host(block->b_call.a_host);
		goto callback;
	}

	/* Try the lock operation again */
	if ((conflock = posix_test_lock(&file->f_file, &lock->fl)) != NULL) {
		/* Bummer, we blocked again */
		dprintk("lockd: lock still blocked\n");
		nlmsvc_insert_block(block, NLM_NEVER);
		posix_block_lock(conflock, &lock->fl);
		up(&file->f_sema);
		return;
	}

	/* Alright, no conflicting lock. Now lock it for real. If the
	 * following yields an error, this is most probably due to low
	 * memory. Retry the lock in a few seconds.
	 */
	if ((error = posix_lock_file(&file->f_file, &lock->fl, 0)) < 0) {
		printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
				-error, __FUNCTION__);
		nlmsvc_insert_block(block, 10 * HZ);
		up(&file->f_sema);
		return;
	}

callback:
	/* Lock was granted by VFS. */
	dprintk("lockd: GRANTing blocked lock.\n");
	block->b_granted = 1;
	/* b_incall keeps nlmsvc_delete_block() from freeing the block
	 * while the GRANTED_MSG callback is in flight. */
	block->b_incall  = 1;

	/* Schedule next grant callback in 30 seconds */
	nlmsvc_insert_block(block, 30 * HZ);

	/* Call the client */
	nlm_get_host(block->b_call.a_host);
	if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
						nlmsvc_grant_callback) < 0)
		nlm_release_host(block->b_call.a_host);
	up(&file->f_sema);
}
 
/*
 * This is the callback from the RPC layer when the NLM_GRANTED_MSG
 * RPC call has succeeded or timed out.
 * Like all RPC callbacks, it is invoked by the rpciod process, so it
 * better not sleep. Therefore, we put the blocked lock on the nlm_blocked
 * chain once more in order to have it removed by lockd itself (which can
 * then sleep on the file semaphore without disrupting e.g. the nfs client).
 */
static void
nlmsvc_grant_callback(struct rpc_task *task)
{
	struct nlm_rqst		*call = (struct nlm_rqst *) task->tk_calldata;
	struct nlm_block	*block;
	unsigned long		timeout;
	struct sockaddr_in	*peer_addr = RPC_PEERADDR(task->tk_client);

	dprintk("lockd: GRANT_MSG RPC callback\n");
	dprintk("callback: looking for cookie %s, host %u.%u.%u.%u\n",
		nlmdbg_cookie2a(&call->a_args.cookie),
		NIPQUAD(peer_addr->sin_addr.s_addr));
	if (!(block = nlmsvc_find_block(&call->a_args.cookie, peer_addr))) {
		dprintk("lockd: no block for cookie %s, host %u.%u.%u.%u\n",
			nlmdbg_cookie2a(&call->a_args.cookie),
			NIPQUAD(peer_addr->sin_addr.s_addr));
		return;
	}

	/* Technically, we should down the file semaphore here. Since we
	 * move the block towards the head of the queue only, no harm
	 * can be done, though. */
	if (task->tk_status < 0) {
		/* RPC error: Re-insert for retransmission */
		timeout = 10 * HZ;
	} else if (block->b_done) {
		/* Block already removed, kill it for real */
		timeout = 0;
	} else {
		/* Call was successful, now wait for client callback */
		timeout = 60 * HZ;
	}
	nlmsvc_insert_block(block, timeout);
	/* Wake lockd so it can act on the requeued block. */
	svc_wake_up(block->b_daemon);
	block->b_incall = 0;

	/* Drop the host reference taken before the async call was fired. */
	nlm_release_host(call->a_host);
}
 
/*
 * We received a GRANT_RES callback. Try to find the corresponding
 * block.
 *
 * On NLM_LCK_DENIED_GRACE_PERIOD the grant is retried later; for any
 * other status the block is deleted (unlocking the lock as well if the
 * client rejected the grant).
 */
void
nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status)
{
	struct nlm_block	*block;
	struct nlm_file		*file;

	dprintk("grant_reply: looking for cookie %x, host (%08x), s=%d \n",
		*(unsigned int *)(cookie->data),
		ntohl(rqstp->rq_addr.sin_addr.s_addr), status);
	if (!(block = nlmsvc_find_block(cookie, &rqstp->rq_addr)))
		return;
	file = block->b_file;

	/* Pin the file; released via nlm_release_file() below. */
	file->f_count++;
	down(&file->f_sema);
	/* Re-check under f_sema -- the block may have gone away meanwhile. */
	if ((block = nlmsvc_find_block(cookie,&rqstp->rq_addr)) != NULL) {
		if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
			/* Try again in a couple of seconds */
			nlmsvc_insert_block(block, 10 * HZ);
			block = NULL;
		} else {
			/* Lock is now held by client, or has been rejected.
			 * In both cases, the block should be removed. */
			/* NOTE(review): this second f_count++ has no matching
			 * nlm_release_file() in this function (only one call
			 * below) -- looks like a leaked file reference;
			 * verify against the file refcounting rules. */
			file->f_count++;
			up(&file->f_sema);
			if (status == NLM_LCK_GRANTED)
				nlmsvc_delete_block(block, 0);
			else
				nlmsvc_delete_block(block, 1);
		}
	}
	if (!block)
		up(&file->f_sema);
	nlm_release_file(file);
}
 
/*
 * Retry all blocked locks that have been notified. This is where lockd
 * picks up locks that can be granted, or grant notifications that must
 * be retransmitted.
 *
 * Returns the number of jiffies until the next pending block expires,
 * or MAX_SCHEDULE_TIMEOUT when nothing is due.
 */
unsigned long
nlmsvc_retry_blocked(void)
{
	struct nlm_block	*block;

	dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
			nlm_blocked,
			nlm_blocked? nlm_blocked->b_when : 0);
	/* The list is sorted by expiry, so stop at the first entry that
	 * is not yet due (or parked with NLM_NEVER). */
	while ((block = nlm_blocked)) {
		if (block->b_when == NLM_NEVER)
			break;
		if (time_after(block->b_when,jiffies))
			break;
		dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
			block, block->b_when, block->b_done);
		if (block->b_done)
			nlmsvc_delete_block(block, 0);
		else
			nlmsvc_grant_blocked(block);
	}

	if ((block = nlm_blocked) && block->b_when != NLM_NEVER)
		return (block->b_when - jiffies);

	return MAX_SCHEDULE_TIMEOUT;
}
/clntproc.c
0,0 → 1,695
/*
* linux/fs/lockd/clntproc.c
*
* RPC procedures for the client side NLM implementation
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
 
#define NLMDBG_FACILITY NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT (5*HZ)
 
static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static void nlmclnt_unlock_callback(struct rpc_task *);
static void nlmclnt_cancel_callback(struct rpc_task *);
static int nlm_stat_to_errno(u32 stat);
 
/*
* Cookie counter for NLM requests
*/
static u32 nlm_cookie = 0x1234;
 
static inline void nlmclnt_next_cookie(struct nlm_cookie *c)
{
memcpy(c->data, &nlm_cookie, 4);
memset(c->data+4, 0, 4);
c->len=4;
nlm_cookie++;
}
 
/*
* Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
*/
static inline void
nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;

	/* Fresh cookie for this request, plus our pseudo NSM state. */
	nlmclnt_next_cookie(&argp->cookie);
	argp->state   = nsm_local_state;
	/* NFS file handle of the file being locked. */
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller  = system_utsname.nodename;
	/* Owner handle "<pid>@<hostname>", stored in the request's own
	 * a_owner buffer; oh.len is the sprintf return (string length). */
	lock->oh.data = req->a_owner;
	lock->oh.len  = sprintf(req->a_owner, "%d@%s",
				current->pid, system_utsname.nodename);
	locks_copy_lock(&lock->fl, fl);
}
 
/*
* Initialize arguments for GRANTED call. The nlm_rqst structure
* has been cleared already.
*/
int
nlmclnt_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
{
locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
call->a_args.lock.caller = system_utsname.nodename;
call->a_args.lock.oh.len = lock->oh.len;
 
/* set default data area */
call->a_args.lock.oh.data = call->a_owner;
 
if (lock->oh.len > NLMCLNT_OHSIZE) {
void *data = kmalloc(lock->oh.len, GFP_KERNEL);
if (!data)
return 0;
call->a_args.lock.oh.data = (u8 *) data;
}
 
memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
return 1;
}
 
void
nlmclnt_freegrantargs(struct nlm_rqst *call)
{
/*
* Check whether we allocated memory for the owner.
*/
if (call->a_args.lock.oh.data != (u8 *) call->a_owner) {
kfree(call->a_args.lock.oh.data);
}
}
 
/*
* This is the main entry point for the NLM client.
*/
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
{
	struct nfs_server	*nfssrv = NFS_SERVER(inode);
	struct nlm_host		*host;
	struct nlm_rqst		reqst, *call = &reqst;
	sigset_t		oldset;
	unsigned long		flags;
	int			status, proto, vers;

	/* NLM v4 pairs with NFSv3, NLM v1 with NFSv2; NFSv4 locking is
	 * not handled by this code. */
	vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
	if (NFS_PROTO(inode)->version > 3) {
		printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
		return -ENOLCK;
	}

	/* Retrieve transport protocol from NFS client */
	proto = NFS_CLIENT(inode)->cl_xprt->prot;

	if (!(host = nlmclnt_lookup_host(NFS_ADDR(inode), proto, vers)))
		return -ENOLCK;

	/* Create RPC client handle if not there, and copy soft
	 * and intr flags from NFS client. */
	if (host->h_rpcclnt == NULL) {
		struct rpc_clnt	*clnt;

		/* Bind an rpc client to this host handle (does not
		 * perform a portmapper lookup) */
		if (!(clnt = nlm_bind_host(host))) {
			status = -ENOLCK;
			goto done;
		}
		clnt->cl_softrtry = nfssrv->client->cl_softrtry;
		clnt->cl_intr     = nfssrv->client->cl_intr;
		clnt->cl_chatty   = nfssrv->client->cl_chatty;
	}

	/* Keep the old signal mask */
	spin_lock_irqsave(&current->sigmask_lock, flags);
	oldset = current->blocked;

	/* If we're cleaning up locks because the process is exiting,
	 * perform the RPC call asynchronously. */
	if ((IS_SETLK(cmd) || IS_SETLKW(cmd))
	    && fl->fl_type == F_UNLCK
	    && (current->flags & PF_EXITING)) {
		/* The async call owns its heap-allocated nlm_rqst; the
		 * callback will free it. All signals are blocked so the
		 * dying process cannot be interrupted mid-cleanup. */
		sigfillset(&current->blocked);	/* Mask all signals */
		recalc_sigpending(current);
		spin_unlock_irqrestore(&current->sigmask_lock, flags);

		call = nlmclnt_alloc_call();
		if (!call) {
			status = -ENOMEM;
			goto out_restore;
		}
		call->a_flags = RPC_TASK_ASYNC;
	} else {
		/* Synchronous path: use the on-stack request. */
		spin_unlock_irqrestore(&current->sigmask_lock, flags);
		memset(call, 0, sizeof(*call));
		locks_init_lock(&call->a_args.lock.fl);
		locks_init_lock(&call->a_res.lock.fl);
	}
	call->a_host = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	/* Dispatch on the fcntl command: SETLK/SETLKW lock or unlock,
	 * GETLK tests for a conflicting lock. */
	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;

	/* A failed async call never reached its callback, so the request
	 * must be freed here. */
	if (status < 0 && (call->a_flags & RPC_TASK_ASYNC))
		kfree(call);

 out_restore:
	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->blocked = oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

done:
	dprintk("lockd: clnt proc returns %d\n", status);
	nlm_release_host(host);
	return status;
}
 
/*
 * Wait while the server is in its grace period. The wait is bounded to
 * ten seconds unless a reclaim is in progress on this host; a signal
 * cuts it short and is reported as -ERESTARTSYS.
 */
static inline int
nlmclnt_grace_wait(struct nlm_host *host)
{
	if (host->h_reclaiming)
		interruptible_sleep_on(&host->h_gracewait);
	else
		interruptible_sleep_on_timeout(&host->h_gracewait, 10*HZ);

	if (signalled())
		return -ERESTARTSYS;
	return 0;
}
 
/*
 * Allocate and zero-initialize an NLM RPC call struct, retrying until
 * memory becomes available. Returns NULL if a signal arrives first.
 */
struct nlm_rqst *
nlmclnt_alloc_call(void)
{
	struct nlm_rqst	*req;

	while (!signalled()) {
		req = (struct nlm_rqst *) kmalloc(sizeof(struct nlm_rqst), GFP_KERNEL);
		if (req != NULL) {
			memset(req, 0, sizeof(*req));
			locks_init_lock(&req->a_args.lock.fl);
			locks_init_lock(&req->a_res.lock.fl);
			return req;
		}
		/* Out of memory: nap for five seconds and try again. */
		printk("nlmclnt_alloc_call: failed, waiting for memory\n");
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(5*HZ);
	}
	return NULL;
}
 
/*
* Generic NLM call
*/
int
nlmclnt_call(struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct file	*filp = argp->lock.fl.fl_file;
	struct rpc_message msg;
	int		status;

	dprintk("lockd: call procedure %s on %s\n",
			nlm_procname(proc), host->h_name);

	msg.rpc_proc = proc;
	msg.rpc_argp = argp;
	msg.rpc_resp = resp;
	/* Use the file's NFS credential when we have a file; GRANTED
	 * callbacks and the like carry none. */
	if (filp)
		msg.rpc_cred = nfs_file_cred(filp);
	else
		msg.rpc_cred = NULL;

	do {
		/* While the server is being reclaimed, only reclaim
		 * requests may go out; everyone else waits. */
		if (host->h_reclaiming && !argp->reclaim) {
			interruptible_sleep_on(&host->h_gracewait);
			continue;
		}

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				/* Transient transport failure: rebind and
				 * let the caller know it may retry. */
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == NLM_LCK_DENIED_GRACE_PERIOD) {
			/* Server is recovering: back off and retry below.
			 * A reclaim request must never be rejected with
			 * a grace-period error. */
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
				     "lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			dprintk("lockd: server returns status %d\n", resp->status);
			return 0;	/* Okay, call complete */
		}

		/* Back off a little and try again */
		interruptible_sleep_on_timeout(&host->h_gracewait, 15*HZ);

		/* When the lock requested by F_SETLKW isn't available,
		   we will wait until the request can be satisfied. If
		   a signal is received during wait, we should return
		   -EINTR. */
		if (signalled ()) {
			status = -EINTR;
			break;
		}
	} while (1);

	return status;
}
 
/*
 * Generic NLM call, async version. Unlike nlmclnt_async_call() this
 * variant passes no credential and takes no extra host reference.
 */
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_message msg;

	dprintk("lockd: call procedure %s on %s (async)\n",
			nlm_procname(proc), host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		return -ENOLCK;

	/* bootstrap and kick off the async RPC call */
	msg.rpc_proc = proc;
	msg.rpc_argp = &req->a_args;
	msg.rpc_resp = &req->a_res;
	msg.rpc_cred = NULL;

	return rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
}
 
/* Client-side async NLM call: uses the file's NFS credential when one
 * is available and holds a host reference for the call's lifetime. */
int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct file	*filp = req->a_args.lock.fl.fl_file;
	struct rpc_message msg;
	int		status;

	dprintk("lockd: call procedure %s on %s (async)\n",
			nlm_procname(proc), host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		return -ENOLCK;

	/* bootstrap and kick off the async RPC call */
	msg.rpc_proc = proc;
	msg.rpc_argp = &req->a_args;
	msg.rpc_resp = &req->a_res;
	if (filp)
		msg.rpc_cred = nfs_file_cred(filp);
	else
		msg.rpc_cred = NULL;

	/* The callback owns this reference; drop it again if the call
	 * could not even be started. */
	nlm_get_host(host);
	status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
	if (status < 0)
		nlm_release_host(host);
	return status;
}
 
/*
* TEST for the presence of a conflicting lock
*/
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
int status;
 
if ((status = nlmclnt_call(req, NLMPROC_TEST)) < 0)
return status;
 
status = req->a_res.status;
if (status == NLM_LCK_GRANTED) {
fl->fl_type = F_UNLCK;
} if (status == NLM_LCK_DENIED) {
/*
* Report the conflicting lock back to the application.
* FIXME: Is it OK to report the pid back as well?
*/
locks_copy_lock(fl, &req->a_res.lock.fl);
/* fl->fl_pid = 0; */
} else {
return nlm_stat_to_errno(req->a_res.status);
}
 
return 0;
}
 
/* fl_insert hook: a granted NLM lock pins its nlm_host while it exists. */
static
void nlmclnt_insert_lock_callback(struct file_lock *fl)
{
	nlm_get_host(fl->fl_u.nfs_fl.host);
}
/* fl_remove hook: drop the host reference taken on insert; clearing the
 * pointer guards against a double release. */
static
void nlmclnt_remove_lock_callback(struct file_lock *fl)
{
	if (fl->fl_u.nfs_fl.host) {
		nlm_release_host(fl->fl_u.nfs_fl.host);
		fl->fl_u.nfs_fl.host = NULL;
	}
}
 
/*
* LOCK: Try to create a lock
*
* Programmer Harassment Alert
*
* When given a blocking lock request in a sync RPC call, the HPUX lockd
* will faithfully return LCK_BLOCKED but never cares to notify us when
* the lock could be granted. This way, our local process could hang
* around forever waiting for the callback.
*
* Solution A: Implement busy-waiting
* Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
*
* For now I am implementing solution A, because I hate the idea of
* re-implementing lockd for a third time in two months. The async
* calls shouldn't be too hard to do, however.
*
* This is one of the lovely things about standards in the NFS area:
* they're so soft and squishy you can't really blame HP for doing this.
*/
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int		status;

	/* Make sure statd monitors the server before we take a lock, so
	 * we hear about reboots and can reclaim. */
	if (!host->h_monitored && nsm_monitor(host) < 0) {
		printk(KERN_NOTICE "lockd: failed to monitor %s\n",
					host->h_name);
		return -ENOLCK;
	}

	/* Issue LOCK; on NLM_LCK_BLOCKED, wait (with polling — see the
	 * comment above this function) for the GRANTED callback and loop
	 * as long as this is a blocking request. */
	do {
		if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0) {
			if (resp->status != NLM_LCK_BLOCKED)
				break;
			status = nlmclnt_block(host, fl, &resp->status);
		}
		if (status < 0)
			return status;
	} while (resp->status == NLM_LCK_BLOCKED && req->a_args.block);

	if (resp->status == NLM_LCK_GRANTED) {
		/* Record host and pseudo NSM state on the lock so it can be
		 * reclaimed after a server reboot; the insert/remove hooks
		 * keep the host refcounted for the lock's lifetime. */
		fl->fl_u.nfs_fl.state = host->h_state;
		fl->fl_u.nfs_fl.flags |= NFS_LCK_GRANTED;
		fl->fl_u.nfs_fl.host = host;
		fl->fl_insert = nlmclnt_insert_lock_callback;
		fl->fl_remove = nlmclnt_remove_lock_callback;
	}

	return nlm_stat_to_errno(resp->status);
}
 
/*
* RECLAIM: Try to reclaim a lock
*/
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int		status;

	/* Reclaims are synchronous and use an on-stack request. */
	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host  = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	/* The reclaim flag lets this LOCK through the server's grace
	 * period (see nlmclnt_call). */
	req->a_args.reclaim = 1;

	if ((status = nlmclnt_call(req, NLMPROC_LOCK)) >= 0
	 && req->a_res.status == NLM_LCK_GRANTED)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, req->a_res.status);

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}
 
/*
* UNLOCK: remove an existing lock
*/
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_res	*resp = &req->a_res;
	int		status;

	/* Clean the GRANTED flag now so the lock doesn't get
	 * reclaimed while we're stuck in the unlock call. */
	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

	/* Async path (process exiting): the callback handles the reply
	 * and frees the request. */
	if (req->a_flags & RPC_TASK_ASYNC) {
		return nlmclnt_async_call(req, NLMPROC_UNLOCK,
					nlmclnt_unlock_callback);
	}

	if ((status = nlmclnt_call(req, NLMPROC_UNLOCK)) < 0)
		return status;

	if (resp->status == NLM_LCK_GRANTED)
		return 0;

	/* DENIED_NOLOCKS just means the server had nothing to unlock;
	 * anything else is unexpected for an UNLOCK reply. */
	if (resp->status != NLM_LCK_DENIED_NOLOCKS)
		printk("lockd: unexpected unlock status: %d\n", resp->status);

	/* What to do now? I'm out of my depth... */

	return -ENOLCK;
}
 
/* RPC completion callback for async UNLOCK: retries transport errors
 * after a rebind, waits out grace periods, and frees the request when
 * the call is finally done. */
static void
nlmclnt_unlock_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;
	int		status = req->a_res.status;

	/* A killed task must not be restarted; just clean up. */
	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		goto retry_rebind;
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);

die:
	/* Drop the reference taken by nlmclnt_async_call() and free the
	 * heap-allocated request. */
	nlm_release_host(req->a_host);
	kfree(req);
	return;
 retry_rebind:
	nlm_rebind_host(req->a_host);
 retry_unlock:
	rpc_restart_call(task);
}
 
/*
* Cancel a blocked lock request.
* We always use an async RPC call for this in order not to hang a
* process that has been Ctrl-C'ed.
*/
/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed. All signals are blocked while the
 * call is set up; the caller's mask is restored on every exit path.
 */
int
nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	unsigned long	flags;
	sigset_t	oldset;
	int		status;

	/* Block all signals while setting up call */
	spin_lock_irqsave(&current->sigmask_lock, flags);
	oldset = current->blocked;
	sigfillset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

	req = nlmclnt_alloc_call();
	if (!req) {
		/* Bug fix: the old code returned here without restoring
		 * the signal mask, leaving the process with all signals
		 * blocked. */
		status = -ENOMEM;
		goto out_restore;
	}
	req->a_host  = host;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);

	/* Kick off the CANCEL; on success the callback frees req. */
	status = nlmclnt_async_call(req, NLMPROC_CANCEL,
					nlmclnt_cancel_callback);
	if (status < 0)
		kfree(req);

out_restore:
	spin_lock_irqsave(&current->sigmask_lock, flags);
	current->blocked = oldset;
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);

	return status;
}
 
/* RPC completion callback for async CANCEL: accepts GRANTED and
 * grace-period replies, retries everything else, and frees the request
 * when the call is finished. */
static void
nlmclnt_cancel_callback(struct rpc_task *task)
{
	struct nlm_rqst	*req = (struct nlm_rqst *) task->tk_calldata;

	/* A killed task must not be restarted; just clean up. */
	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
					task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %d (task %d)\n",
			req->a_res.status, task->tk_pid);

	switch (req->a_res.status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			req->a_res.status);
	}

die:
	/* Drop the host reference taken by nlmclnt_async_call(). */
	nlm_release_host(req->a_host);
	kfree(req);
	return;

retry_cancel:
	/* NOTE(review): rpc_delay() is applied after rpc_restart_call();
	 * verify against the sunrpc scheduler that the delay still takes
	 * effect before the restarted call runs. */
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}
 
/*
 * Convert an NLM status code to a generic kernel errno.
 * Unknown and BLOCKED statuses are logged and mapped to -ENOLCK.
 */
static int
nlm_stat_to_errno(u32 status)
{
	switch (status) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		/* BLOCKED should have been handled before conversion. */
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	default:
		printk(KERN_NOTICE "lockd: unexpected server status %d\n",
			status);
		return -ENOLCK;
	}
}
/lockd_syms.c
0,0 → 1,38
/*
* linux/fs/lockd/lockd_syms.c
*
* Symbols exported by the lockd module.
*
* Authors: Olaf Kirch (okir@monad.swb.de)
*
* Copyright (C) 1997 Olaf Kirch <okir@monad.swb.de>
*/
 
#define __NO_VERSION__
#include <linux/config.h>
#include <linux/module.h>
 
#ifdef CONFIG_MODULES
 
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/unistd.h>
 
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
 
/* Start/stop the daemon */
EXPORT_SYMBOL(lockd_up);
EXPORT_SYMBOL(lockd_down);
 
/* NFS client entry */
EXPORT_SYMBOL(nlmclnt_proc);
 
/* NFS server entry points/hooks */
EXPORT_SYMBOL(nlmsvc_invalidate_client);
EXPORT_SYMBOL(nlmsvc_ops);
 
#endif /* CONFIG_MODULES */
/svcsubs.c
0,0 → 1,311
/*
* linux/fs/lockd/svcsubs.c
*
* Various support routines for the NLM server.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/config.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfsd/nfsfh.h>
#include <linux/nfsd/export.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
#include <linux/lockd/sm_inter.h>
 
#define NLMDBG_FACILITY NLMDBG_SVCSUBS
 
 
/*
* Global file hash table
*/
#define FILE_HASH_BITS 5
#define FILE_NRHASH (1<<FILE_HASH_BITS)
static struct nlm_file * nlm_files[FILE_NRHASH];
static DECLARE_MUTEX(nlm_file_sema);
 
/* Hash an NFS file handle by summing its bytes, folded into the
 * FILE_NRHASH-sized table. */
static inline unsigned int file_hash(struct nfs_fh *f)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < NFS2_FHSIZE; i++)
		sum += f->data[i];
	return sum & (FILE_NRHASH - 1);
}
 
/*
* Lookup file info. If it doesn't exist, create a file info struct
* and open a (VFS) file for the given inode.
*
* FIXME:
* Note that we open the file O_RDONLY even when creating write locks.
* This is not quite right, but for now, we assume the client performs
* the proper R/W checking.
*/
u32
nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
					struct nfs_fh *f)
{
	struct nlm_file	*file;
	unsigned int	hash;
	u32		nfserr;
	u32		*fhp = (u32*)f->data;

	dprintk("lockd: nlm_file_lookup(%08x %08x %08x %08x %08x %08x)\n",
		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);


	hash = file_hash(f);

	/* Lock file table */
	down(&nlm_file_sema);

	/* Look for an existing entry with the same file handle. */
	for (file = nlm_files[hash]; file; file = file->f_next)
		if (!memcmp(&file->f_handle, f, sizeof(*f)))
			goto found;

	dprintk("lockd: creating file for (%08x %08x %08x %08x %08x %08x)\n",
		fhp[0], fhp[1], fhp[2], fhp[3], fhp[4], fhp[5]);

	nfserr = nlm_lck_denied_nolocks;
	file = (struct nlm_file *) kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		goto out_unlock;

	memset(file, 0, sizeof(*file));
	memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
	file->f_hash = hash;
	init_MUTEX(&file->f_sema);

	/* Open the file. Note that this must not sleep for too long, else
	 * we would lock up lockd:-) So no NFS re-exports, folks.
	 *
	 * We have to make sure we have the right credential to open
	 * the file.
	 */
	if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) {
		dprintk("lockd: open failed (nfserr %d)\n", ntohl(nfserr));
		goto out_free;
	}

	/* Insert at the head of the hash chain. */
	file->f_next = nlm_files[hash];
	nlm_files[hash] = file;

found:
	dprintk("lockd: found file %p (count %d)\n", file, file->f_count);
	*result = file;
	/* Caller gets a reference; dropped via nlm_release_file(). */
	file->f_count++;
	nfserr = 0;

out_unlock:
	up(&nlm_file_sema);
	return nfserr;

out_free:
	kfree(file);
#ifdef CONFIG_LOCKD_V4
	/* NOTE(review): the magic value 1 presumably matches what fopen()
	 * returns for a stale handle — confirm against nlmsvc_ops->fopen. */
	if (nfserr == 1)
		nfserr = nlm4_stale_fh;
	else
#endif
	nfserr = nlm_lck_denied;
	goto out_unlock;
}
 
/*
 * Delete a file after having released all locks, blocks and shares:
 * unlink it from its hash chain, close the VFS file and free the entry.
 */
static inline void
nlm_delete_file(struct nlm_file *file)
{
	struct inode *inode = file->f_file.f_dentry->d_inode;
	struct nlm_file **fp;

	dprintk("lockd: closing file %s/%ld\n",
		kdevname(inode->i_dev), inode->i_ino);

	for (fp = nlm_files + file->f_hash; *fp != NULL; fp = &(*fp)->f_next) {
		if (*fp == file) {
			*fp = file->f_next;
			nlmsvc_ops->fclose(&file->f_file);
			kfree(file);
			return;
		}
	}

	printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
}
 
/*
* Loop over all locks on the given file and perform the specified
* action.
*/
static int
nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action)
{
	struct inode	 *inode = nlmsvc_file_inode(file);
	struct file_lock *fl;
	struct nlm_host	 *lockhost;

again:
	file->f_locks = 0;
	/* Walk the inode's lock list, considering only locks lockd owns. */
	for (fl = inode->i_flock; fl; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_LOCKD))
			continue;

		/* update current lock count */
		file->f_locks++;
		lockhost = (struct nlm_host *) fl->fl_owner;
		if (action == NLM_ACT_MARK)
			lockhost->h_inuse = 1;
		else if (action == NLM_ACT_CHECK)
			return 1;
		else if (action == NLM_ACT_UNLOCK) {
			struct file_lock lock = *fl;

			/* With a specific host, only its locks go away. */
			if (host && lockhost != host)
				continue;

			lock.fl_type  = F_UNLCK;
			lock.fl_start = 0;
			lock.fl_end   = OFFSET_MAX;
			if (posix_lock_file(&file->f_file, &lock, 0) < 0) {
				printk("lockd: unlock failure in %s:%d\n",
						__FILE__, __LINE__);
				return 1;
			}
			/* Unlocking may split/merge entries and invalidate
			 * our list position, so restart from scratch. */
			goto again;
		}
	}

	return 0;
}
 
/*
 * Operate on a single file: apply @action to its blocks, shares and
 * locks. Returns non-zero as soon as the file is found to be in use
 * (for NLM_ACT_CHECK) or a traversal reports failure.
 */
static inline int
nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action)
{
	if (action == NLM_ACT_CHECK) {
		/* Fast path for mark and sweep garbage collection */
		if (file->f_count || file->f_blocks || file->f_shares)
			return 1;
	} else if (nlmsvc_traverse_blocks(host, file, action)
		|| nlmsvc_traverse_shares(host, file, action)) {
		return 1;
	}
	return nlm_traverse_locks(host, file, action);
}
 
/*
* Loop over all files in the file table.
*/
static int
nlm_traverse_files(struct nlm_host *host, int action)
{
	struct nlm_file	*file, **fp;
	int		i;

	down(&nlm_file_sema);
	/* Walk every hash chain of the global file table. */
	for (i = 0; i < FILE_NRHASH; i++) {
		fp = nlm_files + i;
		while ((file = *fp) != NULL) {
			/* Traverse locks, blocks and shares of this file
			 * and update file->f_locks count */
			if (nlm_inspect_file(host, file, action)) {
				up(&nlm_file_sema);
				return 1;
			}

			/* No more references to this file. Let go of it. */
			if (!file->f_blocks && !file->f_locks
			 && !file->f_shares && !file->f_count) {
				/* Unlink without advancing fp — *fp is now
				 * the next entry in the chain. */
				*fp = file->f_next;
				nlmsvc_ops->fclose(&file->f_file);
				kfree(file);
			} else {
				fp = &file->f_next;
			}
		}
	}
	up(&nlm_file_sema);
	return 0;
}
 
/*
* Release file. If there are no more remote locks on this file,
* close it and free the handle.
*
* Note that we can't do proper reference counting without major
* contortions because the code in fs/locks.c creates, deletes and
* splits locks without notification. Our only way is to walk the
* entire lock list each time we remove a lock.
*/
void
nlm_release_file(struct nlm_file *file)
{
	dprintk("lockd: nlm_release_file(%p, ct = %d)\n",
				file, file->f_count);

	/* Lock file table */
	down(&nlm_file_sema);

	/* Drop our reference; when it was the last one and no locks,
	 * blocks or shares remain, delete the file entry. */
	if (--file->f_count == 0
	 && !nlm_inspect_file(NULL, file, NLM_ACT_CHECK))
		nlm_delete_file(file);

	up(&nlm_file_sema);
}
 
/*
 * Mark all hosts that still hold resources (locks, blocks or shares)
 * so the host garbage collector leaves them alone.
 */
void
nlmsvc_mark_resources(void)
{
	dprintk("lockd: nlmsvc_mark_resources\n");
	nlm_traverse_files(NULL, NLM_ACT_MARK);
}
 
/*
 * Release all resources (locks, blocks, shares) held by the given
 * client host.
 */
void
nlmsvc_free_host_resources(struct nlm_host *host)
{
	dprintk("lockd: nlmsvc_free_host_resources\n");

	if (nlm_traverse_files(host, NLM_ACT_UNLOCK))
		/* Bug fix: the warning message lacked its trailing
		 * newline, so the next printk would run into it. */
		printk(KERN_WARNING
			"lockd: couldn't remove all locks held by %s\n",
			host->h_name);
}
 
/*
 * Delete a client when the nfsd entry is removed: free everything it
 * holds and make its host entry expire immediately.
 */
void
nlmsvc_invalidate_client(struct svc_client *clnt)
{
	struct nlm_host	*host = nlm_lookup_host(clnt, NULL, 0, 0);

	if (host == NULL)
		return;

	dprintk("lockd: invalidating client for %s\n", host->h_name);
	nlmsvc_free_host_resources(host);
	host->h_expires = 0;
	host->h_killed = 1;
	nlm_release_host(host);
}
/mon.c
0,0 → 1,251
/*
* linux/fs/lockd/mon.c
*
* The kernel statd client.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/types.h>
#include <linux/utsname.h>
#include <linux/kernel.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
 
 
#define NLMDBG_FACILITY NLMDBG_MONITOR
 
static struct rpc_clnt * nsm_create(void);
 
extern struct rpc_program nsm_program;
 
/*
* Local NSM state
*/
u32 nsm_local_state;
 
/*
* Common procedure for SM_MON/SM_UNMON calls
*/
static int
nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
{
	struct rpc_clnt	*clnt;
	int		status;
	struct nsm_args	args;

	/* Failure to reach the local statd is reported as EACCES. */
	status = -EACCES;
	clnt = nsm_create();
	if (!clnt)
		goto out;

	/* Tell statd which peer we care about, and which NLM program/
	 * version/procedure it should call back when that peer reboots. */
	args.addr = host->h_addr.sin_addr.s_addr;
	args.prog = NLM_PROGRAM;
	args.vers = host->h_version;
	args.proc = NLMPROC_NSM_NOTIFY;
	memset(res, 0, sizeof(*res));

	status = rpc_call(clnt, proc, &args, res, 0);
	if (status < 0)
		printk(KERN_DEBUG "nsm_mon_unmon: rpc failed, status=%d\n",
			status);
	else
		status = 0;
 out:
	return status;
}
 
/*
 * Set up monitoring of a remote host: ask statd to watch it, and mark
 * the host as monitored on success.
 */
int
nsm_monitor(struct nlm_host *host)
{
	struct nsm_res	res;
	int		status;

	dprintk("lockd: nsm_monitor(%s)\n", host->h_name);

	status = nsm_mon_unmon(host, SM_MON, &res);
	if (status < 0 || res.status != 0) {
		printk(KERN_NOTICE "lockd: cannot monitor %s\n", host->h_name);
		return status;
	}
	host->h_monitored = 1;
	return status;
}
 
/*
 * Cease to monitor a remote host; clears h_monitored on success.
 */
int
nsm_unmonitor(struct nlm_host *host)
{
	struct nsm_res	res;
	int		status;

	dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);

	status = nsm_mon_unmon(host, SM_UNMON, &res);
	if (status < 0) {
		printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", host->h_name);
		return status;
	}
	host->h_monitored = 0;
	return status;
}
 
/*
* Create NSM client for the local host
*/
static struct rpc_clnt *
nsm_create(void)
{
	struct rpc_xprt	*xprt;
	struct rpc_clnt	*clnt = NULL;
	struct sockaddr_in	sin;

	/* statd lives on the loopback interface; port 0 leaves the real
	 * port to be resolved via the portmapper. */
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	sin.sin_port = 0;

	xprt = xprt_create_proto(IPPROTO_UDP, &sin, NULL);
	if (!xprt)
		goto out;

	clnt = rpc_create_client(xprt, "localhost",
				&nsm_program, SM_VERSION,
				RPC_AUTH_NULL);
	if (!clnt)
		goto out_destroy;
	/* One-shot, soft client: give up instead of retrying forever. */
	clnt->cl_softrtry = 1;
	clnt->cl_chatty   = 1;
	clnt->cl_oneshot  = 1;
	xprt->resvport = 1;	/* NSM requires a reserved port */
out:
	return clnt;

out_destroy:
	/* Client creation failed: tear down the transport ourselves. */
	xprt_destroy(xprt);
	goto out;
}
 
/*
* XDR functions for NSM.
*/
/* Placeholder for NSM procedures we never encode or decode: any attempt
 * to use them fails with -EACCES. */
static int
xdr_error(struct rpc_rqst *rqstp, u32 *p, void *dummy)
{
	return -EACCES;
}
 
/* Encode the SM_MON / SM_UNMON arguments: mon_name (the peer's dotted
 * quad), our own name, the NLM callback triple, and — for SM_MON only —
 * a 16-byte private cookie carrying the peer's address. */
static int
xdr_encode_mon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
{
	char	buffer[20];
	u32	addr = ntohl(argp->addr);

	dprintk("nsm: xdr_encode_mon(%08x, %d, %d, %d)\n",
			htonl(argp->addr), htonl(argp->prog),
			htonl(argp->vers), htonl(argp->proc));

	/*
	 * Use the dotted-quad IP address of the remote host as
	 * identifier. Linux statd always looks up the canonical
	 * hostname first for whatever remote hostname it receives,
	 * so this works alright.
	 */
	/* buffer[20] is large enough: a dotted quad is at most 15
	 * characters plus the terminating NUL. */
	sprintf(buffer, "%d.%d.%d.%d", (addr>>24) & 0xff, (addr>>16) & 0xff,
				 (addr>>8) & 0xff,  (addr) & 0xff);
	if (!(p = xdr_encode_string(p, buffer))
	 || !(p = xdr_encode_string(p, system_utsname.nodename)))
		return -EIO;
	*p++ = htonl(argp->prog);
	*p++ = htonl(argp->vers);
	*p++ = htonl(argp->proc);

	/* This is the private part. Needed only for SM_MON call */
	if (rqstp->rq_task->tk_msg.rpc_proc == SM_MON) {
		*p++ = argp->addr;
		*p++ = 0;
		*p++ = 0;
		*p++ = 0;
	}

	rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
	return 0;
}
 
/* Decode an SM_MON reply: one word of status followed by one word of
 * state counter. */
static int
xdr_decode_stat_res(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp)
{
	resp->status = ntohl(*p++);
	resp->state = ntohl(*p++);
	dprintk("nsm: xdr_decode_stat_res status %d state %d\n",
			resp->status, resp->state);
	return 0;
}
 
/* Decode an SM_UNMON reply: a single word carrying the state counter. */
static int
xdr_decode_stat(struct rpc_rqst *rqstp, u32 *p, struct nsm_res *resp)
{
	resp->state = ntohl(*p++);
	return 0;
}
 
#define SM_my_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_my_id_sz (3+1+SM_my_name_sz)
#define SM_mon_id_sz (1+XDR_QUADLEN(20)+SM_my_id_sz)
#define SM_mon_sz (SM_mon_id_sz+4)
#define SM_monres_sz 2
#define SM_unmonres_sz 1
 
#ifndef MAX
# define MAX(a, b) (((a) > (b))? (a) : (b))
#endif
 
/* Procedure table for the SM (statd) program. Only SM_MON and SM_UNMON
 * are implemented; the others encode/decode via xdr_error and fail. */
static struct rpc_procinfo	nsm_procedures[] = {
	{ "sm_null",
		(kxdrproc_t) xdr_error,
		(kxdrproc_t) xdr_error, 0, 0 },
	{ "sm_stat",
		(kxdrproc_t) xdr_error,
		(kxdrproc_t) xdr_error, 0, 0 },
	{ "sm_mon",
		(kxdrproc_t) xdr_encode_mon,
		(kxdrproc_t) xdr_decode_stat_res, MAX(SM_mon_sz, SM_monres_sz) << 2, 0 },
	{ "sm_unmon",
		(kxdrproc_t) xdr_encode_mon,
		(kxdrproc_t) xdr_decode_stat, MAX(SM_mon_id_sz, SM_unmonres_sz) << 2, 0 },
	{ "sm_unmon_all",
		(kxdrproc_t) xdr_error,
		(kxdrproc_t) xdr_error, 0, 0 },
	{ "sm_simu_crash",
		(kxdrproc_t) xdr_error,
		(kxdrproc_t) xdr_error, 0, 0 },
	{ "sm_notify",
		(kxdrproc_t) xdr_error,
		(kxdrproc_t) xdr_error, 0, 0 },
};

/* Only protocol version 1 of SM exists. */
static struct rpc_version	nsm_version1 = {
	1,
	sizeof(nsm_procedures)/sizeof(nsm_procedures[0]),
	nsm_procedures
};

/* Version table indexed by version number (slot 0 unused). */
static struct rpc_version *	nsm_version[] = {
	NULL,
	&nsm_version1,
};

static struct rpc_stat		nsm_stats;

/* RPC program definition handed to rpc_create_client() in nsm_create(). */
struct rpc_program		nsm_program = {
	"statd",
	SM_PROGRAM,
	sizeof(nsm_version)/sizeof(nsm_version[0]),
	nsm_version,
	&nsm_stats
};
/clntlock.c
0,0 → 1,254
/*
* linux/fs/lockd/clntlock.c
*
* Lock handling for the client side NLM implementation
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#define __KERNEL_SYSCALLS__
 
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/nfs_fs.h>
#include <linux/unistd.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/smp_lock.h>
 
#define NLMDBG_FACILITY NLMDBG_CLIENT
 
/*
* Local function prototypes
*/
static int reclaimer(void *ptr);
 
/*
* The following functions handle blocking and granting from the
* client perspective.
*/
 
/*
* This is the representation of a blocked client lock.
*/
struct nlm_wait {
	struct nlm_wait *	b_next;		/* linked list (nlm_blocked) */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;		/* peer the request was sent to */
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status */
};
 
static struct nlm_wait * nlm_blocked;
 
/*
* Block on a lock
*/
int
nlmclnt_block(struct nlm_host *host, struct file_lock *fl, u32 *statp)
{
	struct nlm_wait	block, **head;
	int		err;
	u32		pstate;

	/* Queue an on-stack wait entry at the head of nlm_blocked so
	 * nlmclnt_grant() can find and wake us. */
	block.b_host   = host;
	block.b_lock   = fl;
	init_waitqueue_head(&block.b_wait);
	block.b_status = NLM_LCK_BLOCKED;
	block.b_next   = nlm_blocked;
	nlm_blocked    = &block;

	/* Remember pseudo nsm state */
	pstate         = host->h_state;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	sleep_on_timeout(&block.b_wait, 30*HZ);

	/* Unlink our entry again — it lives on this stack frame. */
	for (head = &nlm_blocked; *head; head = &(*head)->b_next) {
		if (*head == &block) {
			*head = block.b_next;
			break;
		}
	}

	/* Normal wakeup or timeout: report what the callback (if any)
	 * recorded; NLM_LCK_BLOCKED means we simply timed out. */
	if (!signalled()) {
		*statp = block.b_status;
		return 0;
	}

	/* Okay, we were interrupted. Cancel the pending request
	 * unless the server has rebooted (in which case the lock is
	 * gone anyway and the reclaim logic takes over).
	 */
	if (pstate == host->h_state && (err = nlmclnt_cancel(host, fl)) < 0)
		printk(KERN_NOTICE
			"lockd: CANCEL call failed (errno %d)\n", -err);

	return -ERESTARTSYS;
}
 
/*
 * The server lockd has called us back to tell us the lock was granted.
 * Returns nlm_granted when a matching blocked request was found and
 * woken, nlm_lck_denied otherwise.
 */
u32
nlmclnt_grant(struct nlm_lock *lock)
{
	struct nlm_wait	*b;

	/*
	 * Look up the blocked request by comparing the locks themselves.
	 * Warning: must not use cookie to match it!
	 */
	for (b = nlm_blocked; b != NULL; b = b->b_next) {
		if (nlm_compare_locks(b->b_lock, &lock->fl)) {
			/* Found it: record the grant and wake the sleeper. */
			b->b_status = NLM_LCK_GRANTED;
			wake_up(&b->b_wait);
			return nlm_granted;
		}
	}

	/* Ooops, no blocked request found. */
	return nlm_lck_denied;
}
 
/*
* The following procedures deal with the recovery of locks after a
* server crash.
*/
 
/*
* Mark the locks for reclaiming.
* FIXME: In 2.5 we don't want to iterate through any global file_lock_list.
* Maintain NLM lock reclaiming lists in the nlm_host instead.
*/
static
void nlmclnt_mark_reclaim(struct nlm_host *host)
{
struct file_lock *fl;
struct inode *inode;
struct list_head *tmp;
 
list_for_each(tmp, &file_lock_list) {
fl = list_entry(tmp, struct file_lock, fl_link);
 
inode = fl->fl_file->f_dentry->d_inode;
if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
continue;
if (fl->fl_u.nfs_fl.host != host)
continue;
if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED))
continue;
fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM;
}
}
 
/*
 * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
 * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
 */
static inline
void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate)
{
	host->h_monitored = 0;		/* force re-monitoring via statd */
	host->h_nsmstate = newstate;	/* remember server's new NSM state */
	host->h_state++;		/* bump pseudo NSM state */
	host->h_nextrebind = 0;		/* allow an immediate rebind */
	nlm_rebind_host(host);
	nlmclnt_mark_reclaim(host);
	/* Fix: the debug message was missing its terminating newline. */
	dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
}
 
/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 *
 * If a reclaim is already in progress (h_reclaiming was nonzero) we
 * only re-prime the host state when the server's NSM state changed
 * again; otherwise we start a fresh kernel thread. The module use
 * count is bumped so the module cannot unload while the thread runs,
 * and dropped again if the thread could not be created.
 */
void
nlmclnt_recovery(struct nlm_host *host, u32 newstate)
{
	if (host->h_reclaiming++) {
		if (host->h_nsmstate == newstate)
			return;		/* same reboot event — nothing more to do */
		nlmclnt_prepare_reclaim(host, newstate);
	} else {
		nlmclnt_prepare_reclaim(host, newstate);
		nlm_get_host(host);	/* reference handed to the thread */
		MOD_INC_USE_COUNT;
		if (kernel_thread(reclaimer, host, CLONE_SIGNAL) < 0)
			MOD_DEC_USE_COUNT;	/* thread never started */
	}
}
 
/*
 * Body of the per-host reclaimer thread spawned by nlmclnt_recovery().
 * Re-establishes every lock flagged NFS_LCK_RECLAIM with the rebooted
 * server, then wakes all waiters on the host's grace period and on
 * blocked locks (telling the latter to retry).
 */
static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct list_head *tmp;
	struct file_lock *fl;
	struct inode *inode;

	daemonize();
	reparent_to_init();
	snprintf(current->comm, sizeof(current->comm),
		 "%s-reclaim",
		 host->h_name);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	/* First, reclaim all locks that have been marked. */
restart:
	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
			continue;

		/* Clear the flag first so a rescan won't pick it up again. */
		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
		nlmclnt_reclaim(host, fl);
		if (signalled())
			break;
		/* The reclaim RPC may have slept and the list may have
		 * changed under us, so restart the scan from the top. */
		goto restart;
	}

	host->h_reclaiming = 0;
	wake_up(&host->h_gracewait);

	/* Now, wake up all processes that sleep on a blocked lock */
	for (block = nlm_blocked; block; block = block->b_next) {
		if (block->b_host == host) {
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	MOD_DEC_USE_COUNT;

	return 0;
}
/svc.c
0,0 → 1,398
/*
* linux/fs/lockd/svc.c
*
* This is the central lockd service.
*
* FIXME: Separate the lockd NFS server functionality from the lockd NFS
* client functionality. Oh why didn't Sun create two separate
* services in the first place?
*
* Authors: Olaf Kirch (okir@monad.swb.de)
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
 
#define __KERNEL_SYSCALLS__
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
 
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/version.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
 
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/lockd/lockd.h>
#include <linux/nfs.h>
 
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSSVC_XDRSIZE)
#define ALLOWED_SIGS (sigmask(SIGKILL))
 
extern struct svc_program nlmsvc_program;
struct nlmsvc_binding * nlmsvc_ops;
static DECLARE_MUTEX(nlmsvc_sema);
static unsigned int nlmsvc_users;
static pid_t nlmsvc_pid;
int nlmsvc_grace_period;
unsigned long nlmsvc_timeout;
 
static DECLARE_MUTEX_LOCKED(lockd_start);
static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
 
/*
* Currently the following can be set only at insmod time.
* Ideally, they would be accessible through the sysctl interface.
*/
unsigned long nlm_grace_period;
unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
unsigned long nlm_udpport, nlm_tcpport;
 
/*
 * Compute the jiffies value at which the NLM grace period ends and
 * flag that we are currently inside the grace period.
 */
static unsigned long set_grace_period(void)
{
	unsigned long period;

	/* Note: nlm_timeout should always be nonzero */
	if (nlm_grace_period) {
		/* Round the configured period up to a whole number of
		 * nlm_timeout intervals. */
		unsigned long rounds =
			(nlm_grace_period + nlm_timeout - 1) / nlm_timeout;
		period = rounds * nlm_timeout * HZ;
	} else {
		period = nlm_timeout * 5 * HZ;
	}
	nlmsvc_grace_period = 1;
	return jiffies + period;
}
 
/*
 * This is the lockd kernel thread: the main RPC dispatch loop of the
 * NLM service. It runs until it is signalled while no users remain,
 * or until another lockd instance has taken over the pid slot.
 */
static void
lockd(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	int		err = 0;
	unsigned long grace_period_expire;

	/* Lock module and set up kernel thread */
	MOD_INC_USE_COUNT;
	lock_kernel();

	/*
	 * Let our maker know we're running: publish our pid and
	 * release lockd_up(), which is blocked on lockd_start.
	 */
	nlmsvc_pid = current->pid;
	up(&lockd_start);

	daemonize();
	reparent_to_init();
	sprintf(current->comm, "lockd");

	/* Process request with signals blocked — only SIGKILL gets through. */
	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	/* kick rpciod */
	rpciod_up();

	dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");

	/* Guard against a zero timeout from module parameters. */
	if (!nlm_timeout)
		nlm_timeout = LOCKD_DFLT_TIMEO;
	nlmsvc_timeout = nlm_timeout * HZ;

	grace_period_expire = set_grace_period();

	/*
	 * The main request loop. We don't terminate until the last
	 * NFS mount or NFS daemon has gone away, and we've been sent a
	 * signal, or else another process has taken over our job.
	 */
	while ((nlmsvc_users || !signalled()) && nlmsvc_pid == current->pid)
	{
		long timeout = MAX_SCHEDULE_TIMEOUT;
		if (signalled()) {
			/* SIGKILL while users remain: drop all server
			 * state and enter a fresh grace period. */
			spin_lock_irq(&current->sigmask_lock);
			flush_signals(current);
			spin_unlock_irq(&current->sigmask_lock);
			if (nlmsvc_ops) {
				nlmsvc_ops->detach();
				grace_period_expire = set_grace_period();
			}
		}

		/*
		 * Retry any blocked locks that have been notified by
		 * the VFS. Don't do this during grace period.
		 * (Theoretically, there shouldn't even be blocked locks
		 * during grace period).
		 */
		if (!nlmsvc_grace_period)
			timeout = nlmsvc_retry_blocked();

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		err = svc_recv(serv, rqstp, timeout);
		if (err == -EAGAIN || err == -EINTR)
			continue;
		if (err < 0) {
			printk(KERN_WARNING
			       "lockd: terminating on error %d\n",
			       -err);
			break;
		}

		dprintk("lockd: request from %08x\n",
			(unsigned)ntohl(rqstp->rq_addr.sin_addr.s_addr));

		/*
		 * Look up the NFS client handle. The handle is needed for
		 * all but the GRANTED callback RPCs.
		 */
		rqstp->rq_client = NULL;
		if (nlmsvc_ops) {
			nlmsvc_ops->exp_readlock();
			rqstp->rq_client =
				nlmsvc_ops->exp_getclient(&rqstp->rq_addr);
		}

		/* End the grace period once its expiry time has passed. */
		if (nlmsvc_grace_period &&
		    time_before(grace_period_expire, jiffies))
			nlmsvc_grace_period = 0;
		svc_process(serv, rqstp);

		/* Unlock export hash tables */
		if (nlmsvc_ops)
			nlmsvc_ops->exp_unlock();
	}

	/*
	 * Check whether there's a new lockd process before
	 * shutting down the hosts and clearing the slot.
	 */
	if (!nlmsvc_pid || current->pid == nlmsvc_pid) {
		if (nlmsvc_ops)
			nlmsvc_ops->detach();
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
	} else
		printk(KERN_DEBUG
			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);		/* releases lockd_down() */
	/* Exit the RPC thread */
	svc_exit_thread(rqstp);

	/* release rpciod */
	rpciod_down();

	/* Release module */
	MOD_DEC_USE_COUNT;
}
 
/*
 * Bring up the lockd process if it's not already up.
 * Always increments the user count; only the first user creates the
 * svc_serv, its UDP (and optionally TCP) sockets and the kernel
 * thread. Returns 0 on success or a negative errno.
 */
int
lockd_up(void)
{
	static int		warned = 0;	/* rate-limits the makesock warning */
	struct svc_serv *	serv;
	int			error = 0;

	down(&nlmsvc_sema);
	/*
	 * Unconditionally increment the user count ... this is
	 * the number of clients who _want_ a lockd process.
	 */
	nlmsvc_users++;
	/*
	 * Check whether we're already up and running.
	 */
	if (nlmsvc_pid)
		goto out;

	/*
	 * Sanity check: if there's no pid,
	 * we should be the first user ...
	 */
	if (nlmsvc_users > 1)
		printk(KERN_WARNING
			"lockd_up: no pid, %d users??\n", nlmsvc_users);

	error = -ENOMEM;
	serv = svc_create(&nlmsvc_program, 0, NLMSVC_XDRSIZE);
	if (!serv) {
		printk(KERN_WARNING "lockd_up: create service failed\n");
		goto out;
	}

	if ((error = svc_makesock(serv, IPPROTO_UDP, nlm_udpport)) < 0
#ifdef CONFIG_NFSD_TCP
	 || (error = svc_makesock(serv, IPPROTO_TCP, nlm_tcpport)) < 0
#endif
		) {
		if (warned++ == 0)
			printk(KERN_WARNING
				"lockd_up: makesock failed, error=%d\n", error);
		goto destroy_and_out;
	}
	warned = 0;

	/*
	 * Create the kernel thread and wait for it to start.
	 */
	error = svc_create_thread(lockd, serv);
	if (error) {
		printk(KERN_WARNING
			"lockd_up: create thread failed, error=%d\n", error);
		goto destroy_and_out;
	}
	/* Blocks until lockd() has stored its pid and released us. */
	down(&lockd_start);

	/*
	 * Note: svc_serv structures have an initial use count of 1,
	 * so we exit through here on both success and failure.
	 */
destroy_and_out:
	svc_destroy(serv);
out:
	up(&nlmsvc_sema);
	return error;
}
 
/*
 * Decrement the user count and bring down lockd if we're the last.
 * The thread is killed with SIGKILL and we wait (bounded to one
 * second, since we hold nlmsvc_sema) for it to signal lockd_exit.
 */
void
lockd_down(void)
{
	static int warned = 0;	/* rate-limits the "no lockd running" warning */

	down(&nlmsvc_sema);
	if (nlmsvc_users) {
		if (--nlmsvc_users)
			goto out;	/* other users remain — keep lockd alive */
	} else
		printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);

	if (!nlmsvc_pid) {
		if (warned++ == 0)
			printk(KERN_WARNING "lockd_down: no lockd running.\n");
		goto out;
	}
	warned = 0;

	kill_proc(nlmsvc_pid, SIGKILL, 1);
	/*
	 * Wait for the lockd process to exit, but since we're holding
	 * the lockd semaphore, we can't wait around forever ...
	 */
	current->sigpending = 0;	/* so the sleep below isn't cut short */
	interruptible_sleep_on_timeout(&lockd_exit, HZ);
	if (nlmsvc_pid) {
		printk(KERN_WARNING
			"lockd_down: lockd failed to exit, clearing pid\n");
		nlmsvc_pid = 0;
	}
	/* Re-derive sigpending after clobbering it above. */
	spin_lock_irq(&current->sigmask_lock);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
out:
	up(&nlmsvc_sema);
}
 
#ifdef MODULE
/* New module support in 2.1.18 */
 
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
MODULE_DESCRIPTION("NFS file locking service version " LOCKD_VERSION ".");
MODULE_LICENSE("GPL");
MODULE_PARM(nlm_grace_period, "10-240l");
MODULE_PARM(nlm_timeout, "3-20l");
MODULE_PARM(nlm_udpport, "0-65535l");
MODULE_PARM(nlm_tcpport, "0-65535l");
 
/*
 * Module entry point: reset the static lockd bookkeeping.
 * NOTE(review): nlmsvc_sema is already initialized by DECLARE_MUTEX
 * at file scope, so the init_MUTEX here looks redundant (but harmless)
 * — confirm before removing.
 */
int
init_module(void)
{
	/* Init the static variables */
	init_MUTEX(&nlmsvc_sema);
	nlmsvc_users = 0;
	nlmsvc_pid = 0;
	return 0;
}
 
/*
 * Module exit point: drop all cached NLM peer state.
 */
void
cleanup_module(void)
{
	/* FIXME: delete all NLM clients */
	nlm_shutdown_hosts();
}
#else
/* not a module, so process bootargs
* lockd.udpport and lockd.tcpport
*/
 
/* Parse the "lockd.udpport=" boot parameter. */
static int __init udpport_set(char *str)
{
	unsigned long port = simple_strtoul(str, NULL, 0);

	nlm_udpport = port;
	return 1;
}
/* Parse the "lockd.tcpport=" boot parameter. */
static int __init tcpport_set(char *str)
{
	unsigned long port = simple_strtoul(str, NULL, 0);

	nlm_tcpport = port;
	return 1;
}
__setup("lockd.udpport=", udpport_set);
__setup("lockd.tcpport=", tcpport_set);
 
#endif
 
/*
 * Define NLM program and procedures
 */
/* NLM version 1 exposes 17 procedures from the shared table. */
static struct svc_version	nlmsvc_version1 = {
	1, 17, nlmsvc_procedures, NULL
};
/* NLM version 3 shares the same procedure table, 24 entries. */
static struct svc_version	nlmsvc_version3 = {
	3, 24, nlmsvc_procedures, NULL
};
#ifdef CONFIG_LOCKD_V4
/* NLM version 4 has its own procedure table. */
static struct svc_version	nlmsvc_version4 = {
	4, 24, nlmsvc_procedures4, NULL
};
#endif
/* Indexed by version number; NULL slots are unsupported versions. */
static struct svc_version *	nlmsvc_version[] = {
	NULL,
	&nlmsvc_version1,
	NULL,
	&nlmsvc_version3,
#ifdef CONFIG_LOCKD_V4
	&nlmsvc_version4,
#endif
};

static struct svc_stat		nlmsvc_stats;

#define NLM_NRVERS	(sizeof(nlmsvc_version)/sizeof(nlmsvc_version[0]))
struct svc_program		nlmsvc_program = {
	NLM_PROGRAM,		/* program number */
	1, NLM_NRVERS-1,	/* version range */
	NLM_NRVERS,		/* number of entries in nlmsvc_version */
	nlmsvc_version,		/* version table */
	"lockd",		/* service name */
	&nlmsvc_stats,		/* stats table */
};
/Makefile
0,0 → 1,21
#
# Makefile for the linux lock manager stuff
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definitions are now in the main makefile...
 
O_TARGET := lockd.o
 
export-objs := lockd_syms.o
 
obj-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
svcproc.o svcsubs.o mon.o xdr.o lockd_syms.o
 
obj-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
 
obj-m := $(O_TARGET)
 
include $(TOPDIR)/Rules.make
/xdr.c
0,0 → 1,674
/*
* linux/fs/lockd/xdr.c
*
* XDR support for lockd and the lock client.
*
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <linux/nfs.h>
 
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/stats.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
 
#define NLMDBG_FACILITY NLMDBG_XDR
 
 
/* Widen a 32-bit NLM wire offset into the kernel's loff_t. */
static inline loff_t
s32_to_loff_t(__s32 offset)
{
	loff_t res = offset;
	return res;
}
 
/* Clamp a loff_t into the signed 32-bit range the NLM wire format uses. */
static inline __s32
loff_t_to_s32(loff_t offset)
{
	if (offset >= NLM_OFFSET_MAX)
		return NLM_OFFSET_MAX;
	if (offset <= -NLM_OFFSET_MAX)
		return -NLM_OFFSET_MAX;
	return (__s32) offset;
}
 
/*
* XDR functions for basic NLM types
*/
/*
 * Decode an NLM cookie from the wire. A zero-length cookie is
 * replaced by four zero bytes; anything longer than NLM_MAXCOOKIELEN
 * is rejected (returns NULL).
 */
static inline u32 *nlm_decode_cookie(u32 *p, struct nlm_cookie *c)
{
	unsigned int len = ntohl(*p++);

	if (len == 0) {
		c->len = 4;
		memset(c->data, 0, 4);	/* hockeypux brain damage */
		return p;
	}
	if (len <= NLM_MAXCOOKIELEN) {
		c->len = len;
		memcpy(c->data, p, len);
		return p + ((len + 3) >> 2);	/* skip data, word-aligned */
	}
	printk(KERN_NOTICE
		"lockd: bad cookie size %d (only cookies under %d bytes are supported.)\n", len, NLM_MAXCOOKIELEN);
	return NULL;
}
 
/* Encode an NLM cookie: length word followed by word-padded data. */
static inline u32 *
nlm_encode_cookie(u32 *p, struct nlm_cookie *c)
{
	unsigned int words = (c->len + 3) >> 2;

	*p++ = htonl(c->len);
	memcpy(p, c->data, c->len);
	return p + words;
}
 
/*
 * Decode an NFSv2 file handle; the wire length must be exactly
 * NFS2_FHSIZE, otherwise NULL is returned.
 */
static inline u32 *
nlm_decode_fh(u32 *p, struct nfs_fh *f)
{
	unsigned int len = ntohl(*p++);

	if (len != NFS2_FHSIZE) {
		printk(KERN_NOTICE
			"lockd: bad fhandle size %d (should be %d)\n",
			len, NFS2_FHSIZE);
		return NULL;
	}
	f->size = NFS2_FHSIZE;
	memset(f->data, 0, sizeof(f->data));	/* clear any trailing slack */
	memcpy(f->data, p, NFS2_FHSIZE);
	return p + XDR_QUADLEN(NFS2_FHSIZE);
}
 
/* Encode an NFSv2 file handle: fixed length word plus handle bytes. */
static inline u32 *
nlm_encode_fh(u32 *p, struct nfs_fh *f)
{
	*p++ = htonl(NFS2_FHSIZE);
	memcpy(p, f->data, NFS2_FHSIZE);
	p += XDR_QUADLEN(NFS2_FHSIZE);
	return p;
}
 
/*
* Encode and decode owner handle
*/
/* Decode the opaque lock-owner handle (thin netobj wrapper). */
static inline u32 *
nlm_decode_oh(u32 *p, struct xdr_netobj *oh)
{
	return xdr_decode_netobj(p, oh);
}
 
/* Encode the opaque lock-owner handle (thin netobj wrapper). */
static inline u32 *
nlm_encode_oh(u32 *p, struct xdr_netobj *oh)
{
	return xdr_encode_netobj(p, oh);
}
 
/*
 * Decode the variable part of an NLM lock: caller name, file handle,
 * owner handle, pid and byte range. The wire (start, len) pair is
 * converted into the kernel's [fl_start, fl_end] form; len == 0
 * means "to end of file". Returns NULL on decode failure.
 */
static inline u32 *
nlm_decode_lock(u32 *p, struct nlm_lock *lock)
{
	struct file_lock	*fl = &lock->fl;
	s32			start, len, end;

	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len,
					    NLM_MAXSTRLEN))
	 || !(p = nlm_decode_fh(p, &lock->fh))
	 || !(p = nlm_decode_oh(p, &lock->oh)))
		return NULL;

	locks_init_lock(fl);
	fl->fl_owner = current->files;
	fl->fl_pid   = ntohl(*p++);
	fl->fl_flags = FL_POSIX;
	fl->fl_type  = F_RDLCK;		/* as good as anything else */
	start = ntohl(*p++);
	len = ntohl(*p++);
	end = start + len - 1;

	fl->fl_start = s32_to_loff_t(start);

	/* len == 0, or an end that overflowed s32, means lock to EOF */
	if (len == 0 || end < 0)
		fl->fl_end = OFFSET_MAX;
	else
		fl->fl_end = s32_to_loff_t(end);
	return p;
}
 
/*
 * Encode a lock as part of an NLM call: caller name, file handle,
 * owner handle, pid and (start, len) byte range. Returns NULL when
 * encoding fails or the range cannot be represented in 32 bits.
 */
static u32 *
nlm_encode_lock(u32 *p, struct nlm_lock *lock)
{
	struct file_lock	*fl = &lock->fl;
	__s32			start, len;

	if (!(p = xdr_encode_string(p, lock->caller))
	 || !(p = nlm_encode_fh(p, &lock->fh))
	 || !(p = nlm_encode_oh(p, &lock->oh)))
		return NULL;

	/* Refuse offsets past the 32-bit limit; fl_end == OFFSET_MAX
	 * is the special "to end of file" case and is allowed. */
	if (fl->fl_start > NLM_OFFSET_MAX
	 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
		return NULL;

	start = loff_t_to_s32(fl->fl_start);
	if (fl->fl_end == OFFSET_MAX)
		len = 0;		/* len 0 encodes "to end of file" */
	else
		len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);

	*p++ = htonl(fl->fl_pid);
	*p++ = htonl(start);
	*p++ = htonl(len);

	return p;
}
 
/*
 * Encode result of a TEST/TEST_MSG call: cookie, status, and — only
 * when the status is nlm_lck_denied — the conflicting holder
 * (exclusive flag, pid, owner handle, byte range).
 */
static u32 *
nlm_encode_testres(u32 *p, struct nlm_res *resp)
{
	s32		start, len;

	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return 0;
	/* status is written without byte-order conversion */
	*p++ = resp->status;

	if (resp->status == nlm_lck_denied) {
		struct file_lock	*fl = &resp->lock.fl;

		*p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one;
		*p++ = htonl(fl->fl_pid);

		/* Encode owner handle. */
		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
			return 0;

		start = loff_t_to_s32(fl->fl_start);
		if (fl->fl_end == OFFSET_MAX)
			len = 0;	/* "to end of file" */
		else
			len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);

		*p++ = htonl(start);
		*p++ = htonl(len);
	}

	return p;
}
 
/*
* Check buffer bounds after decoding arguments
*/
/* Verify the decoded arguments did not run past the request buffer. */
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, u32 *p)
{
	struct svc_buf	*buf = &rqstp->rq_argbuf;

	return p - buf->base <= buf->buflen;
}
 
/* Record the encoded reply length and verify it fits the buffer. */
static inline int
xdr_ressize_check(struct svc_rqst *rqstp, u32 *p)
{
	struct svc_buf	*buf = &rqstp->rq_resbuf;

	buf->len = p - buf->base;
	return (buf->len <= buf->buflen);
}
 
/*
* First, the server side XDR functions
*/
/*
 * Server side: decode TEST arguments — cookie, exclusive flag, lock.
 * The lock is decoded as F_RDLCK and promoted to F_WRLCK when the
 * exclusive flag is set.
 */
int
nlmsvc_decode_testargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;

	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;

	return xdr_argsize_check(rqstp, p);
}
 
/* Server side: encode a TEST result into the reply buffer. */
int
nlmsvc_encode_testres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm_encode_testres(p, resp);
	if (p == NULL)
		return 0;
	return xdr_ressize_check(rqstp, p);
}
 
/*
 * Server side: decode LOCK arguments — cookie, block flag, exclusive
 * flag, lock, reclaim flag and NSM state. Clients are monitored by
 * default.
 */
int
nlmsvc_decode_lockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	argp->reclaim = ntohl(*p++);
	argp->state = ntohl(*p++);
	argp->monitor = 1;		/* monitor client by default */

	return xdr_argsize_check(rqstp, p);
}
 
/*
 * Server side: decode CANCEL arguments — same leading fields as LOCK
 * (cookie, block flag, exclusive flag, lock) without reclaim/state.
 */
int
nlmsvc_decode_cancargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	return xdr_argsize_check(rqstp, p);
}
 
/* Server side: decode UNLOCK arguments; lock type forced to F_UNLCK. */
int
nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	p = nlm_decode_cookie(p, &argp->cookie);
	if (p != NULL)
		p = nlm_decode_lock(p, &argp->lock);
	if (p == NULL)
		return 0;
	argp->lock.fl.fl_type = F_UNLCK;
	return xdr_argsize_check(rqstp, p);
}
 
/*
 * Server side: decode SHARE/UNSHARE arguments — cookie, caller, file
 * handle, owner handle, fsm_mode and fsm_access. The lock is zeroed
 * first because the request carries no pid or byte range; fl_pid is
 * set to an all-ones sentinel.
 */
int
nlmsvc_decode_shareargs(struct svc_rqst *rqstp, u32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	memset(lock, 0, sizeof(*lock));
	locks_init_lock(&lock->fl);
	lock->fl.fl_pid = ~(u32) 0;	/* no pid on the wire */

	if (!(p = nlm_decode_cookie(p, &argp->cookie))
	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN))
	 || !(p = nlm_decode_fh(p, &lock->fh))
	 || !(p = nlm_decode_oh(p, &lock->oh)))
		return 0;
	argp->fsm_mode = ntohl(*p++);
	argp->fsm_access = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}
 
/* Server side: encode a SHARE result (cookie, status, zero sequence). */
int
nlmsvc_encode_shareres(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm_encode_cookie(p, &resp->cookie);
	if (p == NULL)
		return 0;
	*p++ = resp->status;
	*p++ = xdr_zero;		/* sequence argument */
	return xdr_ressize_check(rqstp, p);
}
 
/* Server side: encode a plain result (cookie plus status word). */
int
nlmsvc_encode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm_encode_cookie(p, &resp->cookie);
	if (p == NULL)
		return 0;
	*p++ = resp->status;
	return xdr_ressize_check(rqstp, p);
}
 
/* Server side: decode a FREE_ALL notify — caller name and NSM state. */
int
nlmsvc_decode_notify(struct svc_rqst *rqstp, u32 *p, struct nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	p = xdr_decode_string_inplace(p, &lock->caller,
				      &lock->len, NLM_MAXSTRLEN);
	if (p == NULL)
		return 0;
	argp->state = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}
 
/*
 * Server side: decode an SM_NOTIFY (statd reboot) callback — monitored
 * host name, new NSM state, and the host's address.
 */
int
nlmsvc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
{
	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	/* Preserve the address in network byte order */
	argp->addr = *p++;
	return xdr_argsize_check(rqstp, p);
}
 
/* Server side: decode a result callback (cookie plus status). */
int
nlmsvc_decode_res(struct svc_rqst *rqstp, u32 *p, struct nlm_res *resp)
{
	p = nlm_decode_cookie(p, &resp->cookie);
	if (p == NULL)
		return 0;
	resp->status = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}
 
/* Server side: NULL-procedure argument decoder — only bounds-checks. */
int
nlmsvc_decode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
{
	return xdr_argsize_check(rqstp, p);
}
 
/* Server side: NULL-procedure result encoder — only records the length. */
int
nlmsvc_encode_void(struct svc_rqst *rqstp, u32 *p, void *dummy)
{
	return xdr_ressize_check(rqstp, p);
}
 
/*
* Now, the client side XDR functions
*/
/* Client side: void argument encoder — just finalize the send buffer. */
static int
nlmclt_encode_void(struct rpc_rqst *req, u32 *p, void *ptr)
{
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Client side: void reply decoder — nothing to unpack. */
static int
nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
{
	return 0;
}
 
/* Client side: encode TEST arguments — cookie, exclusive flag, lock. */
static int
nlmclt_encode_testargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	p = nlm_encode_cookie(p, &argp->cookie);
	if (p == NULL)
		return -EIO;
	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
	p = nlm_encode_lock(p, lock);
	if (p == NULL)
		return -EIO;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/*
 * Client side: decode a TEST reply. On NLM_LCK_DENIED the reply also
 * carries the conflicting holder, which is unpacked into resp->lock
 * (exclusive flag, pid, owner handle and byte range).
 */
static int
nlmclt_decode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
		return -EIO;
	resp->status = ntohl(*p++);
	if (resp->status == NLM_LCK_DENIED) {
		struct file_lock	*fl = &resp->lock.fl;
		u32			excl;
		s32			start, len, end;

		memset(&resp->lock, 0, sizeof(resp->lock));
		locks_init_lock(fl);
		excl = ntohl(*p++);
		fl->fl_pid = ntohl(*p++);
		if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
			return -EIO;

		fl->fl_flags = FL_POSIX;
		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
		start = ntohl(*p++);
		len = ntohl(*p++);
		end = start + len - 1;

		fl->fl_start = s32_to_loff_t(start);
		/* len == 0, or s32 overflow of end, means "to end of file" */
		if (len == 0 || end < 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = s32_to_loff_t(end);
	}
	return 0;
}
 
 
/*
 * Client side: encode LOCK arguments — cookie, block flag, exclusive
 * flag, lock, reclaim flag and our NSM state.
 */
static int
nlmclt_encode_lockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	p = nlm_encode_cookie(p, &argp->cookie);
	if (p == NULL)
		return -EIO;
	*p++ = argp->block? xdr_one : xdr_zero;
	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
	p = nlm_encode_lock(p, lock);
	if (p == NULL)
		return -EIO;
	*p++ = argp->reclaim? xdr_one : xdr_zero;
	*p++ = htonl(argp->state);
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Client side: encode CANCEL arguments — cookie, block, exclusive, lock. */
static int
nlmclt_encode_cancargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	p = nlm_encode_cookie(p, &argp->cookie);
	if (p == NULL)
		return -EIO;
	*p++ = argp->block? xdr_one : xdr_zero;
	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
	p = nlm_encode_lock(p, lock);
	if (p == NULL)
		return -EIO;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Client side: encode UNLOCK arguments — cookie and lock only. */
static int
nlmclt_encode_unlockargs(struct rpc_rqst *req, u32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	p = nlm_encode_cookie(p, &argp->cookie);
	if (p == NULL)
		return -EIO;
	p = nlm_encode_lock(p, lock);
	if (p == NULL)
		return -EIO;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Client side: encode a result callback — cookie plus status word. */
static int
nlmclt_encode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	p = nlm_encode_cookie(p, &resp->cookie);
	if (p == NULL)
		return -EIO;
	*p++ = resp->status;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Client side: encode a TEST result callback. */
static int
nlmclt_encode_testres(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	p = nlm_encode_testres(p, resp);
	if (p == NULL)
		return -EIO;
	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
	return 0;
}
 
/* Client side: decode a plain reply — cookie plus status. */
static int
nlmclt_decode_res(struct rpc_rqst *req, u32 *p, struct nlm_res *resp)
{
	p = nlm_decode_cookie(p, &resp->cookie);
	if (p == NULL)
		return -EIO;
	resp->status = ntohl(*p++);
	return 0;
}
 
/*
* Buffer requirements for NLM
*/
#define NLM_void_sz 0
#define NLM_cookie_sz 1+QUADLEN(NLM_MAXCOOKIELEN)
#define NLM_caller_sz 1+QUADLEN(sizeof(system_utsname.nodename))
#define NLM_netobj_sz 1+QUADLEN(XDR_MAX_NETOBJ)
/* #define NLM_owner_sz 1+QUADLEN(NLM_MAXOWNER) */
#define NLM_fhandle_sz 1+QUADLEN(NFS2_FHSIZE)
#define NLM_lock_sz 3+NLM_caller_sz+NLM_netobj_sz+NLM_fhandle_sz
#define NLM_holder_sz 4+NLM_netobj_sz
 
#define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz
#define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz
#define NLM_cancargs_sz NLM_cookie_sz+2+NLM_lock_sz
#define NLM_unlockargs_sz NLM_cookie_sz+NLM_lock_sz
 
#define NLM_testres_sz NLM_cookie_sz+1+NLM_holder_sz
#define NLM_res_sz NLM_cookie_sz+1
#define NLM_norep_sz 0
 
#ifndef MAX
# define MAX(a, b) (((a) > (b))? (a) : (b))
#endif
 
/*
* For NLM, a void procedure really returns nothing
*/
#define nlmclt_decode_norep NULL
 
#define PROC(proc, argtype, restype) \
{ .p_procname = "nlm_" #proc, \
.p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \
.p_decode = (kxdrproc_t) nlmclt_decode_##restype, \
.p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2 \
}
 
/* Client-side RPC procedure table, indexed by NLM procedure number.
 * Entries 20-23 are only meaningful when share support is compiled in. */
static struct rpc_procinfo	nlm_procedures[] = {
    PROC(null,		void,		void),
    PROC(test,		testargs,	testres),
    PROC(lock,		lockargs,	res),
    PROC(canc,		cancargs,	res),
    PROC(unlock,	unlockargs,	res),
    PROC(granted,	testargs,	res),
    PROC(test_msg,	testargs,	norep),
    PROC(lock_msg,	lockargs,	norep),
    PROC(canc_msg,	cancargs,	norep),
    PROC(unlock_msg,	unlockargs,	norep),
    PROC(granted_msg,	testargs,	norep),
    PROC(test_res,	testres,	norep),
    PROC(lock_res,	res,		norep),
    PROC(canc_res,	res,		norep),
    PROC(unlock_res,	res,		norep),
    PROC(granted_res,	res,		norep),
    PROC(undef,		void,		void),
    PROC(undef,		void,		void),
    PROC(undef,		void,		void),
    PROC(undef,		void,		void),
#ifdef NLMCLNT_SUPPORT_SHARES
    PROC(share,		shareargs,	shareres),
    PROC(unshare,	shareargs,	shareres),
    PROC(nm_lock,	lockargs,	res),
    PROC(free_all,	notify,		void),
#else
    PROC(undef,		void,		void),
    PROC(undef,		void,		void),
    PROC(undef,		void,		void),
    PROC(undef,		void,		void),
#endif
};

/* NLM v1 exposes 16 procedures from the shared table. */
static struct rpc_version	nlm_version1 = {
	1, 16, nlm_procedures,
};

/* NLM v3 shares the same procedure table, 24 entries. */
static struct rpc_version	nlm_version3 = {
	3, 24, nlm_procedures,
};

#ifdef CONFIG_LOCKD_V4
extern struct rpc_version nlm_version4;
#endif

/* Indexed by version number; NULL slots are unsupported versions. */
static struct rpc_version *	nlm_versions[] = {
	NULL,
	&nlm_version1,
	NULL,
	&nlm_version3,
#ifdef CONFIG_LOCKD_V4
	&nlm_version4,
#endif
};

static struct rpc_stat		nlm_stats;

/* Client-side RPC program definition for the NLM service. */
struct rpc_program		nlm_program = {
	"lockd",
	NLM_PROGRAM,
	sizeof(nlm_versions) / sizeof(nlm_versions[0]),
	nlm_versions,
	&nlm_stats,
};
 
#ifdef LOCKD_DEBUG
/* Map an NLM procedure number to its name for debug output. */
char *
nlm_procname(u32 proc)
{
	if (proc < sizeof(nlm_procedures)/sizeof(nlm_procedures[0]))
		return nlm_procedures[proc].p_procname;
	return "unknown";
}
#endif
 
#ifdef RPC_DEBUG
/*
 * Render an NLM cookie as a hex string for debug output.
 * Truncates with "..." if the cookie would overflow the buffer.
 */
const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
{
	/*
	 * We can get away with a static buffer because we're only
	 * called with BKL held.
	 */
	static char buf[2*NLM_MAXCOOKIELEN+1];
	int i;
	int len = sizeof(buf);
	char *p = buf;

	len--; /* allow for trailing \0 */
	if (len < 3)
		return "???";
	for (i = 0 ; i < cookie->len ; i++) {
		if (len < 2) {
			/* Overwrite the last hex pair with an ellipsis. */
			strcpy(p-3, "...");
			break;
		}
		sprintf(p, "%02x", cookie->data[i]);
		p += 2;
		len -= 2;
	}
	*p = '\0';

	return buf;
}
#endif
/host.c
0,0 → 1,343
/*
* linux/fs/lockd/host.c
*
* Management for NLM peer hosts. The nlm_host struct is shared
* between client and server implementation. The only reason to
* do so is to reduce code bloat.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
 
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
 
 
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
#define NLM_HOST_MAX 64
#define NLM_HOST_NRHASH 32
#define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_PTRHASH(ptr) ((((u32)(unsigned long) ptr) / 32) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND (60 * HZ)
#define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
#define NLM_HOST_ADDR(sv) (&(sv)->s_nlmclnt->cl_xprt->addr)
 
static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
static unsigned long next_gc;
static int nrhosts;
static DECLARE_MUTEX(nlm_host_sema);
 
 
static void nlm_gc_hosts(void);
 
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 * Client-side wrapper: looks up by peer address only (no export client).
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	return nlm_lookup_host(NULL, sin, proto, version);
}
 
/*
 * Find an NLM client handle in the cache. If there is none, create it.
 * Server-side wrapper: keys the lookup on the request's export client
 * and transport parameters.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
	return nlm_lookup_host(rqstp->rq_client, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers);
}
 
/*
 * Match the given host against client/address: by export client when
 * one is supplied (server side), otherwise by peer address.
 */
static inline int
nlm_match_host(struct nlm_host *host, struct svc_client *clnt,
	       struct sockaddr_in *sin)
{
	if (clnt)
		return host->h_exportent == clnt;
	return nlm_cmp_addr(&host->h_addr, sin);
}
 
/*
 * Common host lookup routine for server & client.
 * Finds an existing nlm_host matching clnt/sin and proto/version
 * (moving it to the front of its hash chain) or creates a new one.
 * Returns the host with an extra reference held, or NULL.
 */
struct nlm_host *
nlm_lookup_host(struct svc_client *clnt, struct sockaddr_in *sin,
		int proto, int version)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	if (!clnt && !sin) {
		printk(KERN_NOTICE "lockd: no clnt or addr in lookup_host!\n");
		return NULL;
	}

	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
		(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

	/* Server-side lookups hash on the export client pointer,
	 * client-side lookups on the peer address. */
	if (clnt)
		hash = NLM_PTRHASH(clnt);
	else
		hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	down(&nlm_host_sema);

	/* Opportunistic garbage collection of expired hosts. */
	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	for (hp = &nlm_hosts[hash]; (host = *hp); hp = &host->h_next) {
		if (proto && host->h_proto != proto)
			continue;
		if (version && host->h_version != version)
			continue;

		if (nlm_match_host(host, clnt, sin)) {
			/* Move the entry to the head of its chain. */
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			up(&nlm_host_sema);
			return host;
		}
	}

	/* special hack for nlmsvc_invalidate_client */
	if (sin == NULL)
		goto nohost;	/* host is NULL here — lookup-only mode */

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
		goto nohost;
	memset(host, 0, sizeof(*host));

	/* Name the host after its dotted-quad address. */
	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%d.%d.%d.%d",
		(unsigned char) (ntohl(addr) >> 24),
		(unsigned char) (ntohl(addr) >> 16),
		(unsigned char) (ntohl(addr) >> 8),
		(unsigned char) (ntohl(addr) >> 0));

	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_authflavor = RPC_AUTH_UNIX;
	host->h_rpcclnt    = NULL;
	init_MUTEX(&host->h_sema);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	host->h_count      = 1;		/* reference returned to caller */
	init_waitqueue_head(&host->h_gracewait);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_exportent  = clnt;

	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;

	/* Force a GC pass on the next lookup once past the soft cap. */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	up(&nlm_host_sema);
	return host;
}
 
/*
 * Create the NLM RPC client for an NLM peer.
 * Returns the (possibly cached) rpc_clnt, or NULL on failure. For
 * non-stream transports the cached port is periodically invalidated
 * so that a restarted remote lockd is re-discovered via portmap.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct rpc_xprt	*xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
		(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	down(&host->h_sema);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 * Note: why keep rebinding if we're on a tcp connection?
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) {
			clnt->cl_port = 0;	/* zero port forces a pmap query */
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
				host->h_nextrebind - jiffies);
		}
	} else {
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (xprt == NULL)
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);

		clnt = rpc_create_client(xprt, host->h_name, &nlm_program,
					 host->h_version, host->h_authflavor);
		if (clnt == NULL) {
			xprt_destroy(xprt);
			goto forgetit;
		}
		clnt->cl_autobind = 1;	/* turn on pmap queries */
		xprt->nocong = 1;	/* No congestion control for NLM */
		xprt->resvport = 1;	/* NLM requires a reserved port */

		host->h_rpcclnt = clnt;
	}

	up(&host->h_sema);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	up(&host->h_sema);
	return NULL;
}
 
/*
* Force a portmap lookup of the remote lockd port
*/
void
nlm_rebind_host(struct nlm_host *host)
{
dprintk("lockd: rebind host %s\n", host->h_name);
if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
host->h_rpcclnt->cl_port = 0;
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
}
}
 
/*
* Increment NLM host count
*/
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
if (host) {
dprintk("lockd: get host %s\n", host->h_name);
host->h_count ++;
host->h_expires = jiffies + NLM_HOST_EXPIRE;
}
return host;
}
 
/*
* Release NLM host after use
*/
void nlm_release_host(struct nlm_host *host)
{
if (host && host->h_count) {
dprintk("lockd: release host %s\n", host->h_name);
host->h_count --;
}
}
 
/*
* Shut down the hosts module.
* Note that this routine is called only at server shutdown time.
*/
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		hash;

	dprintk("lockd: shutting down host module\n");
	down(&nlm_host_sema);

	/* Expire every host immediately so the GC pass below considers
	 * all of them for deletion.
	 * NOTE(review): h_expires = 0 looks unsafe after a jiffies wrap
	 * (time_before(jiffies, 0) then holds and the host survives);
	 * h_expires = jiffies would be wrap-safe — confirm.
	 */
	dprintk("lockd: nuking all hosts...\n");
	for (hash = 0; hash < NLM_HOST_NRHASH; hash++) {
		for (host = nlm_hosts[hash]; host; host = host->h_next)
			host->h_expires = 0;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	up(&nlm_host_sema);

	/* Anything still alive here is pinned by a reference or an
	 * in-use mark; report it so the leak can be debugged. */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (hash = 0; hash < NLM_HOST_NRHASH; hash++) {
			for (host = nlm_hosts[hash]; host; host = host->h_next) {
				dprintk(" %s (cnt %d use %d exp %ld)\n",
					host->h_name, host->h_count,
					host->h_inuse, host->h_expires);
			}
		}
	}
}
 
/*
* Garbage collect any unused NLM hosts.
* This GC combines reference counting for async operations with
* mark & sweep for resources held by remote clients.
*/
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**prevp, *host;
	struct rpc_clnt	*rpc;
	int		hash;

	dprintk("lockd: host garbage collection\n");

	/* Preparation: clear the in-use mark on every host */
	for (hash = 0; hash < NLM_HOST_NRHASH; hash++) {
		for (host = nlm_hosts[hash]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep: unlink and free each host that is unreferenced,
	 * unmarked and past its expiry time. */
	for (hash = 0; hash < NLM_HOST_NRHASH; hash++) {
		prevp = &nlm_hosts[hash];
		while ((host = *prevp) != NULL) {
			if (host->h_count || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				prevp = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*prevp = host->h_next;
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			rpc = host->h_rpcclnt;
			if (rpc != NULL) {
				if (atomic_read(&rpc->cl_users)) {
					/* Handle still in use elsewhere:
					 * flag it dead so it is reaped when
					 * the last user drops it. */
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					rpc->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.