/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Dug Song       <dugsong@monkey.org>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id$
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

#define NFS_NGROUPS     16

#define GSS_CRED_SLACK          1024            /* XXX: unused */
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK          100

/* XXX this define must match the gssd define
* as it is passed to gssd to signal the use of
* machine creds should be part of the shared rpc interface */

#define CA_RUN_AS_MACHINE  0x00000200

/* dump the buffer in `emacs-hexl' style */
#define isprint(c)      ((c > 0x1f) && (c < 0x7f))

struct gss_auth {
        struct kref kref;
        struct rpc_auth rpc_auth;
        struct gss_api_mech *mech;
        enum rpc_gss_svc service;
        struct rpc_clnt *client;
        struct dentry *dentry;
};

static void gss_free_ctx(struct gss_cl_ctx *);
static struct rpc_pipe_ops gss_upcall_ops;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
        atomic_inc(&ctx->count);
        return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
        if (atomic_dec_and_test(&ctx->count))
                gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the inode->i_lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_cl_ctx *old;

        old = gss_cred->gc_ctx;
        rcu_assign_pointer(gss_cred->gc_ctx, ctx);
        set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
        if (old)
                gss_put_ctx(old);
}

static int
gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        int res = 0;

        rcu_read_lock();
        if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx)
                res = 1;
        rcu_read_unlock();
        return res;
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
        const void *q = (const void *)((const char *)p + len);
        if (unlikely(q > end || q < p))
                return ERR_PTR(-EFAULT);
        memcpy(res, p, len);
        return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
        const void *q;
        unsigned int len;

        p = simple_get_bytes(p, end, &len, sizeof(len));
        if (IS_ERR(p))
                return p;
        q = (const void *)((const char *)p + len);
        if (unlikely(q > end || q < p))
                return ERR_PTR(-EFAULT);
        dest->data = kmemdup(p, len, GFP_KERNEL);
        if (unlikely(dest->data == NULL))
                return ERR_PTR(-ENOMEM);
        dest->len = len;
        return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_cl_ctx *ctx = NULL;

        rcu_read_lock();
        if (gss_cred->gc_ctx)
                ctx = gss_get_ctx(gss_cred->gc_ctx);
        rcu_read_unlock();
        return ctx;
}

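/* Allocate a fresh client context, refcounted at one and starting in the
 * RPC_GSS_PROC_DATA state with sequence number 1. The negotiated fields
 * (expiry, window, wire handle, mechanism context) are filled in later by
 * gss_fill_context() from the gssd downcall. */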
static struct gss_cl_ctx *
gss_alloc_context(void)
{
        struct gss_cl_ctx *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx != NULL) {
                ctx->gc_proc = RPC_GSS_PROC_DATA;
                ctx->gc_seq = 1;        /* NetApp 6.4R1 doesn't accept seq. no. 0 */
                spin_lock_init(&ctx->gc_seq_lock);
                atomic_set(&ctx->count,1);
        }
        return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
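/* Parse the reply that gssd writes down the pipe: the credential lifetime,
 * the sequence window, the opaque wire context handle, and finally the
 * mechanism-specific security context blob, which is handed off to
 * gss_import_sec_context(). */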
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
        const void *q;
        unsigned int seclen;
        unsigned int timeout;
        u32 window_size;
        int ret;

        /* First unsigned int gives the lifetime (in seconds) of the cred */
        p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
        if (IS_ERR(p))
                goto err;
        if (timeout == 0)
                timeout = GSSD_MIN_TIMEOUT;
        ctx->gc_expiry = jiffies + (unsigned long)timeout * HZ * 3 / 4;
        /* Sequence number window. Determines the maximum number of simultaneous requests */
        p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
        if (IS_ERR(p))
                goto err;
        ctx->gc_win = window_size;
        /* gssd signals an error by passing ctx->gc_win = 0: */
        if (ctx->gc_win == 0) {
                /* in which case, p points to an error code which we ignore */
                p = ERR_PTR(-EACCES);
                goto err;
        }
        /* copy the opaque wire context */
        p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
        if (IS_ERR(p))
                goto err;
        /* import the opaque security context */
        p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
        if (IS_ERR(p))
                goto err;
        q = (const void *)((const char *)p + seclen);
        if (unlikely(q > end || q < p)) {
                p = ERR_PTR(-EFAULT);
                goto err;
        }
        ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx);
        if (ret < 0) {
                p = ERR_PTR(ret);
                goto err;
        }
        return q;
err:
        dprintk("RPC:       gss_fill_context returning %ld\n", -PTR_ERR(p));
        return p;
}


struct gss_upcall_msg {
        atomic_t count;
        uid_t   uid;
        struct rpc_pipe_msg msg;
        struct list_head list;
        struct gss_auth *auth;
        struct rpc_wait_queue rpc_waitqueue;
        wait_queue_head_t waitqueue;
        struct gss_cl_ctx *ctx;
};

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
        if (!atomic_dec_and_test(&gss_msg->count))
                return;
        BUG_ON(!list_empty(&gss_msg->list));
        if (gss_msg->ctx != NULL)
                gss_put_ctx(gss_msg->ctx);
        kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_inode *rpci, uid_t uid)
{
        struct gss_upcall_msg *pos;
        list_for_each_entry(pos, &rpci->in_downcall, list) {
                if (pos->uid != uid)
                        continue;
                atomic_inc(&pos->count);
                dprintk("RPC:       gss_find_upcall found msg %p\n", pos);
                return pos;
        }
        dprintk("RPC:       gss_find_upcall found nothing\n");
        return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_auth *gss_auth, struct gss_upcall_msg *gss_msg)
{
        struct inode *inode = gss_auth->dentry->d_inode;
        struct rpc_inode *rpci = RPC_I(inode);
        struct gss_upcall_msg *old;

        spin_lock(&inode->i_lock);
        old = __gss_find_upcall(rpci, gss_msg->uid);
        if (old == NULL) {
                atomic_inc(&gss_msg->count);
                list_add(&gss_msg->list, &rpci->in_downcall);
        } else
                gss_msg = old;
        spin_unlock(&inode->i_lock);
        return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
        list_del_init(&gss_msg->list);
        rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
        wake_up_all(&gss_msg->waitqueue);
        atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
        struct gss_auth *gss_auth = gss_msg->auth;
        struct inode *inode = gss_auth->dentry->d_inode;

        if (list_empty(&gss_msg->list))
                return;
        spin_lock(&inode->i_lock);
        if (!list_empty(&gss_msg->list))
                __gss_unhash_msg(gss_msg);
        spin_unlock(&inode->i_lock);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
        struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
                        struct gss_cred, gc_base);
        struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
        struct inode *inode = gss_msg->auth->dentry->d_inode;

        spin_lock(&inode->i_lock);
        if (gss_msg->ctx)
                gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx));
        else
                task->tk_status = gss_msg->msg.errno;
        gss_cred->gc_upcall = NULL;
        rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
        spin_unlock(&inode->i_lock);
        gss_release_msg(gss_msg);
}

static inline struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
{
        struct gss_upcall_msg *gss_msg;

        gss_msg = kzalloc(sizeof(*gss_msg), GFP_KERNEL);
        if (gss_msg != NULL) {
                INIT_LIST_HEAD(&gss_msg->list);
                rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
                init_waitqueue_head(&gss_msg->waitqueue);
                atomic_set(&gss_msg->count, 1);
                gss_msg->msg.data = &gss_msg->uid;
                gss_msg->msg.len = sizeof(gss_msg->uid);
                gss_msg->uid = uid;
                gss_msg->auth = gss_auth;
        }
        return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
        struct gss_upcall_msg *gss_new, *gss_msg;

        gss_new = gss_alloc_msg(gss_auth, cred->cr_uid);
        if (gss_new == NULL)
                return ERR_PTR(-ENOMEM);
        gss_msg = gss_add_msg(gss_auth, gss_new);
        if (gss_msg == gss_new) {
                int res = rpc_queue_upcall(gss_auth->dentry->d_inode, &gss_new->msg);
                if (res) {
                        gss_unhash_msg(gss_new);
                        gss_msg = ERR_PTR(res);
                }
        } else
                gss_release_msg(gss_new);
        return gss_msg;
}

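/* Asynchronous context refresh: queue (or join) an upcall for this cred's
 * uid and put the RPC task to sleep on the upcall's wait queue. When the
 * downcall arrives, gss_upcall_callback() installs the context and wakes
 * the task. */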
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_auth *gss_auth = container_of(cred->cr_auth,
                        struct gss_auth, rpc_auth);
        struct gss_cred *gss_cred = container_of(cred,
                        struct gss_cred, gc_base);
        struct gss_upcall_msg *gss_msg;
        struct inode *inode = gss_auth->dentry->d_inode;
        int err = 0;

        dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid,
                                                                cred->cr_uid);
        gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred);
        if (IS_ERR(gss_msg)) {
                err = PTR_ERR(gss_msg);
                goto out;
        }
        spin_lock(&inode->i_lock);
        if (gss_cred->gc_upcall != NULL)
                rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
        else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
                task->tk_timeout = 0;
                gss_cred->gc_upcall = gss_msg;
                /* gss_upcall_callback will release the reference to gss_upcall_msg */
                atomic_inc(&gss_msg->count);
                rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
        } else
                err = gss_msg->msg.errno;
        spin_unlock(&inode->i_lock);
        gss_release_msg(gss_msg);
out:
        dprintk("RPC: %5u gss_refresh_upcall for uid %u result %d\n",
                        task->tk_pid, cred->cr_uid, err);
        return err;
}

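/* Synchronous counterpart of gss_refresh_upcall(), used when initialising
 * a new cred: block the calling process (interruptibly) until gssd either
 * supplies a context or reports an error. */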
static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
        struct inode *inode = gss_auth->dentry->d_inode;
        struct rpc_cred *cred = &gss_cred->gc_base;
        struct gss_upcall_msg *gss_msg;
        DEFINE_WAIT(wait);
        int err = 0;

        dprintk("RPC:       gss_upcall for uid %u\n", cred->cr_uid);
        gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
        if (IS_ERR(gss_msg)) {
                err = PTR_ERR(gss_msg);
                goto out;
        }
        for (;;) {
                prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE);
                spin_lock(&inode->i_lock);
                if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
                        break;
                }
                spin_unlock(&inode->i_lock);
                if (signalled()) {
                        err = -ERESTARTSYS;
                        goto out_intr;
                }
                schedule();
        }
        if (gss_msg->ctx)
                gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx));
        else
                err = gss_msg->msg.errno;
        spin_unlock(&inode->i_lock);
out_intr:
        finish_wait(&gss_msg->waitqueue, &wait);
        gss_release_msg(gss_msg);
out:
        dprintk("RPC:       gss_create_upcall for uid %u result %d\n",
                        cred->cr_uid, err);
        return err;
}

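/* Copy the pending upcall message out to gssd reading the pipe; in this
 * upcall protocol the message body is simply the uid that needs a context. */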
static ssize_t
gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
                char __user *dst, size_t buflen)
{
        char *data = (char *)msg->data + msg->copied;
        ssize_t mlen = msg->len;
        ssize_t left;

        if (mlen > buflen)
                mlen = buflen;
        left = copy_to_user(dst, data, mlen);
        if (left < 0) {
                msg->errno = left;
                return left;
        }
        mlen -= left;
        msg->copied += mlen;
        msg->errno = 0;
        return mlen;
}

#define MSG_BUF_MAXSIZE 1024

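/* Handle gssd writing its reply into the pipe: copy the buffer in, find the
 * pending upcall by uid, fill in the new context via gss_fill_context(),
 * then unhash the upcall, which wakes any waiters. */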
static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
        const void *p, *end;
        void *buf;
        struct rpc_clnt *clnt;
        struct gss_upcall_msg *gss_msg;
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct gss_cl_ctx *ctx;
        uid_t uid;
        ssize_t err = -EFBIG;

        if (mlen > MSG_BUF_MAXSIZE)
                goto out;
        err = -ENOMEM;
        buf = kmalloc(mlen, GFP_KERNEL);
        if (!buf)
                goto out;

        clnt = RPC_I(inode)->private;
        err = -EFAULT;
        if (copy_from_user(buf, src, mlen))
                goto err;

        end = (const void *)((char *)buf + mlen);
        p = simple_get_bytes(buf, end, &uid, sizeof(uid));
        if (IS_ERR(p)) {
                err = PTR_ERR(p);
                goto err;
        }

        err = -ENOMEM;
        ctx = gss_alloc_context();
        if (ctx == NULL)
                goto err;

        err = -ENOENT;
        /* Find a matching upcall */
        spin_lock(&inode->i_lock);
        gss_msg = __gss_find_upcall(RPC_I(inode), uid);
        if (gss_msg == NULL) {
                spin_unlock(&inode->i_lock);
                goto err_put_ctx;
        }
        list_del_init(&gss_msg->list);
        spin_unlock(&inode->i_lock);

        p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
        if (IS_ERR(p)) {
                err = PTR_ERR(p);
                gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
                goto err_release_msg;
        }
        gss_msg->ctx = gss_get_ctx(ctx);
        err = mlen;

err_release_msg:
        spin_lock(&inode->i_lock);
        __gss_unhash_msg(gss_msg);
        spin_unlock(&inode->i_lock);
        gss_release_msg(gss_msg);
err_put_ctx:
        gss_put_ctx(ctx);
err:
        kfree(buf);
out:
        dprintk("RPC:       gss_pipe_downcall returning %Zd\n", err);
        return err;
}

static void
gss_pipe_release(struct inode *inode)
{
        struct rpc_inode *rpci = RPC_I(inode);
        struct gss_upcall_msg *gss_msg;

        spin_lock(&inode->i_lock);
        while (!list_empty(&rpci->in_downcall)) {

                gss_msg = list_entry(rpci->in_downcall.next,
                                struct gss_upcall_msg, list);
                gss_msg->msg.errno = -EPIPE;
                atomic_inc(&gss_msg->count);
                __gss_unhash_msg(gss_msg);
                spin_unlock(&inode->i_lock);
                gss_release_msg(gss_msg);
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
        struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
        static unsigned long ratelimit;

        if (msg->errno < 0) {
                dprintk("RPC:       gss_pipe_destroy_msg releasing msg %p\n",
                                gss_msg);
                atomic_inc(&gss_msg->count);
                gss_unhash_msg(gss_msg);
                if (msg->errno == -ETIMEDOUT) {
                        unsigned long now = jiffies;
                        if (time_after(now, ratelimit)) {
                                printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
                                                    "Please check user daemon is running!\n");
                                ratelimit = now + 15*HZ;
                        }
                }
                gss_release_msg(gss_msg);
        }
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
        struct gss_auth *gss_auth;
        struct rpc_auth * auth;
        int err = -ENOMEM; /* XXX? */

        dprintk("RPC:       creating GSS authenticator for client %p\n", clnt);

        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(err);
        if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
                goto out_dec;
        gss_auth->client = clnt;
        err = -EINVAL;
        gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
        if (!gss_auth->mech) {
                printk(KERN_WARNING "%s: Pseudoflavor %d not found!\n",
                                __FUNCTION__, flavor);
                goto err_free;
        }
        gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
        if (gss_auth->service == 0)
                goto err_put_mech;
        auth = &gss_auth->rpc_auth;
        auth->au_cslack = GSS_CRED_SLACK >> 2;
        auth->au_rslack = GSS_VERF_SLACK >> 2;
        auth->au_ops = &authgss_ops;
        auth->au_flavor = flavor;
        atomic_set(&auth->au_count, 1);
        kref_init(&gss_auth->kref);

        gss_auth->dentry = rpc_mkpipe(clnt->cl_dentry, gss_auth->mech->gm_name,
                        clnt, &gss_upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
        if (IS_ERR(gss_auth->dentry)) {
                err = PTR_ERR(gss_auth->dentry);
                goto err_put_mech;
        }

        err = rpcauth_init_credcache(auth);
        if (err)
                goto err_unlink_pipe;

        return auth;
err_unlink_pipe:
        rpc_unlink(gss_auth->dentry);
err_put_mech:
        gss_mech_put(gss_auth->mech);
err_free:
        kfree(gss_auth);
out_dec:
        module_put(THIS_MODULE);
        return ERR_PTR(err);
}

static void
gss_free(struct gss_auth *gss_auth)
{
        rpc_unlink(gss_auth->dentry);
        gss_auth->dentry = NULL;
        gss_mech_put(gss_auth->mech);

        kfree(gss_auth);
        module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
        struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

        gss_free(gss_auth);
}

static void
gss_destroy(struct rpc_auth *auth)
{
        struct gss_auth *gss_auth;

        dprintk("RPC:       destroying GSS authenticator %p flavor %d\n",
                        auth, auth->au_flavor);

        rpcauth_destroy_credcache(auth);

        gss_auth = container_of(auth, struct gss_auth, rpc_auth);
        kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
        struct rpc_task *task;

        if (gss_cred->gc_ctx == NULL ||
                        gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY)
                return 0;

        gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
        cred->cr_ops = &gss_nullops;

        /* Take a reference to ensure the cred will be destroyed either
         * by the RPC call or by the put_rpccred() below */
        get_rpccred(cred);

        task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC);
        if (!IS_ERR(task))
                rpc_put_task(task);

        put_rpccred(cred);
        return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
        dprintk("RPC:       gss_free_ctx\n");

        kfree(ctx->gc_wire_ctx.data);
        kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
        struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
        gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
        struct gss_ctx *gc_gss_ctx;

        gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
        rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
        call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
        if (gc_gss_ctx)
                gss_delete_sec_context(&gc_gss_ctx);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
        dprintk("RPC:       gss_free_cred %p\n", gss_cred);
        kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
        struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
        gss_free_cred(gss_cred);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
        struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

        if (gss_destroying_context(cred))
                return;
        rcu_assign_pointer(gss_cred->gc_ctx, NULL);
        call_rcu(&cred->cr_rcu, gss_free_cred_callback);
        if (ctx)
                gss_put_ctx(ctx);
        kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
        return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
        struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
        struct gss_cred *cred = NULL;
        int err = -ENOMEM;

        dprintk("RPC:       gss_create_cred for uid %d, flavor %d\n",
                acred->uid, auth->au_flavor);

        if (!(cred = kzalloc(sizeof(*cred), GFP_KERNEL)))
                goto out_err;

        rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
        /*
         * Note: in order to force a call to call_refresh(), we deliberately
         * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
         */
        cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
        cred->gc_service = gss_auth->service;
        kref_get(&gss_auth->kref);
        return &cred->gc_base;

out_err:
        dprintk("RPC:       gss_create_cred failed with error %d\n", err);
        return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
        struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
        struct gss_cred *gss_cred = container_of(cred,struct gss_cred, gc_base);
        int err;

        do {
                err = gss_create_upcall(gss_auth, gss_cred);
        } while (err == -EAGAIN);
        return err;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
        struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);

        /*
         * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
         * we don't really care if the credential has expired or not,
         * since the caller should be prepared to reinitialise it.
         */
        if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
                goto out;
        /* Don't match with creds that have expired. */
        if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
                return 0;
out:
        return (rc->cr_uid == acred->uid);
}

/*
* Marshal credentials.
* Maybe we should keep a cached credential for performance reasons.
*/
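/* The credential built here carries the RPCSEC_GSS version, the control
 * procedure, the request sequence number, the service level and the opaque
 * context handle; the verifier that follows is a MIC, computed over the RPC
 * header and credential, proving possession of the GSS context. */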
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
                                                 gc_base);
        struct gss_cl_ctx       *ctx = gss_cred_get_ctx(cred);
        __be32          *cred_len;
        struct rpc_rqst *req = task->tk_rqstp;
        u32             maj_stat = 0;
        struct xdr_netobj mic;
        struct kvec     iov;
        struct xdr_buf  verf_buf;

        dprintk("RPC: %5u gss_marshal\n", task->tk_pid);

        *p++ = htonl(RPC_AUTH_GSS);
        cred_len = p++;

        spin_lock(&ctx->gc_seq_lock);
        req->rq_seqno = ctx->gc_seq++;
        spin_unlock(&ctx->gc_seq_lock);

        *p++ = htonl((u32) RPC_GSS_VERSION);
        *p++ = htonl((u32) ctx->gc_proc);
        *p++ = htonl((u32) req->rq_seqno);
        *p++ = htonl((u32) gss_cred->gc_service);
        p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
        *cred_len = htonl((p - (cred_len + 1)) << 2);

        /* We compute the checksum for the verifier over the xdr-encoded bytes
         * starting with the xid and ending at the end of the credential: */
        iov.iov_base = xprt_skip_transport_header(task->tk_xprt,
                                        req->rq_snd_buf.head[0].iov_base);
        iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
        xdr_buf_from_iov(&iov, &verf_buf);

        /* set verifier flavor */
        *p++ = htonl(RPC_AUTH_GSS);

        mic.data = (u8 *)(p + 1);
        maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
                clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        } else if (maj_stat != 0) {
                printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
                goto out_put_ctx;
        }
        p = xdr_encode_opaque(p, NULL, mic.len);
        gss_put_ctx(ctx);
        return p;
out_put_ctx:
        gss_put_ctx(ctx);
        return NULL;
}

/*
* Refresh credentials. XXX - finish
*/
static int
gss_refresh(struct rpc_task *task)
{

        if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred))
                return gss_refresh_upcall(task);
        return 0;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
        return -EACCES;
}

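/* Check the verifier in the server's reply: it must have flavor RPC_AUTH_GSS
 * and carry a MIC over the sequence number that was sent in the request. */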
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
        __be32          seq;
        struct kvec     iov;
        struct xdr_buf  verf_buf;
        struct xdr_netobj mic;
        u32             flav,len;
        u32             maj_stat;

        dprintk("RPC: %5u gss_validate\n", task->tk_pid);

        flav = ntohl(*p++);
        if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
                goto out_bad;
        if (flav != RPC_AUTH_GSS)
                goto out_bad;
        seq = htonl(task->tk_rqstp->rq_seqno);
        iov.iov_base = &seq;
        iov.iov_len = sizeof(seq);
        xdr_buf_from_iov(&iov, &verf_buf);
        mic.data = (u8 *)p;
        mic.len = len;

        maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        if (maj_stat) {
                dprintk("RPC: %5u gss_validate: gss_verify_mic returned "
                                "error 0x%08x\n", task->tk_pid, maj_stat);
                goto out_bad;
        }
        /* We leave it to unwrap to calculate au_rslack. For now we just
         * calculate the length of the verifier: */
        cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_validate: gss_verify_mic succeeded.\n",
                        task->tk_pid);
        return p + XDR_QUADLEN(len);
out_bad:
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_validate failed.\n", task->tk_pid);
        return NULL;
}

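/* rpc_gss_svc_integrity: emit the sequence number and the encoded arguments
 * as one length-prefixed region, then append a MIC computed over it so the
 * server can detect tampering. */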
static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
        struct xdr_buf  *snd_buf = &rqstp->rq_snd_buf;
        struct xdr_buf  integ_buf;
        __be32          *integ_len = NULL;
        struct xdr_netobj mic;
        u32             offset;
        __be32          *q;
        struct kvec     *iov;
        u32             maj_stat = 0;
        int             status = -EIO;

        integ_len = p++;
        offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
        *p++ = htonl(rqstp->rq_seqno);

        status = rpc_call_xdrproc(encode, rqstp, p, obj);
        if (status)
                return status;

        if (xdr_buf_subsegment(snd_buf, &integ_buf,
                                offset, snd_buf->len - offset))
                return status;
        *integ_len = htonl(integ_buf.len);

        /* guess whether we're in the head or the tail: */
        if (snd_buf->page_len || snd_buf->tail[0].iov_len)
                iov = snd_buf->tail;
        else
                iov = snd_buf->head;
        p = iov->iov_base + iov->iov_len;
        mic.data = (u8 *)(p + 1);

        maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
        status = -EIO; /* XXX? */
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        else if (maj_stat)
                return status;
        q = xdr_encode_opaque(p, NULL, mic.len);

        offset = (u8 *)q - (u8 *)p;
        iov->iov_len += offset;
        snd_buf->len += offset;
        return 0;
}

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
        int i;

        for (i=0; i < rqstp->rq_enc_pages_num; i++)
                __free_page(rqstp->rq_enc_pages[i]);
        kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
        struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
        int first, last, i;

        if (snd_buf->page_len == 0) {
                rqstp->rq_enc_pages_num = 0;
                return 0;
        }

        first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
        last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
        rqstp->rq_enc_pages_num = last - first + 1 + 1;
        rqstp->rq_enc_pages
                = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
                                GFP_NOFS);
        if (!rqstp->rq_enc_pages)
                goto out;
        for (i=0; i < rqstp->rq_enc_pages_num; i++) {
                rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
                if (rqstp->rq_enc_pages[i] == NULL)
                        goto out_free;
        }
        rqstp->rq_release_snd_buf = priv_release_snd_buf;
        return 0;
out_free:
        for (i--; i >= 0; i--) {
                __free_page(rqstp->rq_enc_pages[i]);
        }
out:
        return -EAGAIN;
}

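/* rpc_gss_svc_privacy: encode the arguments, switch the send buffer over to
 * the freshly allocated rq_enc_pages, and let gss_wrap() encrypt the request
 * body, which goes on the wire as a single opaque, padded XDR item. */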
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
{
        struct xdr_buf  *snd_buf = &rqstp->rq_snd_buf;
        u32             offset;
        u32             maj_stat;
        int             status;
        __be32          *opaque_len;
        struct page     **inpages;
        int             first;
        int             pad;
        struct kvec     *iov;
        char            *tmp;

        opaque_len = p++;
        offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
        *p++ = htonl(rqstp->rq_seqno);

        status = rpc_call_xdrproc(encode, rqstp, p, obj);
        if (status)
                return status;

        status = alloc_enc_pages(rqstp);
        if (status)
                return status;
        first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
        inpages = snd_buf->pages + first;
        snd_buf->pages = rqstp->rq_enc_pages;
        snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
        /* Give the tail its own page, in case we need extra space in the
         * head when wrapping: */
        if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
                tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
                memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
                snd_buf->tail[0].iov_base = tmp;
        }
        maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
        /* RPC_SLACK_SPACE should prevent this ever happening: */
        BUG_ON(snd_buf->len > snd_buf->buflen);
        status = -EIO;
        /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
         * done anyway, so it's safe to put the request on the wire: */
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        else if (maj_stat)
                return status;

        *opaque_len = htonl(snd_buf->len - offset);
        /* guess whether we're in the head or the tail: */
        if (snd_buf->page_len || snd_buf->tail[0].iov_len)
                iov = snd_buf->tail;
        else
                iov = snd_buf->head;
        p = iov->iov_base + iov->iov_len;
        pad = 3 - ((snd_buf->len - offset - 1) & 3);
        memset(p, 0, pad);
        iov->iov_len += pad;
        snd_buf->len += pad;

        return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
             kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
                        gc_base);
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
        int             status = -EIO;

        dprintk("RPC: %5u gss_wrap_req\n", task->tk_pid);
        if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
                /* The spec seems a little ambiguous here, but I think that not
                 * wrapping context destruction requests makes the most sense.
                 */
                status = rpc_call_xdrproc(encode, rqstp, p, obj);
                goto out;
        }
        switch (gss_cred->gc_service) {
                case RPC_GSS_SVC_NONE:
                        status = rpc_call_xdrproc(encode, rqstp, p, obj);
                        break;
                case RPC_GSS_SVC_INTEGRITY:
                        status = gss_wrap_req_integ(cred, ctx, encode,
                                                                rqstp, p, obj);
                        break;
                case RPC_GSS_SVC_PRIVACY:
                        status = gss_wrap_req_priv(cred, ctx, encode,
                                        rqstp, p, obj);
                        break;
        }
out:
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_wrap_req returning %d\n", task->tk_pid, status);
        return status;
}

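/* rpc_gss_svc_integrity reply path: check the embedded sequence number and
 * verify the MIC that the server computed over the result data. */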
static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                struct rpc_rqst *rqstp, __be32 **p)
{
        struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
        struct xdr_buf integ_buf;
        struct xdr_netobj mic;
        u32 data_offset, mic_offset;
        u32 integ_len;
        u32 maj_stat;
        int status = -EIO;

        integ_len = ntohl(*(*p)++);
        if (integ_len & 3)
                return status;
        data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
        mic_offset = integ_len + data_offset;
        if (mic_offset > rcv_buf->len)
                return status;
        if (ntohl(*(*p)++) != rqstp->rq_seqno)
                return status;

        if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
                                mic_offset - data_offset))
                return status;

        if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
                return status;

        maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        if (maj_stat != GSS_S_COMPLETE)
                return status;
        return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
                struct rpc_rqst *rqstp, __be32 **p)
{
        struct xdr_buf  *rcv_buf = &rqstp->rq_rcv_buf;
        u32 offset;
        u32 opaque_len;
        u32 maj_stat;
        int status = -EIO;

        opaque_len = ntohl(*(*p)++);
        offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
        if (offset + opaque_len > rcv_buf->len)
                return status;
        /* remove padding: */
        rcv_buf->len = offset + opaque_len;

        maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
                clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
        if (maj_stat != GSS_S_COMPLETE)
                return status;
        if (ntohl(*(*p)++) != rqstp->rq_seqno)
                return status;

        return 0;
}


static int
gss_unwrap_resp(struct rpc_task *task,
                kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
{
        struct rpc_cred *cred = task->tk_msg.rpc_cred;
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
                        gc_base);
        struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
        __be32          *savedp = p;
        struct kvec     *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
        int             savedlen = head->iov_len;
        int             status = -EIO;

        if (ctx->gc_proc != RPC_GSS_PROC_DATA)
                goto out_decode;
        switch (gss_cred->gc_service) {
                case RPC_GSS_SVC_NONE:
                        break;
                case RPC_GSS_SVC_INTEGRITY:
                        status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
                        if (status)
                                goto out;
                        break;
                case RPC_GSS_SVC_PRIVACY:
                        status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
                        if (status)
                                goto out;
                        break;
        }
        /* take into account extra slack for integrity and privacy cases: */
        cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
                                                + (savedlen - head->iov_len);
out_decode:
        status = rpc_call_xdrproc(decode, rqstp, p, obj);
out:
        gss_put_ctx(ctx);
        dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
                        status);
        return status;
}

static const struct rpc_authops authgss_ops = {
        .owner          = THIS_MODULE,
        .au_flavor      = RPC_AUTH_GSS,
#ifdef RPC_DEBUG
        .au_name        = "RPCSEC_GSS",
#endif
        .create         = gss_create,
        .destroy        = gss_destroy,
        .lookup_cred    = gss_lookup_cred,
        .crcreate       = gss_create_cred
};

static const struct rpc_credops gss_credops = {
        .cr_name        = "AUTH_GSS",
        .crdestroy      = gss_destroy_cred,
        .cr_init        = gss_cred_init,
        .crmatch        = gss_match,
        .crmarshal      = gss_marshal,
        .crrefresh      = gss_refresh,
        .crvalidate     = gss_validate,
        .crwrap_req     = gss_wrap_req,
        .crunwrap_resp  = gss_unwrap_resp,
};

static const struct rpc_credops gss_nullops = {
        .cr_name        = "AUTH_GSS",
        .crdestroy      = gss_destroy_cred,
        .crmatch        = gss_match,
        .crmarshal      = gss_marshal,
        .crrefresh      = gss_refresh_null,
        .crvalidate     = gss_validate,
        .crwrap_req     = gss_wrap_req,
        .crunwrap_resp  = gss_unwrap_resp,
};

static struct rpc_pipe_ops gss_upcall_ops = {
        .upcall         = gss_pipe_upcall,
        .downcall       = gss_pipe_downcall,
        .destroy_msg    = gss_pipe_destroy_msg,
        .release_pipe   = gss_pipe_release,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
        int err = 0;

        err = rpcauth_register(&authgss_ops);
        if (err)
                goto out;
        err = gss_svc_init();
        if (err)
                goto out_unregister;
        return 0;
out_unregister:
        rpcauth_unregister(&authgss_ops);
out:
        return err;
}

static void __exit exit_rpcsec_gss(void)
{
        gss_svc_shutdown();
        rpcauth_unregister(&authgss_ops);
}

MODULE_LICENSE("GPL");
module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)
