/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

static struct kmem_cache        *key_jar;
struct rb_root          key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root  key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
        .name           = "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
        printk("__key_check: key %p {%08x} should be {%08x}\n",
               key, key->magic, KEY_DEBUG_MAGIC);
        BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
        struct rb_node **p;

 try_again:
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);

        /* search the tree for a user record with a matching UID */
        while (*p) {
                parent = *p;
                user = rb_entry(parent, struct key_user, node);

                if (uid < user->uid)
                        p = &(*p)->rb_left;
                else if (uid > user->uid)
                        p = &(*p)->rb_right;
                else
                        goto found;
        }

        /* if we get here, we failed to find a match in the tree */
        if (!candidate) {
                /* allocate a candidate user record if we don't already have
                 * one */
                spin_unlock(&key_user_lock);

                user = NULL;
                candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                if (unlikely(!candidate))
                        goto out;

                /* the allocation may have scheduled, so we need to repeat the
                 * search lest someone else added the record whilst we were
                 * asleep */
                goto try_again;
        }

        /* if we get here, then the user record still hadn't appeared on the
         * second pass - so we use the candidate record */
        atomic_set(&candidate->usage, 1);
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
        mutex_init(&candidate->cons_lock);

        rb_link_node(&candidate->node, parent, p);
        rb_insert_color(&candidate->node, &key_user_tree);
        spin_unlock(&key_user_lock);
        user = candidate;
        goto out;

        /* okay - we found a user record for this UID */
 found:
        atomic_inc(&user->usage);
        spin_unlock(&key_user_lock);
        kfree(candidate);
 out:
        return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);

                kfree(user);
        }

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        /* propose a random serial number and look for a hole for it in the
         * serial number tree */
        do {
                get_random_bytes(&key->serial, sizeof(key->serial));

                key->serial >>= 1; /* negative numbers are not permitted */
        } while (key->serial < 3);

        spin_lock(&key_serial_lock);

attempt_insertion:
        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        goto serial_exists;
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

        spin_unlock(&key_serial_lock);
        return;

        /* we found a key with the proposed serial number - walk the tree from
         * that point looking for the next unused serial number */
serial_exists:
        for (;;) {
                key->serial++;
                if (key->serial < 3) {
                        key->serial = 3;
                        goto attempt_insertion;
                }

                parent = rb_next(parent);
                if (!parent)
                        goto attempt_insertion;

                xkey = rb_entry(parent, struct key, serial_node);
                if (key->serial < xkey->serial)
                        goto attempt_insertion;
        }

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
                      uid_t uid, gid_t gid, struct task_struct *ctx,
                      key_perm_t perm, unsigned long flags)
{
        struct key_user *user = NULL;
        struct key *key;
        size_t desclen, quotalen;
        int ret;

        key = ERR_PTR(-EINVAL);
        if (!desc || !*desc)
                goto error;

        desclen = strlen(desc) + 1;
        quotalen = desclen + type->def_datalen;

        /* get hold of the key tracking for this user */
        user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;

        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                        if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
                            user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
                            )
                                goto no_quota;
                }

                user->qnkeys++;
                user->qnbytes += quotalen;
                spin_unlock(&user->lock);
        }

        /* allocate and initialise the key and its description */
        key = kmem_cache_alloc(key_jar, GFP_KERNEL);
        if (!key)
                goto no_memory_2;

        if (desc) {
                key->description = kmemdup(desc, desclen, GFP_KERNEL);
                if (!key->description)
                        goto no_memory_3;
        }

        atomic_set(&key->usage, 1);
        init_rwsem(&key->sem);
        key->type = type;
        key->user = user;
        key->quotalen = quotalen;
        key->datalen = type->def_datalen;
        key->uid = uid;
        key->gid = gid;
        key->perm = perm;
        key->flags = 0;
        key->expiry = 0;
        key->payload.data = NULL;
        key->security = NULL;

        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;

        memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
#endif

        /* let the security module know about the key */
        ret = security_key_alloc(key, ctx, flags);
        if (ret < 0)
                goto security_error;

        /* publish the key by giving it a serial number */
        atomic_inc(&user->nkeys);
        key_alloc_serial(key);

error:
        return key;

security_error:
        kfree(key->description);
        kmem_cache_free(key_jar, key);
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
        key = ERR_PTR(ret);
        goto error;

no_memory_3:
        kmem_cache_free(key_jar, key);
no_memory_2:
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
no_memory_1:
        key = ERR_PTR(-ENOMEM);
        goto error;

no_quota:
        spin_unlock(&user->lock);
        key_user_put(user);
        key = ERR_PTR(-EDQUOT);
        goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
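
/*
 * Illustrative sketch (not part of the original file): the comment on
 * key_alloc() above says the returned key is still uninstantiated and must be
 * instantiated or discarded by the caller.  This hypothetical helper follows
 * the same pattern key_create_or_update() uses later in this file; the helper
 * name and the permission mask chosen here are assumptions made purely for
 * illustration, and the function is not wired up anywhere.
 */
static struct key *example_alloc_and_instantiate(struct key_type *type,
                                                 const char *desc,
                                                 const void *payload,
                                                 size_t plen,
                                                 struct key *keyring,
                                                 unsigned long flags)
{
        struct key *key;
        int ret;

        /* allocate an uninstantiated key owned by the current task */
        key = key_alloc(type, desc, current->fsuid, current->fsgid, current,
                        KEY_POS_VIEW | KEY_POS_SEARCH | KEY_USR_VIEW, flags);
        if (IS_ERR(key))
                return key;

        /* instantiate it and link it into the destination keyring */
        ret = key_instantiate_and_link(key, payload, plen, keyring, NULL);
        if (ret < 0) {
                /* discard the never-instantiated key on failure */
                key_put(key);
                return ERR_PTR(ret);
        }

        return key;
}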

/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
        int delta = (int) datalen - key->datalen;
        int ret = 0;

        key_check(key);

        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);

                if (delta > 0 &&
                    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
                    ) {
                        ret = -EDQUOT;
                }
                else {
                        key->user->qnbytes += delta;
                        key->quotalen += delta;
                }
                spin_unlock(&key->user->lock);
        }

        /* change the recorded data length if that didn't generate an error */
        if (ret == 0)
                key->datalen = datalen;

        return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
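
/*
 * Illustrative sketch (not part of the original file): key_payload_reserve()
 * above is typically called from a key type's ->instantiate() or ->update()
 * operation before the payload is attached, so that the quota charged in
 * key_alloc() keeps track of the real payload size.  The helper name and the
 * payload layout below are assumptions for illustration only.
 */
static int example_attach_payload(struct key *key, const void *data,
                                  size_t datalen)
{
        void *copy;
        int ret;

        copy = kmemdup(data, datalen, GFP_KERNEL);
        if (!copy)
                return -ENOMEM;

        /* account for the payload against the owner's quota */
        ret = key_payload_reserve(key, datalen);
        if (ret < 0) {
                kfree(copy);
                return ret;
        }

        /* the key is not yet linked anywhere whilst it is being instantiated,
         * so a plain assignment suffices here */
        key->payload.data = copy;
        return 0;
}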

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
                                      const void *data,
                                      size_t datalen,
                                      struct key *keyring,
                                      struct key *instkey)
{
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* instantiate the key */
                ret = key->type->instantiate(key, data, datalen);

                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
                        set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;

                        /* and link it into the destination keyring */
                        if (keyring)
                                ret = __key_link(keyring, key);

                        /* disable the authorisation key */
                        if (instkey)
                                key_revoke(instkey);
                }
        }

        mutex_unlock(&key_construction_mutex);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
                             const void *data,
                             size_t datalen,
                             struct key *keyring,
                             struct key *instkey)
{
        int ret;

        if (keyring)
                down_write(&keyring->sem);

        ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

        if (keyring)
                up_write(&keyring->sem);

        return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
                        unsigned timeout,
                        struct key *keyring,
                        struct key *instkey)
{
        struct timespec now;
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        if (keyring)
                down_write(&keyring->sem);

        mutex_lock(&key_construction_mutex);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                set_bit(KEY_FLAG_NEGATIVE, &key->flags);
                set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;

                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                        awaken = 1;

                ret = 0;

                /* and link it into the destination keyring */
                if (keyring)
                        ret = __key_link(keyring, key);

                /* disable the authorisation key */
                if (instkey)
                        key_revoke(instkey);
        }

        mutex_unlock(&key_construction_mutex);

        if (keyring)
                up_write(&keyring->sem);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

        return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
        struct rb_node *_n;
        struct key *key;

 go_again:
        /* look for a dead key in the tree */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (atomic_read(&key->usage) == 0)
                        goto found_dead_key;
        }

        spin_unlock(&key_serial_lock);
        return;

 found_dead_key:
        /* we found a dead key - once we've removed it from the tree, we can
         * drop the lock */
        rb_erase(&key->serial_node, &key_serial_tree);
        spin_unlock(&key_serial_lock);

        key_check(key);

        security_key_free(key);

        /* deal with the user's key tracking and quota */
        if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);
                key->user->qnkeys--;
                key->user->qnbytes -= key->quotalen;
                spin_unlock(&key->user->lock);
        }

        atomic_dec(&key->user->nkeys);
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                atomic_dec(&key->user->nikeys);

        key_user_put(key->user);

        /* now throw away the key memory */
        if (key->type->destroy)
                key->type->destroy(key);

        kfree(key->description);

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC_X;
#endif
        kmem_cache_free(key_jar, key);

        /* there may, of course, be more than one key to destroy */
        goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
        if (key) {
                key_check(key);

                if (atomic_dec_and_test(&key->usage))
                        schedule_work(&key_cleanup_task);
        }

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
        struct rb_node *n;
        struct key *key;

        spin_lock(&key_serial_lock);

        /* search the tree for the specified key */
        n = key_serial_tree.rb_node;
        while (n) {
                key = rb_entry(n, struct key, serial_node);

                if (id < key->serial)
                        n = n->rb_left;
                else if (id > key->serial)
                        n = n->rb_right;
                else
                        goto found;
        }

 not_found:
        key = ERR_PTR(-ENOKEY);
        goto error;

 found:
        /* pretend it doesn't exist if it's dead */
        if (atomic_read(&key->usage) == 0 ||
            test_bit(KEY_FLAG_DEAD, &key->flags) ||
            key->type == &key_type_dead)
                goto not_found;

        /* this races with key_put(), but that doesn't matter since key_put()
         * doesn't actually change the key
         */
        atomic_inc(&key->usage);

 error:
        spin_unlock(&key_serial_lock);
        return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
        struct key_type *ktype;

        down_read(&key_types_sem);

        /* look up the key type to see if it's one of the registered kernel
         * types */
        list_for_each_entry(ktype, &key_types_list, link) {
                if (strcmp(ktype->name, type) == 0)
                        goto found_kernel_type;
        }

        up_read(&key_types_sem);
        ktype = ERR_PTR(-ENOKEY);

 found_kernel_type:
        return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
        up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
                                     const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        /* need write permission on the key to update it */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        ret = -EEXIST;
        if (!key->type->update)
                goto error;

        down_write(&key->sem);

        ret = key->type->update(key, payload, plen);
        if (ret == 0)
                /* updating a negative key instantiates it */
                clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

        up_write(&key->sem);

        if (ret < 0)
                goto error;
out:
        return key_ref;

error:
        key_put(key);
        key_ref = ERR_PTR(ret);
        goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *type,
                               const char *description,
                               const void *payload,
                               size_t plen,
                               unsigned long flags)
{
        struct key_type *ktype;
        struct key *keyring, *key = NULL;
        key_perm_t perm;
        key_ref_t key_ref;
        int ret;

        /* look up the key type to see if it's one of the registered kernel
         * types */
        ktype = key_type_lookup(type);
        if (IS_ERR(ktype)) {
                key_ref = ERR_PTR(-ENODEV);
                goto error;
        }

        key_ref = ERR_PTR(-EINVAL);
        if (!ktype->match || !ktype->instantiate)
                goto error_2;

        keyring = key_ref_to_ptr(keyring_ref);

        key_check(keyring);

        key_ref = ERR_PTR(-ENOTDIR);
        if (keyring->type != &key_type_keyring)
                goto error_2;

        down_write(&keyring->sem);

        /* if we're going to allocate a new key, we're going to have
         * to modify the keyring */
        ret = key_permission(keyring_ref, KEY_WRITE);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        /* if it's possible to update this type of key, search for an existing
         * key of the same type and description in the destination keyring and
         * update that instead if possible
         */
        if (ktype->update) {
                key_ref = __keyring_search_one(keyring_ref, ktype, description,
                                               0);
                if (!IS_ERR(key_ref))
                        goto found_matching_key;
        }

        /* decide on the permissions we want */
        perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
        perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

        if (ktype->read)
                perm |= KEY_POS_READ | KEY_USR_READ;

        if (ktype == &key_type_keyring || ktype->update)
                perm |= KEY_USR_WRITE;

        /* allocate a new key */
        key = key_alloc(ktype, description, current->fsuid, current->fsgid,
                        current, perm, flags);
        if (IS_ERR(key)) {
                key_ref = ERR_PTR(PTR_ERR(key));
                goto error_3;
        }

        /* instantiate it and link it into the target keyring */
        ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
        if (ret < 0) {
                key_put(key);
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

 error_3:
        up_write(&keyring->sem);
 error_2:
        key_type_put(ktype);
 error:
        return key_ref;

 found_matching_key:
        /* we found a matching key, so we're going to try to update it
         * - we can drop the locks first as we have the key pinned
         */
        up_write(&keyring->sem);
        key_type_put(ktype);

        key_ref = __key_update(key_ref, payload, plen);
        goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
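
/*
 * Illustrative sketch (not part of the original file): a kernel-side caller of
 * key_create_or_update() above would usually wrap the destination keyring in a
 * possessed key reference and check the returned key_ref_t.  The "user" type
 * name mirrors key_type_user registered in key_init() below; the helper name
 * and parameters are assumptions for illustration only.
 */
static int example_stash_secret(struct key *keyring, const char *desc,
                                const void *secret, size_t len,
                                unsigned long flags)
{
        key_ref_t kref;

        kref = key_create_or_update(make_key_ref(keyring, 1), "user", desc,
                                    secret, len, flags);
        if (IS_ERR(kref))
                return PTR_ERR(kref);

        /* drop the reference taken on the new or updated key; the keyring
         * keeps its own link */
        key_put(key_ref_to_ptr(kref));
        return 0;
}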

/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        key_check(key);

        /* the key must be writable */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        /* attempt to update it if supported */
        ret = -EOPNOTSUPP;
        if (key->type->update) {
                down_write(&key->sem);

                ret = key->type->update(key, payload, plen);
                if (ret == 0)
                        /* updating a negative key instantiates it */
                        clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

                up_write(&key->sem);
        }

 error:
        return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
        key_check(key);

        /* make sure no one's trying to change or use the key when we mark it
         * - we tell lockdep that we might nest because we might be revoking an
         *   authorisation key whilst holding the sem on a key we've just
         *   instantiated
         */
        down_write_nested(&key->sem, 1);
        if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
            key->type->revoke)
                key->type->revoke(key);

        up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
        struct key_type *p;
        int ret;

        ret = -EEXIST;
        down_write(&key_types_sem);

        /* disallow key types with the same name */
        list_for_each_entry(p, &key_types_list, link) {
                if (strcmp(p->name, ktype->name) == 0)
                        goto out;
        }

        /* store the type */
        list_add(&ktype->link, &key_types_list);
        ret = 0;

 out:
        up_write(&key_types_sem);
        return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
        struct rb_node *_n;
        struct key *key;

        down_write(&key_types_sem);

        /* withdraw the key type */
        list_del_init(&ktype->link);

        /* mark all the keys of this type dead */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype)
                        key->type = &key_type_dead;
        }

        spin_unlock(&key_serial_lock);

        /* make sure everyone revalidates their keys */
        synchronize_rcu();

        /* we should now be able to destroy the payloads of all the keys of
         * this type with impunity */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype) {
                        if (ktype->destroy)
                                ktype->destroy(key);
                        memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
                }
        }

        spin_unlock(&key_serial_lock);
        up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
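
/*
 * Illustrative sketch (not part of the original file): a minimal key type as a
 * caller of register_key_type()/unregister_key_type() above.  The operation
 * signatures follow the way this file invokes them (->instantiate(key, data,
 * datalen), ->destroy(key)); the ->match() prototype, the "example" type name
 * and the helper names are assumptions for illustration only, and in a real
 * module the init/exit pair would be wired up with module_init()/module_exit().
 */
static int example_instantiate(struct key *key, const void *data,
                               size_t datalen)
{
        /* a real type would parse and stash the payload; accept anything here */
        return 0;
}

static int example_match(const struct key *key, const void *description)
{
        return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
        /* nothing to free for this sketch */
}

static struct key_type key_type_example = {
        .name           = "example",
        .instantiate    = example_instantiate,
        .match          = example_match,
        .destroy        = example_destroy,
};

static int __init example_key_init(void)
{
        return register_key_type(&key_type_example);
}

static void __exit example_key_exit(void)
{
        unregister_key_type(&key_type_example);
}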

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
        /* allocate a slab in which we can store keys */
        key_jar = kmem_cache_create("key_jar", sizeof(struct key),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        /* add the special key types */
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);

        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
                     NULL,
                     &key_user_tree.rb_node);

        rb_insert_color(&root_key_user.node,
                        &key_user_tree);

        /* record root's user standard keyrings */
        key_check(&root_user_keyring);
        key_check(&root_session_keyring);

        __key_insert_serial(&root_user_keyring);
        __key_insert_serial(&root_session_keyring);

        keyring_publish_name(&root_user_keyring);
        keyring_publish_name(&root_session_keyring);

        /* link the two root keyrings together */
        key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */
