c0or1k/trunk/src/api/cap.c (blame information for rev 2, author: drasko)
URL: https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

/*
 * Capability manipulation syscall.
 *
 * The entry point to Codezero's security
 * mechanisms.
 *
 * Copyright (C) 2009 Bahadir Balban
 */
#include <l4/api/capability.h>
#include <l4/generic/capability.h>
#include <l4/generic/cap-types.h>
#include <l4/generic/container.h>
#include <l4/generic/tcb.h>
#include <l4/api/errno.h>
#include INC_API(syscall.h)

/*
 * Read all capabilities of the current task.
 * This includes its private capabilities as well as
 * those shared by other tasks that this task has
 * rights to but does not own.
 */
int cap_read_all(struct capability *caparray)
{
	struct capability *cap;
	int capidx = 0;

	/* Copy all capabilities from lists to buffer */
	list_foreach_struct(cap, &current->cap_list.caps, list) {
		memcpy(&caparray[capidx], cap, sizeof(*cap));
		capidx++;
	}

	list_foreach_struct(cap, &current->space->cap_list.caps, list) {
		memcpy(&caparray[capidx], cap, sizeof(*cap));
		capidx++;
	}

	list_foreach_struct(cap, &curcont->cap_list.caps, list) {
		memcpy(&caparray[capidx], cap, sizeof(*cap));
		capidx++;
	}

	return 0;
}

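/*
 * Illustrative sketch, not part of the original file: the user-side
 * call sequence that pairs with cap_read_all(). It assumes a syscall
 * stub with the same prototype as sys_capability_control() further
 * below, a user-mapped buffer of nelem entries and a user-mapped
 * ncaps counter; error handling is abbreviated.
 */
#if 0
static int example_read_caps(struct capability *buf, int nelem, int *ncaps)
{
	int err;

	/* Ask how many capabilities are visible to this thread */
	if ((err = sys_capability_control(CAP_CONTROL_NCAPS, 0, ncaps)) < 0)
		return err;

	/* The read below copies private, space and container caps */
	if (*ncaps > nelem)
		return -EINVAL;

	return sys_capability_control(CAP_CONTROL_READ, 0, buf);
}
#endif
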
/*
 * Shares a single capability. When sharing, the only
 * target that makes sense is the caller's own container.
 */
int cap_share_single(struct capability *user)
{
	struct capability *cap;
	struct cap_list *clist;

	if (!(cap = cap_find_by_capid(user->capid, &clist)))
		return -EEXIST;

	if (cap->owner != current->tid)
		return -EPERM;

	/* First remove it from its list */
	cap_list_remove(cap, clist);

	/* Place it where it is shared */
	cap_list_insert(cap, &curcont->cap_list);

	return 0;
}

/*
 * Shares the whole capability list.
 *
 * FIXME: Make sure each and every capability has its
 * share right set!
 */
int cap_share_all(unsigned int flags)
{
	if (flags == CAP_SHARE_ALL_CONTAINER) {

		/* Move all private caps to container */
		cap_list_move(&curcont->cap_list,
			      &current->cap_list);

		/*
		 * Move all space caps to container, also.
		 *
		 * FIXME: Make sure all space capabilities
		 * are owned by the sharer!!!
		 */
		cap_list_move(&curcont->cap_list,
			      &current->space->cap_list);
	} else if (flags == CAP_SHARE_ALL_SPACE) {

		/* Move all private caps to space */
		cap_list_move(&current->space->cap_list,
			      &current->cap_list);
	}
	return 0;
}

int cap_share(struct capability *cap, unsigned int flags)
{
	if (flags == CAP_SHARE_SINGLE)
		return cap_share_single(cap);
	else
		return cap_share_all(flags);
}

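/*
 * Illustrative sketch, not part of the original file: the two ways a
 * task might use cap_share(). The helper name and the with_container
 * switch are hypothetical; req is a user-supplied copy whose capid
 * selects the capability to share.
 */
#if 0
static int example_share(struct capability *req, int with_container)
{
	if (with_container)
		/* Publish the single cap selected by req->capid on
		 * the current container's shared list */
		return cap_share(req, CAP_SHARE_SINGLE);

	/* Otherwise move every private cap of this thread to its
	 * address space, so sibling threads may use them */
	return cap_share(req, CAP_SHARE_ALL_SPACE);
}
#endif
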
#if 0

/*
 * Currently unused. API hasn't settled.
 */
/* Grants all caps */
int cap_grant_all(struct capability *req, unsigned int flags)
{
	struct ktcb *target;
	struct capability *cap_head, *cap;
	int err;

	/* Owners are always threads, for simplicity */
	if (!(target = tcb_find(req->owner)))
		return -ESRCH;

	/* Detach all caps */
	cap_head = cap_list_detach(&current->space->cap_list);

	list_foreach_struct(cap, &cap_head->list, list) {
		/* Change ownership */
		cap->owner = target->tid;
		BUG_ON(target->tid != req->owner);

		/* Make immutable if GRANT_IMMUTABLE given */
		if (flags & CAP_GRANT_IMMUTABLE) {
			cap->access &= ~CAP_GENERIC_MASK;
			cap->access |= CAP_IMMUTABLE;
		}

		/*
		 * Sanity check: a granted cap cannot have any used
		 * quantity. Otherwise, how would the original users
		 * of the cap ever free it?
		 */
		if (cap->used) {
			err = -EPERM;
			goto out_err;
		}
	}

	/* Attach all to target */
	cap_list_attach(cap_head, &target->space->cap_list);
	return 0;

out_err:
	/* Attach them back to the original list */
	cap_list_attach(cap_head, &current->space->cap_list);
	return err;
}

#endif

int cap_grant_single(struct capability *req, unsigned int flags)
{
	struct capability *cap;
	struct cap_list *clist;
	struct ktcb *target;

	if (!(cap = cap_find_by_capid(req->capid, &clist)))
		return -EEXIST;

	if (!(target = tcb_find(req->owner)))
		return -ESRCH;

	if (cap->owner != current->tid)
		return -EPERM;

	/* Granted cap cannot have used quantity */
	if (cap->used)
		return -EPERM;

	/* First remove it from its list */
	cap_list_remove(cap, clist);

	/* Change ownership */
	cap->owner = target->tid;
	BUG_ON(cap->owner != req->owner);

	/* Make immutable if GRANT_IMMUTABLE given */
	if (flags & CAP_GRANT_IMMUTABLE) {
		cap->access &= ~CAP_GENERIC_MASK;
		cap->access |= CAP_IMMUTABLE;
	}

	/* Place it where it is granted */
	cap_list_insert(cap, &target->space->cap_list);

	return 0;
}

int cap_grant(struct capability *cap, unsigned int flags)
{
	/* Propagate any error from the single grant */
	if (flags & CAP_GRANT_SINGLE)
		return cap_grant_single(cap, flags);

	return -EINVAL;
}

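/*
 * Illustrative sketch, not part of the original file: a pager-style
 * thread handing one of its capabilities to a child thread. The
 * helper name and child_tid parameter are hypothetical, and l4id_t is
 * assumed to be the kernel's id type; the flags are the ones handled
 * by cap_grant_single() above.
 */
#if 0
static int example_grant_to_child(struct capability *req, l4id_t child_tid)
{
	/* The new owner is carried in the request structure */
	req->owner = child_tid;

	/* Grant the single cap and strip its generic modification
	 * rights so the child cannot alter it further */
	return cap_grant(req, CAP_GRANT_SINGLE | CAP_GRANT_IMMUTABLE);
}
#endif
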
int cap_deduce_rtype(struct capability *orig, struct capability *new)
{
	struct ktcb *target;
	struct address_space *sp;

	/* An rtype deduction can only be to a space or thread */
	switch (cap_rtype(new)) {
	case CAP_RTYPE_SPACE:
		/* Check containment right */
		if (cap_rtype(orig) != CAP_RTYPE_CONTAINER)
			return -ENOCAP;

		/*
		 * Find out if this space exists in this
		 * container.
		 *
		 * Note address space search is local only.
		 * Only thread searches are global.
		 */
		if (!(sp = address_space_find(new->resid)))
			return -ENOCAP;

		/* Success. Assign new type to original cap */
		cap_set_rtype(orig, cap_rtype(new));

		/* Assign the space id to orig cap */
		orig->resid = sp->spid;
		break;
	case CAP_RTYPE_THREAD:
		/* Find the thread */
		if (!(target = tcb_find(new->resid)))
			return -ENOCAP;

		/* Check containment */
		if (cap_rtype(orig) == CAP_RTYPE_SPACE) {
			if (orig->resid != target->space->spid)
				return -ENOCAP;
		} else if (cap_rtype(orig) == CAP_RTYPE_CONTAINER) {
			if (orig->resid != target->container->cid)
				return -ENOCAP;
		} else
			return -ENOCAP;

		/* Success. Assign new type to original cap */
		cap_set_rtype(orig, cap_rtype(new));

		/* Assign the thread id to orig cap */
		orig->resid = target->tid;
		break;
	default:
		return -ENOCAP;
	}
	return 0;
}

/*
 * Deduction can be by the access permissions, start, end or size
 * fields, or by the target resource type. Inter-container
 * deduction is not allowed.
 *
 * Target resource deduction means narrowing the scope that the
 * capability applies to, e.g. from a container to a space in
 * that container.
 *
 * NOTE: Unless the target resource type is being deduced, the
 * resid must not change.
 *
 * For example, a space cap cannot be deduced to become applicable
 * to another space, i.e. another space is at the same privilege
 * level. But a container-wide cap can be reduced to apply to a
 * single space in that container (thus changing the resid to that
 * space's id).
 *
 * capid: Id of the original capability
 * new: Userspace pointer to the desired new state of the
 * capability.
 *
 * orig = deduced;
 */
int cap_deduce(struct capability *new)
{
	struct capability *orig;
	struct cap_list *clist;
	int ret;

	/* Find original capability */
	if (!(orig = cap_find_by_capid(new->capid, &clist)))
		return -EEXIST;

	/* Check that caller is owner */
	if (orig->owner != current->tid)
		return -ENOCAP;

	/* Check that it is deducible */
	if (!(orig->access & CAP_CHANGEABLE))
		return -ENOCAP;

	/* Check target resource deduction */
	if (cap_rtype(new) != cap_rtype(orig))
		if ((ret = cap_deduce_rtype(orig, new)) < 0)
			return ret;

	/* Check owners are same for request validity */
	if (orig->owner != new->owner)
		return -EINVAL;

	/* Check permissions for deduction */
	if (orig->access) {
		/* New cannot have more bits than original */
		if ((orig->access & new->access) != new->access)
			return -EINVAL;
		/* New cannot make original redundant */
		if (new->access == 0)
			return -EINVAL;

		/* Deduce bits of orig */
		orig->access &= new->access;
	} else if (new->access)
		return -EINVAL;

	/* Check size for deduction */
	if (orig->size) {
		/* New can't have more, or make original redundant */
		if (new->size >= orig->size)
			return -EINVAL;

		/*
		 * The reduction cannot take away quantity that is
		 * already in use, so the new size must still cover
		 * the used amount
		 */
		if (new->size < orig->used)
			return -EPERM;
		orig->size = new->size;
	} else if (new->size)
		return -EINVAL;

	/* Range-like permissions can't be deduced */
	if (orig->start || orig->end) {
		if (orig->start != new->start ||
		    orig->end != new->end)
			return -EPERM;
	} else if (new->start || new->end)
		return -EINVAL;

	/* Ensure orig and new are the same */
	BUG_ON(orig->capid != new->capid);
	BUG_ON(orig->resid != new->resid);
	BUG_ON(orig->owner != new->owner);
	BUG_ON(orig->type != new->type);
	BUG_ON(orig->access != new->access);
	BUG_ON(orig->start != new->start);
	BUG_ON(orig->end != new->end);
	BUG_ON(orig->size != new->size);
	BUG_ON(orig->used != new->used);

	return 0;
}

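/*
 * Illustrative sketch, not part of the original file: deducing a
 * container-wide capability down to a single address space. The
 * caller takes a copy of the original cap, narrows it, and passes the
 * copy to cap_deduce(), which then shrinks the original in place.
 * The helper name and spid parameter are hypothetical, and
 * CAP_MAP_READ stands in for whichever access bit is to be kept.
 */
#if 0
static int example_deduce_to_space(struct capability *copy, l4id_t spid)
{
	/* Narrow the target from the whole container to one space */
	cap_set_rtype(copy, CAP_RTYPE_SPACE);
	copy->resid = spid;

	/* Keep only a subset of the original access bits */
	copy->access &= CAP_MAP_READ;

	return cap_deduce(copy);
}
#endif
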
/*
 * Destroys a capability
 */
int cap_destroy(struct capability *cap)
{
	struct capability *orig;
	struct cap_list *clist;

	/* Find original capability */
	if (!(orig = cap_find_by_capid(cap->capid, &clist)))
		return -EEXIST;

	/* Check that caller is owner */
	if (orig->owner != current->tid)
		return -ENOCAP;

	/* Check that it is destroyable */
	if (!(cap_generic_perms(orig) & CAP_CHANGEABLE))
		return -ENOCAP;

	/*
	 * Check that it is not a device.
	 *
	 * We don't allow devices for now. To do this
	 * correctly, we need to check if device irq
	 * is not currently registered.
	 */
	if (cap_is_devmem(orig))
		return -ENOCAP;

	cap_list_remove(orig, clist);
	free_capability(orig);
	return 0;
}

static inline int cap_has_size(struct capability *c)
{
	return c->size;
}

static inline int cap_has_range(struct capability *c)
{
	return c->start && c->end;
}

/*
 * Splits a capability.
 *
 * Pools of typed memory objects can't be replicated and then
 * deduced, because the replication would temporarily double
 * their size. So they are split in place instead.
 *
 * Splitting works by diff'ing the resources possessed by the
 * two capabilities.
 *
 * capid: Id of the original, valid capability.
 * diff: New capability that we want to split out.
 *
 * orig = orig - diff;
 * new = diff;
 */
int cap_split(struct capability *diff, unsigned int flags)
{
	struct capability *orig, *new;
	struct cap_list *clist;
	int ret;

	/* Find original capability */
	if (!(orig = cap_find_by_capid(diff->capid, &clist)))
		return -EEXIST;

	/* Check target type/resid/owner are the same */
	if (orig->type != diff->type ||
	    orig->resid != diff->resid ||
	    orig->owner != diff->owner)
		return -EINVAL;

	/* Check that caller is owner */
	if (orig->owner != current->tid)
		return -ENOCAP;

	/* Check owners are same */
	if (orig->owner != diff->owner)
		return -EINVAL;

	/* Check that it is splittable */
	if (!(orig->access & CAP_CHANGEABLE))
		return -ENOCAP;

	/* Create new */
	if (!(new = capability_create()))
		return -ENOCAP;

	/* Check access bits usage and split */
	if (flags & CAP_SPLIT_ACCESS) {
		/* Access bits must never be redundant */
		BUG_ON(!orig->access);

		/* Split one can't have more bits than original */
		if ((orig->access & diff->access) != diff->access) {
			ret = -EINVAL;
			goto out_err;
		}

		/* Split one cannot make original redundant */
		if ((orig->access & ~diff->access) == 0) {
			ret = -EINVAL;
			goto out_err;
		}

		/* Split one cannot be redundant itself */
		if (!diff->access) {
			ret = -EINVAL;
			goto out_err;
		}

		/* Subtract given access permissions */
		orig->access &= ~diff->access;

		/* Assign given perms to new capability */
		new->access = diff->access;
	} else {
		/* Not splitting by access, so there must be a size
		 * or range to split by */
		if (!cap_has_size(orig) &&
		    !cap_has_range(orig)) {
			ret = -EINVAL;
			goto out_err;
		}
		/* If no split, then they are identical */
		new->access = orig->access;

		/* Diff must also reflect orig by convention */
		if (diff->access != orig->access) {
			ret = -EINVAL;
			goto out_err;
		}
	}

	/* If cap has size, split by size is compulsory */
	if (cap_type(orig) == CAP_TYPE_QUANTITY) {
		BUG_ON(!cap_has_size(orig));

		/*
		 * Split one can't have more,
		 * or make original redundant
		 */
		if (diff->size >= orig->size) {
			ret = -EINVAL;
			goto out_err;
		}

		/* Split one can't be redundant itself */
		if (!diff->size) {
			ret = -EINVAL;
			goto out_err;
		}

		/* Split amount must come out of the unused portion */
		if (orig->size - orig->used < diff->size) {
			ret = -EPERM;
			goto out_err;
		}

		orig->size -= diff->size;
		new->size = diff->size;
		new->used = 0;
	} else {

		/* Diff must also reflect orig by convention */
		if (diff->size != orig->size) {
			ret = -EINVAL;
			goto out_err;
		}

		/* If no split, then they are identical */
		new->size = orig->size;
		new->used = orig->used;

	}

	if (flags & CAP_SPLIT_RANGE) {
		/* Start and end must either both be set or both be zero */
		BUG_ON(!!orig->start ^ !!orig->end);

		/* If orig doesn't have a range, return invalid */
		if (!orig->start && !orig->end) {
			ret = -EINVAL;
			goto out_err;
		} else {
			/* Orig has a range but diff doesn't */
			if (!diff->start || !diff->end) {
				ret = -EINVAL;
				goto out_err;
			}
			/* Both valid, but we don't permit range split */
			ret = -EPERM;
			goto out_err;
		}
	} else {
		/* If no split, then they are identical */
		new->start = orig->start;
		new->end = orig->end;
	}

	/* Copy other fields */
	new->type = orig->type;
	new->resid = orig->resid;
	new->owner = orig->owner;

	/* Add the new capability to the most private list */
	cap_list_insert(new, &current->space->cap_list);

	/* Check fields that must be identical */
	BUG_ON(new->resid != diff->resid);
	BUG_ON(new->owner != diff->owner);
	BUG_ON(new->type != diff->type);
	BUG_ON(new->access != diff->access);
	BUG_ON(new->start != diff->start);
	BUG_ON(new->end != diff->end);
	BUG_ON(new->size != diff->size);

	/* Copy back the capid and used fields, which may differ */
	diff->capid = new->capid;
	diff->used = new->used;
	return 0;

out_err:
	free_capability(new);
	return ret;
}

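/*
 * Illustrative sketch, not part of the original file: splitting a
 * quantitative capability (e.g. a memory pool cap) by size. The
 * caller prepares diff as a copy of the original with only the size
 * field set to the amount to split out; by the convention enforced
 * above, every other field must mirror the original. The helper name
 * and the amount parameter type are assumptions.
 */
#if 0
static int example_split_by_size(struct capability *diff,
				 unsigned long amount)
{
	/* Split out 'amount' units; the remainder stays in the
	 * original capability */
	diff->size = amount;

	/* No CAP_SPLIT_ACCESS or CAP_SPLIT_RANGE: access bits and
	 * range are left identical on both capabilities */
	return cap_split(diff, 0);
}
#endif
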
/*
 * Replicates an existing capability. This is for expanding
 * capabilities to managed children.
 *
 * After replication, a duplicate capability exists in the
 * system, but as it is not a quantity, this does not increase
 * the capabilities of the caller in any way.
 */
int cap_replicate(struct capability *dupl)
{
	struct capability *new, *orig;
	struct cap_list *clist;

	/* Find original capability */
	if (!(orig = cap_find_by_capid(dupl->capid, &clist)))
		return -EEXIST;

	/* Check that caller is owner */
	if (orig->owner != current->tid)
		return -ENOCAP;

	/* Check that it is replicable */
	if (!(orig->access & CAP_REPLICABLE))
		return -ENOCAP;

	/* Quantitative types must not be replicable */
	if (cap_type(orig) == CAP_TYPE_QUANTITY) {
		printk("Cont %d: FATAL: Capability (%d) "
		       "is quantitative but also replicable\n",
		       curcont->cid, orig->capid);
		/* FIXME: Should rule this out as a CML2 requirement */
		BUG();
	}

	/* Replicate it */
	if (!(new = capability_create()))
		return -ENOCAP;

	/* Copy all except capid & listptrs */
	dupl->resid = new->resid = orig->resid;
	dupl->owner = new->owner = orig->owner;
	dupl->type = new->type = orig->type;
	dupl->access = new->access = orig->access;
	dupl->start = new->start = orig->start;
	dupl->end = new->end = orig->end;
	dupl->size = new->size = orig->size;
	dupl->used = new->used = orig->used;

	/* Copy new fields */
	dupl->capid = new->capid;

	/* Add it to most private list */
	cap_list_insert(new, &current->space->cap_list);

	return 0;
}

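/*
 * Illustrative sketch, not part of the original file: a manager
 * thread replicating a non-quantitative capability and granting the
 * duplicate to a child thread while keeping the original. The helper
 * name and child_tid are hypothetical, and the replica is assumed to
 * have no used quantity (cap_grant_single() rejects it otherwise).
 */
#if 0
static int example_replicate_and_grant(struct capability *dupl,
				       l4id_t child_tid)
{
	int err;

	/* Create an identical copy; on return dupl->capid holds the
	 * id of the newly created capability */
	if ((err = cap_replicate(dupl)) < 0)
		return err;

	/* Hand the duplicate over to the child thread */
	dupl->owner = child_tid;
	return cap_grant(dupl, CAP_GRANT_SINGLE);
}
#endif
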
/*
 * Read and manipulate capabilities.
 */
int sys_capability_control(unsigned int req, unsigned int flags, void *userbuf)
{
	int err = 0;

	/*
	 * Check capability to do a capability operation.
	 * Supported only on current's caps for the time being.
	 */
	if ((err = cap_cap_check(current, req, flags)) < 0)
		return err;

	/* Check access for each request */
	switch (req) {
	case CAP_CONTROL_NCAPS:
		if ((err = check_access((unsigned long)userbuf,
					sizeof(int),
					MAP_USR_RW, 1)) < 0)
			return err;
		break;
	case CAP_CONTROL_READ:
		if ((err = check_access((unsigned long)userbuf,
					cap_count(current) *
					sizeof(struct capability),
					MAP_USR_RW, 1)) < 0)
			return err;
		break;
	case CAP_CONTROL_SHARE:
		if (flags == CAP_SHARE_ALL_CONTAINER ||
		    flags == CAP_SHARE_ALL_SPACE)
			break;
		/* Else fall through: a single-cap share passes a
		 * struct capability buffer like the cases below */
	case CAP_CONTROL_GRANT:
	case CAP_CONTROL_SPLIT:
	case CAP_CONTROL_REPLICATE:
	case CAP_CONTROL_DEDUCE:
	case CAP_CONTROL_DESTROY:
		if ((err = check_access((unsigned long)userbuf,
					sizeof(struct capability),
					MAP_USR_RW, 1)) < 0)
			return err;
		break;
	default:
		return -EINVAL;
	}

	/* Take action for each request */
	switch (req) {
	case CAP_CONTROL_NCAPS:
		*((int *)userbuf) = cap_count(current);
		break;
	case CAP_CONTROL_READ:
		err = cap_read_all((struct capability *)userbuf);
		break;
	case CAP_CONTROL_SHARE:
		err = cap_share((struct capability *)userbuf, flags);
		break;
	case CAP_CONTROL_GRANT:
		err = cap_grant((struct capability *)userbuf, flags);
		break;
	case CAP_CONTROL_SPLIT:
		err = cap_split((struct capability *)userbuf, flags);
		break;
	case CAP_CONTROL_REPLICATE:
		err = cap_replicate((struct capability *)userbuf);
		break;
	case CAP_CONTROL_DEDUCE:
		err = cap_deduce((struct capability *)userbuf);
		break;
	case CAP_CONTROL_DESTROY:
		err = cap_destroy((struct capability *)userbuf);
		break;
	default:
		return -EINVAL;
	}

	return err;
}

