OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/linux/linux-2.4/kernel/user.c (rev 1765)

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_BITS            8
#define UIDHASH_SZ              (1 << UIDHASH_BITS)
#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) ^ uid) & UIDHASH_MASK)
#define uidhashentry(uid)       (uidhash_table + __uidhashfn(uid))
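/*
 * Worked example: __uidhashfn(0x1234) = ((0x12 ^ 0x1234) & 0xff) = 0x26,
 * so uidhashentry(0x1234) points at uidhash_table[0x26].  Folding the
 * high bits in keeps uids that differ only above the low 8 bits from
 * always colliding.
 */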

static kmem_cache_t *uid_cachep;
static struct user_struct *uidhash_table[UIDHASH_SZ];
static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
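
/*
 * root_user is the statically allocated entry for uid 0.  It starts with
 * one reference and one process accounted because init already runs as
 * root when uid_cache_init() hashes it below.
 */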
struct user_struct root_user = {
        __count:        ATOMIC_INIT(1),
        processes:      ATOMIC_INIT(1),
        files:          ATOMIC_INIT(0)
};
 
/*
 * These routines must be called with the uidhash spinlock held!
 */
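/*
 * Each hash chain is singly linked through ->next, and ->pprev points
 * back at whichever pointer (the hash slot or the previous entry's
 * ->next) currently references the entry, so uid_hash_remove() can
 * unlink in O(1) without walking the chain.
 */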
static inline void uid_hash_insert(struct user_struct *up, struct user_struct **hashent)
{
        struct user_struct *next = *hashent;

        up->next = next;
        if (next)
                next->pprev = &up->next;
        up->pprev = hashent;
        *hashent = up;
}

static inline void uid_hash_remove(struct user_struct *up)
{
        struct user_struct *next = up->next;
        struct user_struct **pprev = up->pprev;

        if (next)
                next->pprev = pprev;
        *pprev = next;
}
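
/*
 * Look uid up in the chain at *hashent; on a hit, take a reference and
 * return the entry, otherwise return NULL.  Caller holds uidhash_lock.
 */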
static inline struct user_struct *uid_hash_find(uid_t uid, struct user_struct **hashent)
{
        struct user_struct *next;

        next = *hashent;
        for (;;) {
                struct user_struct *up = next;
                if (next) {
                        next = up->next;
                        if (up->uid != uid)
                                continue;
                        atomic_inc(&up->__count);
                }
                return up;
        }
}
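
/*
 * Drop one reference.  atomic_dec_and_lock() takes uidhash_lock only
 * when the count reaches zero, at which point the entry is unhashed
 * and freed.
 */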
void free_uid(struct user_struct *up)
{
        if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                kmem_cache_free(uid_cachep, up);
                spin_unlock(&uidhash_lock);
        }
}
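
/*
 * Return the user_struct for uid, allocating and hashing a new entry if
 * none exists yet.  The lookup is retried under the lock before insertion
 * to handle two tasks racing to add the same uid.  Returns NULL only if
 * the slab allocation fails; the reference returned is dropped with
 * free_uid().
 */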
struct user_struct * alloc_uid(uid_t uid)
{
        struct user_struct **hashent = uidhashentry(uid);
        struct user_struct *up;

        spin_lock(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock(&uidhash_lock);

        if (!up) {
                struct user_struct *new;

                new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
                if (!new)
                        return NULL;
                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock(&uidhash_lock);

        }
        return up;
}
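
/*
 * Re-account the calling task against new_user: take a reference and a
 * process count on the new entry, point current->user at it, then release
 * the old user's process count and reference.
 */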
void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over his NPROC rlimit? We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it. -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->__count);
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        current->user = new_user;
        free_uid(old_user);
}
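
/*
 * Boot-time setup: create the slab cache for user_struct and hash the
 * statically allocated root_user, so lookups for uid 0 hit immediately.
 */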
static int __init uid_cache_init(void)
{
        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                                       0,
                                       SLAB_HWCACHE_ALIGN, NULL, NULL);
        if(!uid_cachep)
                panic("Cannot create uid taskcount SLAB cache\n");

        /* Insert the root user immediately - init already runs with this */
        uid_hash_insert(&root_user, uidhashentry(0));
        return 0;
}

module_init(uid_cache_init);
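
/*
 * Illustrative sketch only (not part of user.c): roughly how a
 * setuid()-style caller would use this cache, assuming a 2.4-style
 * task_struct whose ->user field is managed as in switch_uid() above.
 * new_ruid is just a placeholder for the uid being switched to.
 *
 *        struct user_struct *new_user = alloc_uid(new_ruid);
 *        if (!new_user)
 *                return -EAGAIN;
 *        switch_uid(new_user);   -- takes its own reference, re-accounts current
 *        free_uid(new_user);     -- drop the reference alloc_uid() returned
 */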
