/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)   ((ns)->uidhash_table + __uidhashfn((uid)))
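
/*
 * Note (added): __uidhashfn() folds the high bits of the uid into the low
 * bits before masking, so uids that differ only above UIDHASH_BITS still
 * spread across the table. For example, assuming UIDHASH_BITS were 8
 * (UIDHASH_SZ == 256), uid 0x101 hashes to ((0x101 >> 8) + 0x101) & 0xff
 * == 0x02, rather than colliding at slot 0x01 with uid 0x001.
 */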

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
#ifdef CONFIG_KEYS
        .uid_keyring    = &root_user_keyring,
        .session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
        .tg             = &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}
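
/*
 * Note (added): on a hit, uid_hash_find() takes a reference by bumping
 * ->__count; the caller owns that reference and must eventually drop it
 * with free_uid().
 */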
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
        sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
        int rc = 0;

        up->tg = sched_create_group();
        if (IS_ERR(up->tg))
                rc = -ENOMEM;

        return rc;
}

static void sched_switch_user(struct task_struct *p)
{
        sched_move_task(p);
}

#else   /* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif  /* CONFIG_FAIR_USER_SCHED */

#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kobject uids_kobject;     /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
        mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
        mutex_unlock(&uids_mutex);
}

/* return cpu shares held by the user */
static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
        struct user_struct *up = container_of(kset, struct user_struct, kset);

        return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}

/* modify cpu shares held by the user */
static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
                                size_t size)
{
        struct user_struct *up = container_of(kset, struct user_struct, kset);
        unsigned long shares;
        int rc;

        /* reject unparseable input rather than using 'shares' uninitialized */
        if (sscanf(buffer, "%lu", &shares) != 1)
                return -EINVAL;

        rc = sched_group_set_shares(up->tg, shares);

        return (rc ? rc : size);
}
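
/*
 * Usage sketch (added, not part of the original file): these handlers back
 * /sys/kernel/uids/<uid>/cpu_share, so with CONFIG_FAIR_USER_SCHED and
 * sysfs enabled an administrator could, e.g.:
 *
 *      # cat /sys/kernel/uids/1000/cpu_share
 *      1024
 *      # echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * to double the CPU share of uid 1000's task group relative to the
 * (assumed) default of 1024.
 */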

static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
        sa->attr.name = name;
        sa->attr.mode = mode;
        sa->show = cpu_shares_show;
        sa->store = cpu_shares_store;
}

/* Create "/sys/kernel/uids/<uid>" directory and
 * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
        struct kset *kset = &up->kset;
        struct kobject *kobj = &kset->kobj;
        int error;

        memset(kset, 0, sizeof(struct kset));
        kobj->parent = &uids_kobject;   /* create under /sys/kernel/uids dir */
        kobject_set_name(kobj, "%d", up->uid);
        kset_init(kset);
        user_attr_init(&up->user_attr, "cpu_share", 0644);

        error = kobject_add(kobj);
        if (error)
                goto done;

        error = sysfs_create_file(kobj, &up->user_attr.attr);
        if (error) {
                kobject_del(kobj);
                goto done;      /* don't announce a kobject we just removed */
        }

        kobject_uevent(kobj, KOBJ_ADD);

done:
        return error;
}

/* create these in sysfs filesystem:
 *      "/sys/kernel/uids" directory
 *      "/sys/kernel/uids/0" directory (for root user)
 *      "/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
        int error;

        /* create under /sys/kernel dir */
        uids_kobject.parent = &kernel_subsys.kobj;
        uids_kobject.kset = &kernel_subsys;
        kobject_set_name(&uids_kobject, "uids");
        kobject_init(&uids_kobject);

        error = kobject_add(&uids_kobject);
        if (!error)
                error = user_kobject_create(&root_user);

        return error;
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
        struct user_struct *up = container_of(w, struct user_struct, work);
        struct kobject *kobj = &up->kset.kobj;
        unsigned long flags;
        int remove_user = 0;

        /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
         * atomic.
         */
        uids_mutex_lock();

        local_irq_save(flags);

        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                remove_user = 1;
                spin_unlock_irqrestore(&uidhash_lock, flags);
        } else {
                local_irq_restore(flags);
        }

        if (!remove_user)
                goto done;

        sysfs_remove_file(kobj, &up->user_attr.attr);
        kobject_uevent(kobj, KOBJ_REMOVE);
        kobject_del(kobj);

        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);

done:
        uids_mutex_unlock();
}
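
/*
 * Note (added): free_user() below is entered with uidhash_lock held and
 * IRQs off, a context in which the sysfs/kobject teardown above (which can
 * sleep) must not run. It therefore restores the reference, drops the
 * lock, and defers the real cleanup to remove_user_sysfs_dir() on the
 * shared workqueue.
 */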

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        /* restore the count; the deferred work drops it again */
        atomic_inc(&up->__count);
        spin_unlock_irqrestore(&uidhash_lock, flags);

        INIT_WORK(&up->work, remove_user_sysfs_dir);
        schedule_work(&up->work);
}

#else   /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current->nsproxy->user_ns;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
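
/*
 * Usage sketch (added, hypothetical caller): every successful lookup must
 * be paired with a reference drop, e.g.:
 *
 *      struct user_struct *u = find_user(uid);
 *      if (u) {
 *              long nproc = atomic_read(&u->processes);
 *              ...
 *              free_uid(u);            (drop the ref find_user() took)
 *      }
 */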

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}
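
/*
 * Note (added): free_uid() disables interrupts by hand before
 * atomic_dec_and_lock() so that, on the final put, uidhash_lock is
 * effectively acquired with IRQs off, matching the softirq-safety rules
 * documented above for this lock.
 */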

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up;

        /* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                struct user_struct *new;

                new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
                if (!new) {
                        uids_mutex_unlock();
                        return NULL;
                }

                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);
                atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
                atomic_set(&new->inotify_watches, 0);
                atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
                new->mq_bytes = 0;
#endif
                new->locked_shm = 0;

                if (alloc_uid_keyring(new, current) < 0) {
                        kmem_cache_free(uid_cachep, new);
                        uids_mutex_unlock();
                        return NULL;
                }

                if (sched_create_user(new) < 0) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                        uids_mutex_unlock();
                        return NULL;
                }

                if (user_kobject_create(new)) {
                        sched_destroy_user(new);
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                        uids_mutex_unlock();
                        return NULL;
                }

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* This case is not possible when CONFIG_FAIR_USER_SCHED
                         * is defined, since we serialize alloc_uid() using
                         * uids_mutex. Hence no need to call
                         * sched_destroy_user() or remove_user_sysfs_dir().
                         */
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);

        }

        uids_mutex_unlock();

        return up;
}
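
/*
 * Note (added): alloc_uid() follows the optimistic
 * lookup / unlocked allocate / locked re-lookup pattern: the GFP_KERNEL
 * allocation and keyring setup run without uidhash_lock held, and the
 * second uid_hash_find() under the lock resolves any race with a
 * concurrent alloc_uid() for the same uid.
 */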

void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over his NPROC rlimit? We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it. -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;
        sched_switch_user(current);

        /*
         * We need to synchronize with __sigqueue_alloc()
         * doing a get_uid(p->user).. If that saw the old
         * user value, we need to wait until it has exited
         * its critical region before we can free the old
         * structure.
         */
        smp_mb();
        spin_unlock_wait(&current->sighand->siglock);

        free_uid(old_user);
        suid_keys(current);
}
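
/*
 * Note (added): the smp_mb() in switch_uid() orders the store to
 * current->user before spin_unlock_wait() samples siglock, so any
 * __sigqueue_alloc() that read the old ->user inside the siglock critical
 * section is waited out before free_uid() can drop the last reference.
 */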

void release_uids(struct user_namespace *ns)
{
        int i;
        unsigned long flags;
        struct hlist_head *head;
        struct hlist_node *nd;

        spin_lock_irqsave(&uidhash_lock, flags);
        /*
         * collapse the chains so that the user_struct-s will
         * still be alive, but no longer hashed; a subsequent
         * free_uid() will free them.
         */
        for (i = 0; i < UIDHASH_SZ; i++) {
                head = ns->uidhash_table + i;
                while (!hlist_empty(head)) {
                        nd = head->first;
                        hlist_del_init(nd);
                }
        }
        spin_unlock_irqrestore(&uidhash_lock, flags);

        free_uid(ns->root_user);
}

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);
|