/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
        .kref = {
                .refcount       = ATOMIC_INIT(2),
        },
        .root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)   ((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
#ifdef CONFIG_KEYS
        .uid_keyring    = &root_user_keyring,
        .session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_USER_SCHED
        .tg             = &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

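/*
 * Group-scheduler hooks. With CONFIG_USER_SCHED each user_struct owns its
 * own task group (up->tg), so CPU time is shared between users rather than
 * between individual tasks; without it the helpers below are empty stubs.
 */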
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
        sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
        int rc = 0;

        up->tg = sched_create_group();
        if (IS_ERR(up->tg))
                rc = -ENOMEM;

        return rc;
}

static void sched_switch_user(struct task_struct *p)
{
        sched_move_task(p);
}

#else /* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif /* CONFIG_USER_SCHED */

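/*
 * sysfs interface: when both user scheduling and sysfs are available, each
 * user gets a /sys/kernel/uids/<uid>/ directory exposing its scheduling
 * knobs (cpu_share, and cpu_rt_runtime with RT group scheduling).
 */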
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
        mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
        mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long shares;
        int rc;

        sscanf(buf, "%lu", &shares);

        rc = sched_group_set_shares(up->tg, shares);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
        __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long rt_runtime;
        int rc;

        sscanf(buf, "%lu", &rt_runtime);

        rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
        __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
        &cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        &cpu_rt_runtime_attr.attr,
#endif
        NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
        return;
}

static struct kobj_type uids_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = uids_attributes,
        .release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
        struct kobject *kobj = &up->kobj;
        int error;

        memset(kobj, 0, sizeof(struct kobject));
        kobj->kset = uids_kset;
        error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
        if (error) {
                kobject_put(kobj);
                goto done;
        }

        kobject_uevent(kobj, KOBJ_ADD);
done:
        return error;
}

/* create these entries in sysfs:
 *      "/sys/kernel/uids" directory
 *      "/sys/kernel/uids/0" directory (for root user)
 *      "/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
        uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
        if (!uids_kset)
                return -ENOMEM;

        return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
        struct user_struct *up = container_of(w, struct user_struct, work);
        unsigned long flags;
        int remove_user = 0;

        /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
         * atomic.
         */
        uids_mutex_lock();

        local_irq_save(flags);

        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                remove_user = 1;
                spin_unlock_irqrestore(&uidhash_lock, flags);
        } else {
                local_irq_restore(flags);
        }

        if (!remove_user)
                goto done;

        kobject_uevent(&up->kobj, KOBJ_REMOVE);
        kobject_del(&up->kobj);
        kobject_put(&up->kobj);

        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);

done:
        uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        /* restore the count; the actual teardown runs from the work item */
        atomic_inc(&up->__count);
        spin_unlock_irqrestore(&uidhash_lock, flags);

        INIT_WORK(&up->work, remove_user_sysfs_dir);
        schedule_work(&up->work);
}

#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current->nsproxy->user_ns;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}

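/*
 * Drop a reference on a user_struct. When the last reference goes away the
 * entry is unhashed and freed via free_user(); in the sysfs case that work
 * is deferred to a workqueue (see remove_user_sysfs_dir() above).
 */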
void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}

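/*
 * Look up the user_struct for @uid in @ns, creating and hashing a new one
 * if necessary. Returns NULL if the allocation or one of the keyring,
 * scheduler or sysfs setup steps fails.
 */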
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up, *new;

        /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        goto out_unlock;

                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);
                atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
                atomic_set(&new->inotify_watches, 0);
                atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
                new->mq_bytes = 0;
#endif
                new->locked_shm = 0;

                if (alloc_uid_keyring(new, current) < 0)
                        goto out_free_user;

                if (sched_create_user(new) < 0)
                        goto out_put_keys;

                if (uids_user_create(new))
                        goto out_destroy_sched;

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* This case is not possible when CONFIG_USER_SCHED
                         * is defined, since we serialize alloc_uid() using
                         * uids_mutex. Hence no need to call
                         * sched_destroy_user() or remove_user_sysfs_dir().
                         */
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        uids_mutex_unlock();

        return up;

out_destroy_sched:
        sched_destroy_user(new);
out_put_keys:
        key_put(new->uid_keyring);
        key_put(new->session_keyring);
out_free_user:
        kmem_cache_free(uid_cachep, new);
out_unlock:
        uids_mutex_unlock();
        return NULL;
}

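/*
 * Re-account the current task against @new_user: move the process count,
 * switch keyrings, publish the new user pointer, let the scheduler regroup
 * the task, and finally drop the reference on the old user_struct.
 */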
void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over his NPROC rlimit? We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it. -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;
        sched_switch_user(current);

        /*
         * We need to synchronize with __sigqueue_alloc()
         * doing a get_uid(p->user). If that saw the old
         * user value, we need to wait until it has exited
         * its critical region before we can free the old
         * structure.
         */
        smp_mb();
        spin_unlock_wait(&current->sighand->siglock);

        free_uid(old_user);
        suid_keys(current);
}

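/*
 * Namespace teardown: unhash every user_struct in @ns (each stays alive
 * until its last free_uid()) and drop the namespace's reference on its
 * root user.
 */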
aee16ce7 | 474 | #ifdef CONFIG_USER_NS |
28f300d2 PE |
475 | void release_uids(struct user_namespace *ns) |
476 | { | |
477 | int i; | |
478 | unsigned long flags; | |
479 | struct hlist_head *head; | |
480 | struct hlist_node *nd; | |
481 | ||
482 | spin_lock_irqsave(&uidhash_lock, flags); | |
483 | /* | |
484 | * collapse the chains so that the user_struct-s will | |
485 | * be still alive, but not in hashes. subsequent free_uid() | |
486 | * will free them. | |
487 | */ | |
488 | for (i = 0; i < UIDHASH_SZ; i++) { | |
489 | head = ns->uidhash_table + i; | |
490 | while (!hlist_empty(head)) { | |
491 | nd = head->first; | |
492 | hlist_del_init(nd); | |
493 | } | |
494 | } | |
495 | spin_unlock_irqrestore(&uidhash_lock, flags); | |
496 | ||
497 | free_uid(ns->root_user); | |
498 | } | |
aee16ce7 | 499 | #endif |
1da177e4 LT |
500 | |
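/*
 * Boot-time initialisation: create the user_struct slab cache, initialise
 * the hash table of the initial user namespace, and hash the statically
 * allocated root_user.
 */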
static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);