Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/kernel/sys.c | |
3 | * | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | */ | |
6 | ||
7 | #include <linux/config.h> | |
8 | #include <linux/module.h> | |
9 | #include <linux/mm.h> | |
10 | #include <linux/utsname.h> | |
11 | #include <linux/mman.h> | |
12 | #include <linux/smp_lock.h> | |
13 | #include <linux/notifier.h> | |
14 | #include <linux/reboot.h> | |
15 | #include <linux/prctl.h> | |
1da177e4 LT |
16 | #include <linux/highuid.h> |
17 | #include <linux/fs.h> | |
dc009d92 EB |
18 | #include <linux/kernel.h> |
19 | #include <linux/kexec.h> | |
1da177e4 | 20 | #include <linux/workqueue.h> |
c59ede7b | 21 | #include <linux/capability.h> |
1da177e4 LT |
22 | #include <linux/device.h> |
23 | #include <linux/key.h> | |
24 | #include <linux/times.h> | |
25 | #include <linux/posix-timers.h> | |
26 | #include <linux/security.h> | |
27 | #include <linux/dcookies.h> | |
28 | #include <linux/suspend.h> | |
29 | #include <linux/tty.h> | |
7ed20e1a | 30 | #include <linux/signal.h> |
9f46080c | 31 | #include <linux/cn_proc.h> |
1da177e4 LT |
32 | |
33 | #include <linux/compat.h> | |
34 | #include <linux/syscalls.h> | |
00d7c05a | 35 | #include <linux/kprobes.h> |
1da177e4 LT |
36 | |
37 | #include <asm/uaccess.h> | |
38 | #include <asm/io.h> | |
39 | #include <asm/unistd.h> | |
40 | ||
41 | #ifndef SET_UNALIGN_CTL | |
42 | # define SET_UNALIGN_CTL(a,b) (-EINVAL) | |
43 | #endif | |
44 | #ifndef GET_UNALIGN_CTL | |
45 | # define GET_UNALIGN_CTL(a,b) (-EINVAL) | |
46 | #endif | |
47 | #ifndef SET_FPEMU_CTL | |
48 | # define SET_FPEMU_CTL(a,b) (-EINVAL) | |
49 | #endif | |
50 | #ifndef GET_FPEMU_CTL | |
51 | # define GET_FPEMU_CTL(a,b) (-EINVAL) | |
52 | #endif | |
53 | #ifndef SET_FPEXC_CTL | |
54 | # define SET_FPEXC_CTL(a,b) (-EINVAL) | |
55 | #endif | |
56 | #ifndef GET_FPEXC_CTL | |
57 | # define GET_FPEXC_CTL(a,b) (-EINVAL) | |
58 | #endif | |
651d765d AB |
59 | #ifndef GET_ENDIAN |
60 | # define GET_ENDIAN(a,b) (-EINVAL) | |
61 | #endif | |
62 | #ifndef SET_ENDIAN | |
63 | # define SET_ENDIAN(a,b) (-EINVAL) | |
64 | #endif | |
1da177e4 LT |
65 | |
66 | /* | |
67 | * this is where the system-wide overflow UID and GID are defined, for | |
68 | * architectures that now have 32-bit UID/GID but didn't in the past | |
69 | */ | |
70 | ||
71 | int overflowuid = DEFAULT_OVERFLOWUID; | |
72 | int overflowgid = DEFAULT_OVERFLOWGID; | |
73 | ||
74 | #ifdef CONFIG_UID16 | |
75 | EXPORT_SYMBOL(overflowuid); | |
76 | EXPORT_SYMBOL(overflowgid); | |
77 | #endif | |
78 | ||
79 | /* | |
80 | * the same as above, but for filesystems which can only store a 16-bit | |
81 | * UID and GID. As such, this is needed on all architectures | |
82 | */ | |
83 | ||
84 | int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; | |
85 | int fs_overflowgid = DEFAULT_FS_OVERFLOWGID; | |
86 | ||
87 | EXPORT_SYMBOL(fs_overflowuid); | |
88 | EXPORT_SYMBOL(fs_overflowgid); | |
89 | ||
90 | /* | |
91 | * this indicates whether you can reboot with ctrl-alt-del: the default is yes | |
92 | */ | |
93 | ||
94 | int C_A_D = 1; | |
95 | int cad_pid = 1; | |
96 | ||
97 | /* | |
98 | * Notifier list for kernel code which wants to be called | |
99 | * at shutdown. This is used to stop any idling DMA operations | |
100 | * and the like. | |
101 | */ | |
102 | ||
e041c683 AS |
103 | static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); |
104 | ||
105 | /* | |
106 | * Notifier chain core routines. The exported routines below | |
107 | * are layered on top of these, with appropriate locking added. | |
108 | */ | |
109 | ||
110 | static int notifier_chain_register(struct notifier_block **nl, | |
111 | struct notifier_block *n) | |
112 | { | |
113 | while ((*nl) != NULL) { | |
114 | if (n->priority > (*nl)->priority) | |
115 | break; | |
116 | nl = &((*nl)->next); | |
117 | } | |
118 | n->next = *nl; | |
119 | rcu_assign_pointer(*nl, n); | |
120 | return 0; | |
121 | } | |
122 | ||
123 | static int notifier_chain_unregister(struct notifier_block **nl, | |
124 | struct notifier_block *n) | |
125 | { | |
126 | while ((*nl) != NULL) { | |
127 | if ((*nl) == n) { | |
128 | rcu_assign_pointer(*nl, n->next); | |
129 | return 0; | |
130 | } | |
131 | nl = &((*nl)->next); | |
132 | } | |
133 | return -ENOENT; | |
134 | } | |
135 | ||
136 | static int __kprobes notifier_call_chain(struct notifier_block **nl, | |
137 | unsigned long val, void *v) | |
138 | { | |
139 | int ret = NOTIFY_DONE; | |
140 | struct notifier_block *nb; | |
141 | ||
142 | nb = rcu_dereference(*nl); | |
143 | while (nb) { | |
144 | ret = nb->notifier_call(nb, val, v); | |
145 | if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) | |
146 | break; | |
147 | nb = rcu_dereference(nb->next); | |
148 | } | |
149 | return ret; | |
150 | } | |
151 | ||
152 | /* | |
153 | * Atomic notifier chain routines. Registration and unregistration | |
154 | * use a mutex, and call_chain is synchronized by RCU (no locks). | |
155 | */ | |
1da177e4 LT |
156 | |
157 | /** | |
e041c683 AS |
158 | * atomic_notifier_chain_register - Add notifier to an atomic notifier chain |
159 | * @nh: Pointer to head of the atomic notifier chain | |
1da177e4 LT |
160 | * @n: New entry in notifier chain |
161 | * | |
e041c683 | 162 | * Adds a notifier to an atomic notifier chain. |
1da177e4 LT |
163 | * |
164 | * Currently always returns zero. | |
165 | */ | |
e041c683 AS |
166 | |
167 | int atomic_notifier_chain_register(struct atomic_notifier_head *nh, | |
168 | struct notifier_block *n) | |
169 | { | |
170 | unsigned long flags; | |
171 | int ret; | |
172 | ||
173 | spin_lock_irqsave(&nh->lock, flags); | |
174 | ret = notifier_chain_register(&nh->head, n); | |
175 | spin_unlock_irqrestore(&nh->lock, flags); | |
176 | return ret; | |
177 | } | |
178 | ||
179 | EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); | |
180 | ||
181 | /** | |
182 | * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain | |
183 | * @nh: Pointer to head of the atomic notifier chain | |
184 | * @n: Entry to remove from notifier chain | |
185 | * | |
186 | * Removes a notifier from an atomic notifier chain. | |
187 | * | |
188 | * Returns zero on success or %-ENOENT on failure. | |
189 | */ | |
190 | int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, | |
191 | struct notifier_block *n) | |
192 | { | |
193 | unsigned long flags; | |
194 | int ret; | |
195 | ||
196 | spin_lock_irqsave(&nh->lock, flags); | |
197 | ret = notifier_chain_unregister(&nh->head, n); | |
198 | spin_unlock_irqrestore(&nh->lock, flags); | |
199 | synchronize_rcu(); | |
200 | return ret; | |
201 | } | |
202 | ||
203 | EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); | |
204 | ||
205 | /** | |
206 | * atomic_notifier_call_chain - Call functions in an atomic notifier chain | |
207 | * @nh: Pointer to head of the atomic notifier chain | |
208 | * @val: Value passed unmodified to notifier function | |
209 | * @v: Pointer passed unmodified to notifier function | |
210 | * | |
211 | * Calls each function in a notifier chain in turn. The functions | |
212 | * run in an atomic context, so they must not block. | |
213 | * This routine uses RCU to synchronize with changes to the chain. | |
214 | * | |
215 | * If the return value of the notifier can be and'ed | |
216 | * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain | |
217 | * will return immediately, with the return value of | |
218 | * the notifier function which halted execution. | |
219 | * Otherwise the return value is the return value | |
220 | * of the last notifier function called. | |
221 | */ | |
1da177e4 | 222 | |
e041c683 AS |
223 | int atomic_notifier_call_chain(struct atomic_notifier_head *nh, |
224 | unsigned long val, void *v) | |
1da177e4 | 225 | { |
e041c683 AS |
226 | int ret; |
227 | ||
228 | rcu_read_lock(); | |
229 | ret = notifier_call_chain(&nh->head, val, v); | |
230 | rcu_read_unlock(); | |
231 | return ret; | |
1da177e4 LT |
232 | } |
233 | ||
e041c683 AS |
234 | EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); |
235 | ||
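
> Editor's note: for readers unfamiliar with the notifier API, a minimal usage sketch follows. The chain head `my_atomic_chain`, the callback, and the event code are hypothetical, not part of kernel/sys.c; the sketch only illustrates the register/unregister/callback pattern documented above.

```c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

/* Hypothetical chain owned by some subsystem (not defined in this file). */
static ATOMIC_NOTIFIER_HEAD(my_atomic_chain);

/* Callbacks on an atomic chain run in atomic context and must not sleep. */
static int my_event_cb(struct notifier_block *nb, unsigned long event, void *data)
{
	if (event == 1 /* hypothetical event code */)
		return NOTIFY_STOP;	/* handled: stop walking the chain */
	return NOTIFY_DONE;		/* not interested: let the walk continue */
}

static struct notifier_block my_nb = {
	.notifier_call	= my_event_cb,
	.priority	= 0,	/* higher-priority entries are called first */
};

static int __init my_init(void)
{
	return atomic_notifier_chain_register(&my_atomic_chain, &my_nb);
}

static void __exit my_exit(void)
{
	atomic_notifier_chain_unregister(&my_atomic_chain, &my_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
```

> Because unregistration goes through synchronize_rcu(), the callback is guaranteed not to be running once atomic_notifier_chain_unregister() has returned.
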
236 | /* | |
237 | * Blocking notifier chain routines. All access to the chain is | |
238 | * synchronized by an rwsem. | |
239 | */ | |
1da177e4 LT |
240 | |
241 | /** | |
e041c683 AS |
242 | * blocking_notifier_chain_register - Add notifier to a blocking notifier chain |
243 | * @nh: Pointer to head of the blocking notifier chain | |
1da177e4 LT |
244 | * @n: New entry in notifier chain |
245 | * | |
e041c683 AS |
246 | * Adds a notifier to a blocking notifier chain. |
247 | * Must be called in process context. | |
1da177e4 | 248 | * |
e041c683 | 249 | * Currently always returns zero. |
1da177e4 LT |
250 | */ |
251 | ||
e041c683 AS |
252 | int blocking_notifier_chain_register(struct blocking_notifier_head *nh, |
253 | struct notifier_block *n) | |
1da177e4 | 254 | { |
e041c683 AS |
255 | int ret; |
256 | ||
257 | /* | |
258 | * This code gets used during boot-up, when task switching is | |
259 | * not yet working and interrupts must remain disabled. At | |
260 | * such times we must not call down_write(). | |
261 | */ | |
262 | if (unlikely(system_state == SYSTEM_BOOTING)) | |
263 | return notifier_chain_register(&nh->head, n); | |
264 | ||
265 | down_write(&nh->rwsem); | |
266 | ret = notifier_chain_register(&nh->head, n); | |
267 | up_write(&nh->rwsem); | |
268 | return ret; | |
1da177e4 LT |
269 | } |
270 | ||
e041c683 | 271 | EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); |
1da177e4 LT |
272 | |
273 | /** | |
e041c683 AS |
274 | * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain |
275 | * @nh: Pointer to head of the blocking notifier chain | |
276 | * @n: Entry to remove from notifier chain | |
277 | * | |
278 | * Removes a notifier from a blocking notifier chain. | |
279 | * Must be called from process context. | |
280 | * | |
281 | * Returns zero on success or %-ENOENT on failure. | |
282 | */ | |
283 | int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, | |
284 | struct notifier_block *n) | |
285 | { | |
286 | int ret; | |
287 | ||
288 | /* | |
289 | * This code gets used during boot-up, when task switching is | |
290 | * not yet working and interrupts must remain disabled. At | |
291 | * such times we must not call down_write(). | |
292 | */ | |
293 | if (unlikely(system_state == SYSTEM_BOOTING)) | |
294 | return notifier_chain_unregister(&nh->head, n); | |
295 | ||
296 | down_write(&nh->rwsem); | |
297 | ret = notifier_chain_unregister(&nh->head, n); | |
298 | up_write(&nh->rwsem); | |
299 | return ret; | |
300 | } | |
301 | ||
302 | EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); | |
303 | ||
304 | /** | |
305 | * blocking_notifier_call_chain - Call functions in a blocking notifier chain | |
306 | * @nh: Pointer to head of the blocking notifier chain | |
1da177e4 LT |
307 | * @val: Value passed unmodified to notifier function |
308 | * @v: Pointer passed unmodified to notifier function | |
309 | * | |
e041c683 AS |
310 | * Calls each function in a notifier chain in turn. The functions |
311 | * run in a process context, so they are allowed to block. | |
1da177e4 | 312 | * |
e041c683 AS |
313 | * If the return value of the notifier can be and'ed |
314 | * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain | |
1da177e4 LT |
315 | * will return immediately, with the return value of |
316 | * the notifier function which halted execution. | |
e041c683 | 317 | * Otherwise the return value is the return value |
1da177e4 LT |
318 | * of the last notifier function called. |
319 | */ | |
320 | ||
e041c683 AS |
321 | int blocking_notifier_call_chain(struct blocking_notifier_head *nh, |
322 | unsigned long val, void *v) | |
1da177e4 | 323 | { |
e041c683 AS |
324 | int ret; |
325 | ||
326 | down_read(&nh->rwsem); | |
327 | ret = notifier_call_chain(&nh->head, val, v); | |
328 | up_read(&nh->rwsem); | |
1da177e4 LT |
329 | return ret; |
330 | } | |
331 | ||
e041c683 AS |
332 | EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); |
333 | ||
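
> Editor's note: the caller side of a blocking chain typically looks like the sketch below. The chain head, the event codes, and the -EBUSY veto policy are illustrative assumptions, not part of this file.

```c
#include <linux/errno.h>
#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(my_change_chain);

#define MY_PRECHANGE	1	/* hypothetical event codes */
#define MY_POSTCHANGE	2

/* Runs in process context, so registered callbacks are allowed to sleep. */
static int my_publish_change(void *payload)
{
	int ret;

	ret = blocking_notifier_call_chain(&my_change_chain, MY_PRECHANGE, payload);
	if (ret & NOTIFY_STOP_MASK)
		return -EBUSY;		/* a notifier vetoed the change */

	/* ... perform the change here ... */

	blocking_notifier_call_chain(&my_change_chain, MY_POSTCHANGE, payload);
	return 0;
}
```
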
334 | /* | |
335 | * Raw notifier chain routines. There is no protection; | |
336 | * the caller must provide it. Use at your own risk! | |
337 | */ | |
338 | ||
339 | /** | |
340 | * raw_notifier_chain_register - Add notifier to a raw notifier chain | |
341 | * @nh: Pointer to head of the raw notifier chain | |
342 | * @n: New entry in notifier chain | |
343 | * | |
344 | * Adds a notifier to a raw notifier chain. | |
345 | * All locking must be provided by the caller. | |
346 | * | |
347 | * Currently always returns zero. | |
348 | */ | |
349 | ||
350 | int raw_notifier_chain_register(struct raw_notifier_head *nh, | |
351 | struct notifier_block *n) | |
352 | { | |
353 | return notifier_chain_register(&nh->head, n); | |
354 | } | |
355 | ||
356 | EXPORT_SYMBOL_GPL(raw_notifier_chain_register); | |
357 | ||
358 | /** | |
359 | * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain | |
360 | * @nh: Pointer to head of the raw notifier chain | |
361 | * @n: Entry to remove from notifier chain | |
362 | * | |
363 | * Removes a notifier from a raw notifier chain. | |
364 | * All locking must be provided by the caller. | |
365 | * | |
366 | * Returns zero on success or %-ENOENT on failure. | |
367 | */ | |
368 | int raw_notifier_chain_unregister(struct raw_notifier_head *nh, | |
369 | struct notifier_block *n) | |
370 | { | |
371 | return notifier_chain_unregister(&nh->head, n); | |
372 | } | |
373 | ||
374 | EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); | |
375 | ||
376 | /** | |
377 | * raw_notifier_call_chain - Call functions in a raw notifier chain | |
378 | * @nh: Pointer to head of the raw notifier chain | |
379 | * @val: Value passed unmodified to notifier function | |
380 | * @v: Pointer passed unmodified to notifier function | |
381 | * | |
382 | * Calls each function in a notifier chain in turn. The functions | |
383 | * run in an undefined context. | |
384 | * All locking must be provided by the caller. | |
385 | * | |
386 | * If the return value of the notifier can be and'ed | |
387 | * with %NOTIFY_STOP_MASK then raw_notifier_call_chain | |
388 | * will return immediately, with the return value of | |
389 | * the notifier function which halted execution. | |
390 | * Otherwise the return value is the return value | |
391 | * of the last notifier function called. | |
392 | */ | |
393 | ||
394 | int raw_notifier_call_chain(struct raw_notifier_head *nh, | |
395 | unsigned long val, void *v) | |
396 | { | |
397 | return notifier_call_chain(&nh->head, val, v); | |
398 | } | |
399 | ||
400 | EXPORT_SYMBOL_GPL(raw_notifier_call_chain); | |
1da177e4 LT |
401 | |
402 | /** | |
403 | * register_reboot_notifier - Register function to be called at reboot time | |
404 | * @nb: Info about notifier function to be called | |
405 | * | |
406 | * Registers a function with the list of functions | |
407 | * to be called at reboot time. | |
408 | * | |
e041c683 | 409 | * Currently always returns zero, as blocking_notifier_chain_register |
1da177e4 LT |
410 | * always returns zero. |
411 | */ | |
412 | ||
413 | int register_reboot_notifier(struct notifier_block * nb) | |
414 | { | |
e041c683 | 415 | return blocking_notifier_chain_register(&reboot_notifier_list, nb); |
1da177e4 LT |
416 | } |
417 | ||
418 | EXPORT_SYMBOL(register_reboot_notifier); | |
419 | ||
420 | /** | |
421 | * unregister_reboot_notifier - Unregister previously registered reboot notifier | |
422 | * @nb: Hook to be unregistered | |
423 | * | |
424 | * Unregisters a previously registered reboot | |
425 | * notifier function. | |
426 | * | |
427 | * Returns zero on success, or %-ENOENT on failure. | |
428 | */ | |
429 | ||
430 | int unregister_reboot_notifier(struct notifier_block * nb) | |
431 | { | |
e041c683 | 432 | return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); |
1da177e4 LT |
433 | } |
434 | ||
435 | EXPORT_SYMBOL(unregister_reboot_notifier); | |
436 | ||
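
> Editor's note: as a concrete illustration of the reboot notifier interface exported above, a driver might register a hook that quiesces its hardware before restart or power-off. The device name and the commented-out shutdown call are placeholders.

```c
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int mydev_reboot_event(struct notifier_block *nb,
			      unsigned long event, void *cmd)
{
	switch (event) {
	case SYS_RESTART:
	case SYS_HALT:
	case SYS_POWER_OFF:
		/* mydev_stop_dma();  -- driver-specific quiescing goes here */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block mydev_reboot_nb = {
	.notifier_call = mydev_reboot_event,
};

static int __init mydev_init(void)
{
	return register_reboot_notifier(&mydev_reboot_nb);
}

static void __exit mydev_exit(void)
{
	unregister_reboot_notifier(&mydev_reboot_nb);
}

module_init(mydev_init);
module_exit(mydev_exit);
MODULE_LICENSE("GPL");
```

> The event codes (SYS_RESTART, SYS_HALT, SYS_POWER_OFF) are the ones passed in by kernel_restart_prepare() and kernel_shutdown_prepare() further down in this file.
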
437 | static int set_one_prio(struct task_struct *p, int niceval, int error) | |
438 | { | |
439 | int no_nice; | |
440 | ||
441 | if (p->uid != current->euid && | |
442 | p->euid != current->euid && !capable(CAP_SYS_NICE)) { | |
443 | error = -EPERM; | |
444 | goto out; | |
445 | } | |
e43379f1 | 446 | if (niceval < task_nice(p) && !can_nice(p, niceval)) { |
1da177e4 LT |
447 | error = -EACCES; |
448 | goto out; | |
449 | } | |
450 | no_nice = security_task_setnice(p, niceval); | |
451 | if (no_nice) { | |
452 | error = no_nice; | |
453 | goto out; | |
454 | } | |
455 | if (error == -ESRCH) | |
456 | error = 0; | |
457 | set_user_nice(p, niceval); | |
458 | out: | |
459 | return error; | |
460 | } | |
461 | ||
462 | asmlinkage long sys_setpriority(int which, int who, int niceval) | |
463 | { | |
464 | struct task_struct *g, *p; | |
465 | struct user_struct *user; | |
466 | int error = -EINVAL; | |
467 | ||
468 | if (which > 2 || which < 0) | |
469 | goto out; | |
470 | ||
471 | /* normalize: avoid signed division (rounding problems) */ | |
472 | error = -ESRCH; | |
473 | if (niceval < -20) | |
474 | niceval = -20; | |
475 | if (niceval > 19) | |
476 | niceval = 19; | |
477 | ||
478 | read_lock(&tasklist_lock); | |
479 | switch (which) { | |
480 | case PRIO_PROCESS: | |
481 | if (!who) | |
482 | who = current->pid; | |
483 | p = find_task_by_pid(who); | |
484 | if (p) | |
485 | error = set_one_prio(p, niceval, error); | |
486 | break; | |
487 | case PRIO_PGRP: | |
488 | if (!who) | |
489 | who = process_group(current); | |
490 | do_each_task_pid(who, PIDTYPE_PGID, p) { | |
491 | error = set_one_prio(p, niceval, error); | |
492 | } while_each_task_pid(who, PIDTYPE_PGID, p); | |
493 | break; | |
494 | case PRIO_USER: | |
495 | user = current->user; | |
496 | if (!who) | |
497 | who = current->uid; | |
498 | else | |
499 | if ((who != current->uid) && !(user = find_user(who))) | |
500 | goto out_unlock; /* No processes for this user */ | |
501 | ||
502 | do_each_thread(g, p) | |
503 | if (p->uid == who) | |
504 | error = set_one_prio(p, niceval, error); | |
505 | while_each_thread(g, p); | |
506 | if (who != current->uid) | |
507 | free_uid(user); /* For find_user() */ | |
508 | break; | |
509 | } | |
510 | out_unlock: | |
511 | read_unlock(&tasklist_lock); | |
512 | out: | |
513 | return error; | |
514 | } | |
515 | ||
516 | /* | |
517 | * Ugh. To avoid negative return values, "getpriority()" will | |
518 | * not return the normal nice-value, but a negated value that | |
519 | * has been offset by 20 (ie it returns 40..1 instead of -20..19) | |
520 | * to stay compatible. | |
521 | */ | |
522 | asmlinkage long sys_getpriority(int which, int who) | |
523 | { | |
524 | struct task_struct *g, *p; | |
525 | struct user_struct *user; | |
526 | long niceval, retval = -ESRCH; | |
527 | ||
528 | if (which > 2 || which < 0) | |
529 | return -EINVAL; | |
530 | ||
531 | read_lock(&tasklist_lock); | |
532 | switch (which) { | |
533 | case PRIO_PROCESS: | |
534 | if (!who) | |
535 | who = current->pid; | |
536 | p = find_task_by_pid(who); | |
537 | if (p) { | |
538 | niceval = 20 - task_nice(p); | |
539 | if (niceval > retval) | |
540 | retval = niceval; | |
541 | } | |
542 | break; | |
543 | case PRIO_PGRP: | |
544 | if (!who) | |
545 | who = process_group(current); | |
546 | do_each_task_pid(who, PIDTYPE_PGID, p) { | |
547 | niceval = 20 - task_nice(p); | |
548 | if (niceval > retval) | |
549 | retval = niceval; | |
550 | } while_each_task_pid(who, PIDTYPE_PGID, p); | |
551 | break; | |
552 | case PRIO_USER: | |
553 | user = current->user; | |
554 | if (!who) | |
555 | who = current->uid; | |
556 | else | |
557 | if ((who != current->uid) && !(user = find_user(who))) | |
558 | goto out_unlock; /* No processes for this user */ | |
559 | ||
560 | do_each_thread(g, p) | |
561 | if (p->uid == who) { | |
562 | niceval = 20 - task_nice(p); | |
563 | if (niceval > retval) | |
564 | retval = niceval; | |
565 | } | |
566 | while_each_thread(g, p); | |
567 | if (who != current->uid) | |
568 | free_uid(user); /* for find_user() */ | |
569 | break; | |
570 | } | |
571 | out_unlock: | |
572 | read_unlock(&tasklist_lock); | |
573 | ||
574 | return retval; | |
575 | } | |
576 | ||
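
> Editor's note: a small userspace program makes the 20 - nice encoding visible. The raw syscall returns the offset value (40..1), while the glibc wrapper converts it back to an ordinary nice value. Illustrative sketch, not part of the kernel source.

```c
#include <stdio.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long raw;
	int niceval;

	setpriority(PRIO_PROCESS, 0, 5);	/* request nice value 5 */

	raw = syscall(SYS_getpriority, PRIO_PROCESS, 0); /* kernel returns 20 - nice */
	niceval = getpriority(PRIO_PROCESS, 0);		 /* glibc converts back */

	printf("raw syscall = %ld, glibc getpriority() = %d\n", raw, niceval);
	return 0;	/* prints: raw syscall = 15, glibc getpriority() = 5 */
}
```
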
e4c94330 EB |
577 | /** |
578 | * emergency_restart - reboot the system | |
579 | * | |
580 | * Without shutting down any hardware or taking any locks | |
581 | * reboot the system. This is called when we know we are in | |
582 | * trouble so this is our best effort to reboot. This is | |
583 | * safe to call in interrupt context. | |
584 | */ | |
7c903473 EB |
585 | void emergency_restart(void) |
586 | { | |
587 | machine_emergency_restart(); | |
588 | } | |
589 | EXPORT_SYMBOL_GPL(emergency_restart); | |
590 | ||
e4c94330 | 591 | void kernel_restart_prepare(char *cmd) |
4a00ea1e | 592 | { |
e041c683 | 593 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); |
4a00ea1e | 594 | system_state = SYSTEM_RESTART; |
4a00ea1e | 595 | device_shutdown(); |
e4c94330 | 596 | } |
1e5d5331 RD |
597 | |
598 | /** | |
599 | * kernel_restart - reboot the system | |
600 | * @cmd: pointer to buffer containing command to execute for restart | |
b8887e6e | 601 | * or %NULL |
1e5d5331 RD |
602 | * |
603 | * Shutdown everything and perform a clean reboot. | |
604 | * This is not safe to call in interrupt context. | |
605 | */ | |
e4c94330 EB |
606 | void kernel_restart(char *cmd) |
607 | { | |
608 | kernel_restart_prepare(cmd); | |
4a00ea1e EB |
609 | if (!cmd) { |
610 | printk(KERN_EMERG "Restarting system.\n"); | |
611 | } else { | |
612 | printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); | |
613 | } | |
614 | printk(".\n"); | |
615 | machine_restart(cmd); | |
616 | } | |
617 | EXPORT_SYMBOL_GPL(kernel_restart); | |
618 | ||
e4c94330 EB |
619 | /** |
620 | * kernel_kexec - reboot the system | |
621 | * | |
622 | * Move into place and start executing a preloaded standalone | |
623 | * executable. If nothing was preloaded return an error. | |
624 | */ | |
4a00ea1e EB |
625 | void kernel_kexec(void) |
626 | { | |
627 | #ifdef CONFIG_KEXEC | |
628 | struct kimage *image; | |
4bb8089c | 629 | image = xchg(&kexec_image, NULL); |
4a00ea1e EB |
630 | if (!image) { |
631 | return; | |
632 | } | |
e4c94330 | 633 | kernel_restart_prepare(NULL); |
4a00ea1e EB |
634 | printk(KERN_EMERG "Starting new kernel\n"); |
635 | machine_shutdown(); | |
636 | machine_kexec(image); | |
637 | #endif | |
638 | } | |
639 | EXPORT_SYMBOL_GPL(kernel_kexec); | |
640 | ||
729b4d4c AS |
641 | void kernel_shutdown_prepare(enum system_states state) |
642 | { | |
e041c683 | 643 | blocking_notifier_call_chain(&reboot_notifier_list, |
729b4d4c AS |
644 | (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL); |
645 | system_state = state; | |
646 | device_shutdown(); | |
647 | } | |
e4c94330 EB |
648 | /** |
649 | * kernel_halt - halt the system | |
650 | * | |
651 | * Shutdown everything and perform a clean system halt. | |
652 | */ | |
e4c94330 EB |
653 | void kernel_halt(void) |
654 | { | |
729b4d4c | 655 | kernel_shutdown_prepare(SYSTEM_HALT); |
4a00ea1e EB |
656 | printk(KERN_EMERG "System halted.\n"); |
657 | machine_halt(); | |
658 | } | |
729b4d4c | 659 | |
4a00ea1e EB |
660 | EXPORT_SYMBOL_GPL(kernel_halt); |
661 | ||
e4c94330 EB |
662 | /** |
663 | * kernel_power_off - power_off the system | |
664 | * | |
665 | * Shutdown everything and perform a clean system power_off. | |
666 | */ | |
e4c94330 EB |
667 | void kernel_power_off(void) |
668 | { | |
729b4d4c | 669 | kernel_shutdown_prepare(SYSTEM_POWER_OFF); |
4a00ea1e EB |
670 | printk(KERN_EMERG "Power down.\n"); |
671 | machine_power_off(); | |
672 | } | |
673 | EXPORT_SYMBOL_GPL(kernel_power_off); | |
1da177e4 LT |
674 | /* |
675 | * Reboot system call: for obvious reasons only root may call it, | |
676 | * and even root needs to set up some magic numbers in the registers | |
677 | * so that some mistake won't make this reboot the whole machine. | |
678 | * You can also set the meaning of the ctrl-alt-del-key here. | |
679 | * | |
680 | * reboot doesn't sync: do that yourself before calling this. | |
681 | */ | |
682 | asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg) | |
683 | { | |
684 | char buffer[256]; | |
685 | ||
686 | /* We only trust the superuser with rebooting the system. */ | |
687 | if (!capable(CAP_SYS_BOOT)) | |
688 | return -EPERM; | |
689 | ||
690 | /* For safety, we require "magic" arguments. */ | |
691 | if (magic1 != LINUX_REBOOT_MAGIC1 || | |
692 | (magic2 != LINUX_REBOOT_MAGIC2 && | |
693 | magic2 != LINUX_REBOOT_MAGIC2A && | |
694 | magic2 != LINUX_REBOOT_MAGIC2B && | |
695 | magic2 != LINUX_REBOOT_MAGIC2C)) | |
696 | return -EINVAL; | |
697 | ||
5e38291d EB |
698 | /* Instead of trying to make the power_off code look like |
699 | * halt when pm_power_off is not set do it the easy way. | |
700 | */ | |
701 | if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off) | |
702 | cmd = LINUX_REBOOT_CMD_HALT; | |
703 | ||
1da177e4 LT |
704 | lock_kernel(); |
705 | switch (cmd) { | |
706 | case LINUX_REBOOT_CMD_RESTART: | |
4a00ea1e | 707 | kernel_restart(NULL); |
1da177e4 LT |
708 | break; |
709 | ||
710 | case LINUX_REBOOT_CMD_CAD_ON: | |
711 | C_A_D = 1; | |
712 | break; | |
713 | ||
714 | case LINUX_REBOOT_CMD_CAD_OFF: | |
715 | C_A_D = 0; | |
716 | break; | |
717 | ||
718 | case LINUX_REBOOT_CMD_HALT: | |
4a00ea1e | 719 | kernel_halt(); |
1da177e4 LT |
720 | unlock_kernel(); |
721 | do_exit(0); | |
722 | break; | |
723 | ||
724 | case LINUX_REBOOT_CMD_POWER_OFF: | |
4a00ea1e | 725 | kernel_power_off(); |
1da177e4 LT |
726 | unlock_kernel(); |
727 | do_exit(0); | |
728 | break; | |
729 | ||
730 | case LINUX_REBOOT_CMD_RESTART2: | |
731 | if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) { | |
732 | unlock_kernel(); | |
733 | return -EFAULT; | |
734 | } | |
735 | buffer[sizeof(buffer) - 1] = '\0'; | |
736 | ||
4a00ea1e | 737 | kernel_restart(buffer); |
1da177e4 LT |
738 | break; |
739 | ||
dc009d92 | 740 | case LINUX_REBOOT_CMD_KEXEC: |
4a00ea1e EB |
741 | kernel_kexec(); |
742 | unlock_kernel(); | |
743 | return -EINVAL; | |
744 | ||
1da177e4 LT |
745 | #ifdef CONFIG_SOFTWARE_SUSPEND |
746 | case LINUX_REBOOT_CMD_SW_SUSPEND: | |
747 | { | |
748 | int ret = software_suspend(); | |
749 | unlock_kernel(); | |
750 | return ret; | |
751 | } | |
752 | #endif | |
753 | ||
754 | default: | |
755 | unlock_kernel(); | |
756 | return -EINVAL; | |
757 | } | |
758 | unlock_kernel(); | |
759 | return 0; | |
760 | } | |
761 | ||
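
> Editor's note: from userspace, the magic-number check is visible when invoking reboot(2) directly. The sketch below toggles the ctrl-alt-del behaviour (LINUX_REBOOT_CMD_CAD_OFF) rather than actually rebooting; it still requires CAP_SYS_BOOT.

```c
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/reboot.h>

int main(void)
{
	/* With wrong magic values the kernel rejects the call with -EINVAL. */
	long ret = syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
			   LINUX_REBOOT_CMD_CAD_OFF, NULL);
	if (ret < 0) {
		perror("reboot");
		return 1;
	}
	printf("ctrl-alt-del now sends SIGINT to init instead of rebooting\n");
	return 0;
}
```
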
762 | static void deferred_cad(void *dummy) | |
763 | { | |
abcd9e51 | 764 | kernel_restart(NULL); |
1da177e4 LT |
765 | } |
766 | ||
767 | /* | |
768 | * This function gets called by ctrl-alt-del - ie the keyboard interrupt. | |
769 | * As it's called within an interrupt, it may NOT sync: the only choice | |
770 | * is whether to reboot at once, or just ignore the ctrl-alt-del. | |
771 | */ | |
772 | void ctrl_alt_del(void) | |
773 | { | |
774 | static DECLARE_WORK(cad_work, deferred_cad, NULL); | |
775 | ||
776 | if (C_A_D) | |
777 | schedule_work(&cad_work); | |
778 | else | |
779 | kill_proc(cad_pid, SIGINT, 1); | |
780 | } | |
781 | ||
782 | ||
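
> Editor's note: the deferred_cad()/schedule_work() pairing above is the standard way to escape interrupt context. A generic sketch of the same pattern, using the two-argument DECLARE_WORK() and three-argument interrupt handler that match this kernel version, looks like this (all names are hypothetical):

```c
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Runs later in process context (keventd), so it may sleep. */
static void my_deferred_handler(void *data)
{
	/* slow or sleeping work goes here */
}

static DECLARE_WORK(my_work, my_deferred_handler, NULL);

/* Interrupt handler: do the minimum and defer the rest. */
static irqreturn_t my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	schedule_work(&my_work);
	return IRQ_HANDLED;
}
```
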
783 | /* | |
784 | * Unprivileged users may change the real gid to the effective gid | |
785 | * or vice versa. (BSD-style) | |
786 | * | |
787 | * If you set the real gid at all, or set the effective gid to a value not | |
788 | * equal to the real gid, then the saved gid is set to the new effective gid. | |
789 | * | |
790 | * This makes it possible for a setgid program to completely drop its | |
791 | * privileges, which is often a useful assertion to make when you are doing | |
792 | * a security audit over a program. | |
793 | * | |
794 | * The general idea is that a program which uses just setregid() will be | |
795 | * 100% compatible with BSD. A program which uses just setgid() will be | |
796 | * 100% compatible with POSIX with saved IDs. | |
797 | * | |
798 | * SMP: There are no races, the GIDs are checked only by filesystem | |
799 | * operations (as far as semantic preservation is concerned). | |
800 | */ | |
801 | asmlinkage long sys_setregid(gid_t rgid, gid_t egid) | |
802 | { | |
803 | int old_rgid = current->gid; | |
804 | int old_egid = current->egid; | |
805 | int new_rgid = old_rgid; | |
806 | int new_egid = old_egid; | |
807 | int retval; | |
808 | ||
809 | retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); | |
810 | if (retval) | |
811 | return retval; | |
812 | ||
813 | if (rgid != (gid_t) -1) { | |
814 | if ((old_rgid == rgid) || | |
815 | (current->egid==rgid) || | |
816 | capable(CAP_SETGID)) | |
817 | new_rgid = rgid; | |
818 | else | |
819 | return -EPERM; | |
820 | } | |
821 | if (egid != (gid_t) -1) { | |
822 | if ((old_rgid == egid) || | |
823 | (current->egid == egid) || | |
824 | (current->sgid == egid) || | |
825 | capable(CAP_SETGID)) | |
826 | new_egid = egid; | |
827 | else { | |
828 | return -EPERM; | |
829 | } | |
830 | } | |
831 | if (new_egid != old_egid) | |
832 | { | |
d6e71144 | 833 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 834 | smp_wmb(); |
1da177e4 LT |
835 | } |
836 | if (rgid != (gid_t) -1 || | |
837 | (egid != (gid_t) -1 && egid != old_rgid)) | |
838 | current->sgid = new_egid; | |
839 | current->fsgid = new_egid; | |
840 | current->egid = new_egid; | |
841 | current->gid = new_rgid; | |
842 | key_fsgid_changed(current); | |
9f46080c | 843 | proc_id_connector(current, PROC_EVENT_GID); |
1da177e4 LT |
844 | return 0; |
845 | } | |
846 | ||
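
> Editor's note: seen from userspace, the "completely drop its privileges" case described in the comment above is a one-liner for a setgid helper. Illustrative sketch, not part of this file.

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	gid_t rgid = getgid();	/* the invoking user's real group */

	/*
	 * Setting the real gid causes the saved gid to be replaced by the
	 * new effective gid as well, so there is no way back afterwards.
	 */
	if (setregid(rgid, rgid) < 0) {
		perror("setregid");
		exit(1);
	}

	printf("gid=%d egid=%d\n", (int)getgid(), (int)getegid());
	return 0;
}
```
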
847 | /* | |
848 | * setgid() is implemented like SysV w/ SAVED_IDS | |
849 | * | |
850 | * SMP: Same implicit races as above. | |
851 | */ | |
852 | asmlinkage long sys_setgid(gid_t gid) | |
853 | { | |
854 | int old_egid = current->egid; | |
855 | int retval; | |
856 | ||
857 | retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); | |
858 | if (retval) | |
859 | return retval; | |
860 | ||
861 | if (capable(CAP_SETGID)) | |
862 | { | |
863 | if(old_egid != gid) | |
864 | { | |
d6e71144 | 865 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 866 | smp_wmb(); |
1da177e4 LT |
867 | } |
868 | current->gid = current->egid = current->sgid = current->fsgid = gid; | |
869 | } | |
870 | else if ((gid == current->gid) || (gid == current->sgid)) | |
871 | { | |
872 | if(old_egid != gid) | |
873 | { | |
d6e71144 | 874 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 875 | smp_wmb(); |
1da177e4 LT |
876 | } |
877 | current->egid = current->fsgid = gid; | |
878 | } | |
879 | else | |
880 | return -EPERM; | |
881 | ||
882 | key_fsgid_changed(current); | |
9f46080c | 883 | proc_id_connector(current, PROC_EVENT_GID); |
1da177e4 LT |
884 | return 0; |
885 | } | |
886 | ||
887 | static int set_user(uid_t new_ruid, int dumpclear) | |
888 | { | |
889 | struct user_struct *new_user; | |
890 | ||
891 | new_user = alloc_uid(new_ruid); | |
892 | if (!new_user) | |
893 | return -EAGAIN; | |
894 | ||
895 | if (atomic_read(&new_user->processes) >= | |
896 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && | |
897 | new_user != &root_user) { | |
898 | free_uid(new_user); | |
899 | return -EAGAIN; | |
900 | } | |
901 | ||
902 | switch_uid(new_user); | |
903 | ||
904 | if(dumpclear) | |
905 | { | |
d6e71144 | 906 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 907 | smp_wmb(); |
1da177e4 LT |
908 | } |
909 | current->uid = new_ruid; | |
910 | return 0; | |
911 | } | |
912 | ||
913 | /* | |
914 | * Unprivileged users may change the real uid to the effective uid | |
915 | * or vice versa. (BSD-style) | |
916 | * | |
917 | * If you set the real uid at all, or set the effective uid to a value not | |
918 | * equal to the real uid, then the saved uid is set to the new effective uid. | |
919 | * | |
920 | * This makes it possible for a setuid program to completely drop its | |
921 | * privileges, which is often a useful assertion to make when you are doing | |
922 | * a security audit over a program. | |
923 | * | |
924 | * The general idea is that a program which uses just setreuid() will be | |
925 | * 100% compatible with BSD. A program which uses just setuid() will be | |
926 | * 100% compatible with POSIX with saved IDs. | |
927 | */ | |
928 | asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) | |
929 | { | |
930 | int old_ruid, old_euid, old_suid, new_ruid, new_euid; | |
931 | int retval; | |
932 | ||
933 | retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE); | |
934 | if (retval) | |
935 | return retval; | |
936 | ||
937 | new_ruid = old_ruid = current->uid; | |
938 | new_euid = old_euid = current->euid; | |
939 | old_suid = current->suid; | |
940 | ||
941 | if (ruid != (uid_t) -1) { | |
942 | new_ruid = ruid; | |
943 | if ((old_ruid != ruid) && | |
944 | (current->euid != ruid) && | |
945 | !capable(CAP_SETUID)) | |
946 | return -EPERM; | |
947 | } | |
948 | ||
949 | if (euid != (uid_t) -1) { | |
950 | new_euid = euid; | |
951 | if ((old_ruid != euid) && | |
952 | (current->euid != euid) && | |
953 | (current->suid != euid) && | |
954 | !capable(CAP_SETUID)) | |
955 | return -EPERM; | |
956 | } | |
957 | ||
958 | if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0) | |
959 | return -EAGAIN; | |
960 | ||
961 | if (new_euid != old_euid) | |
962 | { | |
d6e71144 | 963 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 964 | smp_wmb(); |
1da177e4 LT |
965 | } |
966 | current->fsuid = current->euid = new_euid; | |
967 | if (ruid != (uid_t) -1 || | |
968 | (euid != (uid_t) -1 && euid != old_ruid)) | |
969 | current->suid = current->euid; | |
970 | current->fsuid = current->euid; | |
971 | ||
972 | key_fsuid_changed(current); | |
9f46080c | 973 | proc_id_connector(current, PROC_EVENT_UID); |
1da177e4 LT |
974 | |
975 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE); | |
976 | } | |
977 | ||
978 | ||
979 | ||
980 | /* | |
981 | * setuid() is implemented like SysV with SAVED_IDS | |
982 | * | |
983 | * Note that SAVED_ID's is deficient in that a setuid root program | |
984 | * like sendmail, for example, cannot set its uid to be a normal | |
985 | * user and then switch back, because if you're root, setuid() sets | |
986 | * the saved uid too. If you don't like this, blame the bright people | |
987 | * in the POSIX committee and/or USG. Note that the BSD-style setreuid() | |
988 | * will allow a root program to temporarily drop privileges and be able to | |
989 | * regain them by swapping the real and effective uid. | |
990 | */ | |
991 | asmlinkage long sys_setuid(uid_t uid) | |
992 | { | |
993 | int old_euid = current->euid; | |
994 | int old_ruid, old_suid, new_ruid, new_suid; | |
995 | int retval; | |
996 | ||
997 | retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); | |
998 | if (retval) | |
999 | return retval; | |
1000 | ||
1001 | old_ruid = new_ruid = current->uid; | |
1002 | old_suid = current->suid; | |
1003 | new_suid = old_suid; | |
1004 | ||
1005 | if (capable(CAP_SETUID)) { | |
1006 | if (uid != old_ruid && set_user(uid, old_euid != uid) < 0) | |
1007 | return -EAGAIN; | |
1008 | new_suid = uid; | |
1009 | } else if ((uid != current->uid) && (uid != new_suid)) | |
1010 | return -EPERM; | |
1011 | ||
1012 | if (old_euid != uid) | |
1013 | { | |
d6e71144 | 1014 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 1015 | smp_wmb(); |
1da177e4 LT |
1016 | } |
1017 | current->fsuid = current->euid = uid; | |
1018 | current->suid = new_suid; | |
1019 | ||
1020 | key_fsuid_changed(current); | |
9f46080c | 1021 | proc_id_connector(current, PROC_EVENT_UID); |
1da177e4 LT |
1022 | |
1023 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID); | |
1024 | } | |
1025 | ||
1026 | ||
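
> Editor's note: the setreuid()-based temporary drop that the comment above contrasts with setuid() looks roughly like this in a setuid-root program. Sketch only, assuming a setuid-root binary; production code should verify the resulting ids afterwards.

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	uid_t ruid = getuid();	/* invoking user */
	uid_t euid = geteuid();	/* 0 for a setuid-root binary */

	if (setreuid(euid, ruid) < 0) {	/* swap: effective becomes the user */
		perror("setreuid drop");
		exit(1);
	}
	/* ... work runs without root's effective uid here ... */

	if (setreuid(ruid, euid) < 0) {	/* swap back: regain root */
		perror("setreuid regain");
		exit(1);
	}
	return 0;
}
```
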
1027 | /* | |
1028 | * This function implements a generic ability to update ruid, euid, | |
1029 | * and suid. This allows you to implement the 4.4 compatible seteuid(). | |
1030 | */ | |
1031 | asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) | |
1032 | { | |
1033 | int old_ruid = current->uid; | |
1034 | int old_euid = current->euid; | |
1035 | int old_suid = current->suid; | |
1036 | int retval; | |
1037 | ||
1038 | retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); | |
1039 | if (retval) | |
1040 | return retval; | |
1041 | ||
1042 | if (!capable(CAP_SETUID)) { | |
1043 | if ((ruid != (uid_t) -1) && (ruid != current->uid) && | |
1044 | (ruid != current->euid) && (ruid != current->suid)) | |
1045 | return -EPERM; | |
1046 | if ((euid != (uid_t) -1) && (euid != current->uid) && | |
1047 | (euid != current->euid) && (euid != current->suid)) | |
1048 | return -EPERM; | |
1049 | if ((suid != (uid_t) -1) && (suid != current->uid) && | |
1050 | (suid != current->euid) && (suid != current->suid)) | |
1051 | return -EPERM; | |
1052 | } | |
1053 | if (ruid != (uid_t) -1) { | |
1054 | if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0) | |
1055 | return -EAGAIN; | |
1056 | } | |
1057 | if (euid != (uid_t) -1) { | |
1058 | if (euid != current->euid) | |
1059 | { | |
d6e71144 | 1060 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 1061 | smp_wmb(); |
1da177e4 LT |
1062 | } |
1063 | current->euid = euid; | |
1064 | } | |
1065 | current->fsuid = current->euid; | |
1066 | if (suid != (uid_t) -1) | |
1067 | current->suid = suid; | |
1068 | ||
1069 | key_fsuid_changed(current); | |
9f46080c | 1070 | proc_id_connector(current, PROC_EVENT_UID); |
1da177e4 LT |
1071 | |
1072 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES); | |
1073 | } | |
1074 | ||
1075 | asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) | |
1076 | { | |
1077 | int retval; | |
1078 | ||
1079 | if (!(retval = put_user(current->uid, ruid)) && | |
1080 | !(retval = put_user(current->euid, euid))) | |
1081 | retval = put_user(current->suid, suid); | |
1082 | ||
1083 | return retval; | |
1084 | } | |
1085 | ||
1086 | /* | |
1087 | * Same as above, but for rgid, egid, sgid. | |
1088 | */ | |
1089 | asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) | |
1090 | { | |
1091 | int retval; | |
1092 | ||
1093 | retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); | |
1094 | if (retval) | |
1095 | return retval; | |
1096 | ||
1097 | if (!capable(CAP_SETGID)) { | |
1098 | if ((rgid != (gid_t) -1) && (rgid != current->gid) && | |
1099 | (rgid != current->egid) && (rgid != current->sgid)) | |
1100 | return -EPERM; | |
1101 | if ((egid != (gid_t) -1) && (egid != current->gid) && | |
1102 | (egid != current->egid) && (egid != current->sgid)) | |
1103 | return -EPERM; | |
1104 | if ((sgid != (gid_t) -1) && (sgid != current->gid) && | |
1105 | (sgid != current->egid) && (sgid != current->sgid)) | |
1106 | return -EPERM; | |
1107 | } | |
1108 | if (egid != (gid_t) -1) { | |
1109 | if (egid != current->egid) | |
1110 | { | |
d6e71144 | 1111 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 1112 | smp_wmb(); |
1da177e4 LT |
1113 | } |
1114 | current->egid = egid; | |
1115 | } | |
1116 | current->fsgid = current->egid; | |
1117 | if (rgid != (gid_t) -1) | |
1118 | current->gid = rgid; | |
1119 | if (sgid != (gid_t) -1) | |
1120 | current->sgid = sgid; | |
1121 | ||
1122 | key_fsgid_changed(current); | |
9f46080c | 1123 | proc_id_connector(current, PROC_EVENT_GID); |
1da177e4 LT |
1124 | return 0; |
1125 | } | |
1126 | ||
1127 | asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) | |
1128 | { | |
1129 | int retval; | |
1130 | ||
1131 | if (!(retval = put_user(current->gid, rgid)) && | |
1132 | !(retval = put_user(current->egid, egid))) | |
1133 | retval = put_user(current->sgid, sgid); | |
1134 | ||
1135 | return retval; | |
1136 | } | |
1137 | ||
1138 | ||
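
> Editor's note: the usual userspace counterpart of the setresuid()/getresuid() pair above is a permanent drop to an unprivileged uid, verified afterwards; the gid variants work the same way. Sketch (glibc exposes these calls under _GNU_SOURCE):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	uid_t target = getuid();	/* drop back to the invoking user */
	uid_t r, e, s;

	/* Setting real, effective and saved uid together leaves no way back. */
	if (setresuid(target, target, target) < 0) {
		perror("setresuid");
		exit(1);
	}

	if (getresuid(&r, &e, &s) == 0)
		printf("ruid=%d euid=%d suid=%d\n", (int)r, (int)e, (int)s);
	return 0;
}
```
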
1139 | /* | |
1140 | * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This | |
1141 | * is used for "access()" and for the NFS daemon (letting nfsd stay at | |
1142 | * whatever uid it wants to). It normally shadows "euid", except when | |
1143 | * explicitly set by setfsuid() or for access.. | |
1144 | */ | |
1145 | asmlinkage long sys_setfsuid(uid_t uid) | |
1146 | { | |
1147 | int old_fsuid; | |
1148 | ||
1149 | old_fsuid = current->fsuid; | |
1150 | if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS)) | |
1151 | return old_fsuid; | |
1152 | ||
1153 | if (uid == current->uid || uid == current->euid || | |
1154 | uid == current->suid || uid == current->fsuid || | |
1155 | capable(CAP_SETUID)) | |
1156 | { | |
1157 | if (uid != old_fsuid) | |
1158 | { | |
d6e71144 | 1159 | current->mm->dumpable = suid_dumpable; |
d59dd462 | 1160 | smp_wmb(); |
1da177e4 LT |
1161 | } |
1162 | current->fsuid = uid; | |
1163 | } | |
1164 | ||
1165 | key_fsuid_changed(current); | |
9f46080c | 1166 | proc_id_connector(current, PROC_EVENT_UID); |
1da177e4 LT |
1167 | |
1168 | security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); | |
1169 | ||
1170 | return old_fsuid; | |
1171 | } | |
1172 | ||
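
> Editor's note: a userspace sketch of the fsuid pattern described above, as a file server might use it: only the uid used for filesystem permission checks changes, so the server can act on a user's behalf without taking on that user's other credentials. setfsuid() is Linux-specific and declared in <sys/fsuid.h>; the helper name is hypothetical.

```c
#include <fcntl.h>
#include <sys/fsuid.h>
#include <unistd.h>

/* Open a file with permission checks done as 'uid', then restore the fsuid. */
int open_as_user(const char *path, uid_t uid)
{
	int old_fsuid = setfsuid(uid);	/* returns the previous fsuid */
	int fd = open(path, O_RDONLY);	/* checked against the new fsuid */

	setfsuid(old_fsuid);		/* restore before returning */
	return fd;
}
```
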
1173 | /* | |
1174 |