[PATCH] CodingStyle cleanup for kernel/sys.c
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)  (-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)  (-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)    (-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)    (-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)    (-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)    (-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)       (-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)       (-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

/*
 * Notifier chain core routines. The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
                                   struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if (n->priority > (*nl)->priority)
                        break;
                nl = &((*nl)->next);
        }
        n->next = *nl;
        rcu_assign_pointer(*nl, n);
        return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
                                     struct notifier_block *n)
{
        while ((*nl) != NULL) {
                if ((*nl) == n) {
                        rcu_assign_pointer(*nl, n->next);
                        return 0;
                }
                nl = &((*nl)->next);
        }
        return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
                                         unsigned long val, void *v)
{
        int ret = NOTIFY_DONE;
        struct notifier_block *nb, *next_nb;

        nb = rcu_dereference(*nl);
        while (nb) {
                next_nb = rcu_dereference(nb->next);
                ret = nb->notifier_call(nb, val, v);
                if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
                        break;
                nb = next_nb;
        }
        return ret;
}

/*
 * Atomic notifier chain routines. Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
                                   struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_register(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
                                     struct notifier_block *n)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&nh->lock, flags);
        ret = notifier_chain_unregister(&nh->head, n);
        spin_unlock_irqrestore(&nh->lock, flags);
        synchronize_rcu();
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
                               unsigned long val, void *v)
{
        int ret;

        rcu_read_lock();
        ret = notifier_call_chain(&nh->head, val, v);
        rcu_read_unlock();
        return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
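
/*
 * Illustrative sketch, not part of the original file: how a client of
 * the atomic notifier API exported above might look. The names
 * example_chain, example_callback and example_nb are hypothetical,
 * and the block is compiled out behind an example-only #ifdef.
 */
#ifdef NOTIFIER_USAGE_EXAMPLE
static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_callback(struct notifier_block *nb,
                            unsigned long action, void *data)
{
        /* Called under rcu_read_lock() in atomic context: must not block. */
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call  = example_callback,
        .priority       = 10,   /* higher-priority entries run first */
};

static void example_fire(void)
{
        atomic_notifier_chain_register(&example_chain, &example_nb);
        atomic_notifier_call_chain(&example_chain, 0, NULL);
        atomic_notifier_chain_unregister(&example_chain, &example_nb);
}
#endif /* NOTIFIER_USAGE_EXAMPLE */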

/*
 * Blocking notifier chain routines. All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
                                     struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled. At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_register(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_register(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
                                       struct notifier_block *n)
{
        int ret;

        /*
         * This code gets used during boot-up, when task switching is
         * not yet working and interrupts must remain disabled. At
         * such times we must not call down_write().
         */
        if (unlikely(system_state == SYSTEM_BOOTING))
                return notifier_chain_unregister(&nh->head, n);

        down_write(&nh->rwsem);
        ret = notifier_chain_unregister(&nh->head, n);
        up_write(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
                                 unsigned long val, void *v)
{
        int ret;

        down_read(&nh->rwsem);
        ret = notifier_call_chain(&nh->head, val, v);
        up_read(&nh->rwsem);
        return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

/*
 * Raw notifier chain routines. There is no protection;
 * the caller must provide it. Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
                                struct notifier_block *n)
{
        return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
                                  struct notifier_block *n)
{
        return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn. The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int raw_notifier_call_chain(struct raw_notifier_head *nh,
                            unsigned long val, void *v)
{
        return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
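
/*
 * Illustrative sketch, not part of the original file: a driver that
 * must quiesce hardware before reboot would hook the chain above like
 * this. example_reboot_event and example_reboot_nb are hypothetical.
 */
#ifdef REBOOT_NOTIFIER_USAGE_EXAMPLE
static int example_reboot_event(struct notifier_block *nb,
                                unsigned long code, void *cmd)
{
        /*
         * code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF; cmd is the
         * restart command string or NULL (see kernel_restart_prepare
         * and kernel_shutdown_prepare below).
         */
        return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
        .notifier_call = example_reboot_event,
};

static int __init example_reboot_init(void)
{
        return register_reboot_notifier(&example_reboot_nb);
}

static void __exit example_reboot_exit(void)
{
        unregister_reboot_notifier(&example_reboot_nb);
}
#endif /* REBOOT_NOTIFIER_USAGE_EXAMPLE */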

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
        int no_nice;

        if (p->uid != current->euid &&
            p->euid != current->euid && !capable(CAP_SYS_NICE)) {
                error = -EPERM;
                goto out;
        }
        if (niceval < task_nice(p) && !can_nice(p, niceval)) {
                error = -EACCES;
                goto out;
        }
        no_nice = security_task_setnice(p, niceval);
        if (no_nice) {
                error = no_nice;
                goto out;
        }
        if (error == -ESRCH)
                error = 0;
        set_user_nice(p, niceval);
out:
        return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        int error = -EINVAL;

        if (which > 2 || which < 0)
                goto out;

        /* normalize: avoid signed division (rounding problems) */
        error = -ESRCH;
        if (niceval < -20)
                niceval = -20;
        if (niceval > 19)
                niceval = 19;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p)
                        error = set_one_prio(p, niceval, error);
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        error = set_one_prio(p, niceval, error);
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else if ((who != current->uid) && !(user = find_user(who)))
                        goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who)
                                error = set_one_prio(p, niceval, error);
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* For find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);
out:
        return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        long niceval, retval = -ESRCH;

        if (which > 2 || which < 0)
                return -EINVAL;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                }
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else if ((who != current->uid) && !(user = find_user(who)))
                        goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who) {
                                niceval = 20 - task_nice(p);
                                if (niceval > retval)
                                        retval = niceval;
                        }
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* for find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);

        return retval;
}
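
/*
 * Illustrative userspace sketch, not part of this file (assumes glibc
 * and <sys/syscall.h>): the raw syscall returns the 20-nice encoding
 * described above, while the glibc wrapper converts it back to the
 * usual -20..19 range and reports errors through errno.
 */
#if 0
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/resource.h>

int main(void)
{
        long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
        int nice_val;

        errno = 0;
        nice_val = getpriority(PRIO_PROCESS, 0);

        /* raw is 20 - nice (so 1..40); glibc undoes the offset. */
        printf("raw=%ld wrapped=%d\n", raw, nice_val);
        return errno ? 1 : 0;
}
#endif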

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
        machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
{
        blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
        system_state = SYSTEM_RESTART;
        device_shutdown();
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *       or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
        kernel_restart_prepare(cmd);
        if (!cmd)
                printk(KERN_EMERG "Restarting system.\n");
        else
                printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
        machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
        struct kimage *image;

        image = xchg(&kexec_image, NULL);
        if (!image)
                return;
        kernel_restart_prepare(NULL);
        printk(KERN_EMERG "Starting new kernel\n");
        machine_shutdown();
        machine_kexec(image);
#endif
}

void kernel_shutdown_prepare(enum system_states state)
{
        blocking_notifier_call_chain(&reboot_notifier_list,
                (state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
        system_state = state;
        device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
        kernel_shutdown_prepare(SYSTEM_HALT);
        printk(KERN_EMERG "System halted.\n");
        machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
        kernel_shutdown_prepare(SYSTEM_POWER_OFF);
        printk(KERN_EMERG "Power down.\n");
        machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
        char buffer[256];

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /* For safety, we require "magic" arguments. */
        if (magic1 != LINUX_REBOOT_MAGIC1 ||
            (magic2 != LINUX_REBOOT_MAGIC2 &&
             magic2 != LINUX_REBOOT_MAGIC2A &&
             magic2 != LINUX_REBOOT_MAGIC2B &&
             magic2 != LINUX_REBOOT_MAGIC2C))
                return -EINVAL;

        /*
         * Instead of trying to make the power_off code look like
         * halt when pm_power_off is not set, do it the easy way.
         */
        if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
                cmd = LINUX_REBOOT_CMD_HALT;

        lock_kernel();
        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART:
                kernel_restart(NULL);
                break;

        case LINUX_REBOOT_CMD_CAD_ON:
                C_A_D = 1;
                break;

        case LINUX_REBOOT_CMD_CAD_OFF:
                C_A_D = 0;
                break;

        case LINUX_REBOOT_CMD_HALT:
                kernel_halt();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
                kernel_power_off();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_RESTART2:
                if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
                        unlock_kernel();
                        return -EFAULT;
                }
                buffer[sizeof(buffer) - 1] = '\0';

                kernel_restart(buffer);
                break;

        case LINUX_REBOOT_CMD_KEXEC:
                kernel_kexec();
                unlock_kernel();
                return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
        case LINUX_REBOOT_CMD_SW_SUSPEND:
        {
                int ret = software_suspend();

                unlock_kernel();
                return ret;
        }
#endif

        default:
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();
        return 0;
}
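
/*
 * Illustrative userspace sketch, not part of this file: the magic
 * numbers checked above come from <linux/reboot.h>. Without them the
 * call fails with -EINVAL, and without CAP_SYS_BOOT with -EPERM.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int power_off_now(void)
{
        /* sys_reboot does not sync; the caller must do it first. */
        sync();
        return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
                       LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF, 0);
}
#endif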

static void deferred_cad(void *dummy)
{
        kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
        static DECLARE_WORK(cad_work, deferred_cad, NULL);

        if (C_A_D)
                schedule_work(&cad_work);
        else
                kill_proc(cad_pid, SIGINT, 1);
}


/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 * operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
        int old_rgid = current->gid;
        int old_egid = current->egid;
        int new_rgid = old_rgid;
        int new_egid = old_egid;
        int retval;

        retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        if (rgid != (gid_t) -1) {
                if ((old_rgid == rgid) ||
                    (current->egid == rgid) ||
                    capable(CAP_SETGID))
                        new_rgid = rgid;
                else
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if ((old_rgid == egid) ||
                    (current->egid == egid) ||
                    (current->sgid == egid) ||
                    capable(CAP_SETGID))
                        new_egid = egid;
                else
                        return -EPERM;
        }
        if (new_egid != old_egid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        if (rgid != (gid_t) -1 ||
            (egid != (gid_t) -1 && egid != old_rgid))
                current->sgid = new_egid;
        current->fsgid = new_egid;
        current->egid = new_egid;
        current->gid = new_rgid;
        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}
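
/*
 * Illustrative userspace sketch, not part of this file: per the
 * comment above, setting the real gid makes the saved gid follow the
 * new effective gid, so a setgid program can drop its elevated group
 * permanently with one call.
 */
#if 0
#include <unistd.h>

int drop_gid_forever(void)
{
        /* Afterwards rgid == egid == sgid == the caller's real gid. */
        return setregid(getgid(), getgid());
}
#endif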

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
        int old_egid = current->egid;
        int retval;

        retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        if (capable(CAP_SETGID)) {
                if (old_egid != gid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->gid = current->egid = current->sgid = current->fsgid = gid;
        } else if ((gid == current->gid) || (gid == current->sgid)) {
                if (old_egid != gid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->egid = current->fsgid = gid;
        } else
                return -EPERM;

        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
        struct user_struct *new_user;

        new_user = alloc_uid(new_ruid);
        if (!new_user)
                return -EAGAIN;

        if (atomic_read(&new_user->processes) >=
                        current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
                        new_user != &root_user) {
                free_uid(new_user);
                return -EAGAIN;
        }

        switch_uid(new_user);

        if (dumpclear) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->uid = new_ruid;
        return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
        int old_ruid, old_euid, old_suid, new_ruid, new_euid;
        int retval;

        retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        new_ruid = old_ruid = current->uid;
        new_euid = old_euid = current->euid;
        old_suid = current->suid;

        if (ruid != (uid_t) -1) {
                new_ruid = ruid;
                if ((old_ruid != ruid) &&
                    (current->euid != ruid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (euid != (uid_t) -1) {
                new_euid = euid;
                if ((old_ruid != euid) &&
                    (current->euid != euid) &&
                    (current->suid != euid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
                return -EAGAIN;

        if (new_euid != old_euid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->fsuid = current->euid = new_euid;
        if (ruid != (uid_t) -1 ||
            (euid != (uid_t) -1 && euid != old_ruid))
                current->suid = current->euid;
        current->fsuid = current->euid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}


/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
        int old_euid = current->euid;
        int old_ruid, old_suid, new_ruid, new_suid;
        int retval;

        retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        old_ruid = new_ruid = current->uid;
        old_suid = current->suid;
        new_suid = old_suid;

        if (capable(CAP_SETUID)) {
                if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
                        return -EAGAIN;
                new_suid = uid;
        } else if ((uid != current->uid) && (uid != new_suid))
                return -EPERM;

        if (old_euid != uid) {
                current->mm->dumpable = suid_dumpable;
                smp_wmb();
        }
        current->fsuid = current->euid = uid;
        current->suid = new_suid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
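
/*
 * Illustrative userspace sketch, not part of this file: as the comment
 * above notes, setuid() from root also overwrites the saved uid, so a
 * setuid-root program that wants its privileges back must instead swap
 * the real and effective uids with setreuid().
 */
#if 0
#include <stdio.h>
#include <unistd.h>

void toggle_privileges(void)
{
        /* Swap real and effective uids; calling it again swaps back. */
        if (setreuid(geteuid(), getuid()) < 0)
                perror("setreuid");
}
#endif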

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
        int old_ruid = current->uid;
        int old_euid = current->euid;
        int old_suid = current->suid;
        int retval;

        retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETUID)) {
                if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
                    (ruid != current->euid) && (ruid != current->suid))
                        return -EPERM;
                if ((euid != (uid_t) -1) && (euid != current->uid) &&
                    (euid != current->euid) && (euid != current->suid))
                        return -EPERM;
                if ((suid != (uid_t) -1) && (suid != current->uid) &&
                    (suid != current->euid) && (suid != current->suid))
                        return -EPERM;
        }
        if (ruid != (uid_t) -1) {
                if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
                        return -EAGAIN;
        }
        if (euid != (uid_t) -1) {
                if (euid != current->euid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->euid = euid;
        }
        current->fsuid = current->euid;
        if (suid != (uid_t) -1)
                current->suid = suid;

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
        int retval;

        if (!(retval = put_user(current->uid, ruid)) &&
            !(retval = put_user(current->euid, euid)))
                retval = put_user(current->suid, suid);

        return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
        int retval;

        retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETGID)) {
                if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
                    (rgid != current->egid) && (rgid != current->sgid))
                        return -EPERM;
                if ((egid != (gid_t) -1) && (egid != current->gid) &&
                    (egid != current->egid) && (egid != current->sgid))
                        return -EPERM;
                if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
                    (sgid != current->egid) && (sgid != current->sgid))
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if (egid != current->egid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->egid = egid;
        }
        current->fsgid = current->egid;
        if (rgid != (gid_t) -1)
                current->gid = rgid;
        if (sgid != (gid_t) -1)
                current->sgid = sgid;

        key_fsgid_changed(current);
        proc_id_connector(current, PROC_EVENT_GID);
        return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
        int retval;

        if (!(retval = put_user(current->gid, rgid)) &&
            !(retval = put_user(current->egid, egid)))
                retval = put_user(current->sgid, sgid);

        return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
        int old_fsuid;

        old_fsuid = current->fsuid;
        if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
                return old_fsuid;

        if (uid == current->uid || uid == current->euid ||
            uid == current->suid || uid == current->fsuid ||
            capable(CAP_SETUID)) {
                if (uid != old_fsuid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->fsuid = uid;
        }

        key_fsuid_changed(current);
        proc_id_connector(current, PROC_EVENT_UID);

        security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

        return old_fsuid;
}

/*
 * "Samma på svenska" (Swedish for "the same"): setfsgid() mirrors
 * setfsuid() above, but for the filesystem gid.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
        int old_fsgid;

        old_fsgid = current->fsgid;
        if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
                return old_fsgid;

        if (gid == current->gid || gid == current->egid ||
            gid == current->sgid || gid == current->fsgid ||
            capable(CAP_SETGID)) {
                if (gid != old_fsgid) {
                        current->mm->dumpable = suid_dumpable;
                        smp_wmb();
                }
                current->fsgid = gid;
                key_fsgid_changed(current);
                proc_id_connector(current, PROC_EVENT_GID);
        }
        return old_fsgid;
}

asmlinkage long sys_times(struct tms __user *tbuf)
{
        /*
         * In the SMP world we might just be unlucky and have one of
         * the times increment as we use it. Since the value is an
         * atomically safe type this is just fine. Conceptually it's
         * as if the syscall took an instant longer to occur.
         */
        if (tbuf) {
                struct tms tmp;
                struct task_struct *tsk = current;
                struct task_struct *t;
                cputime_t utime, stime, cutime, cstime;

                spin_lock_irq(&tsk->sighand->siglock);
                utime = tsk->signal->utime;
                stime = tsk->signal->stime;
                t = tsk;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        t = next_thread(t);
                } while (t != tsk);

                cutime = tsk->signal->cutime;
                cstime = tsk->signal->cstime;
                spin_unlock_irq(&tsk->sighand->siglock);

                tmp.tms_utime = cputime_to_clock_t(utime);
                tmp.tms_stime = cputime_to_clock_t(stime);
                tmp.tms_cutime = cputime_to_clock_t(cutime);
                tmp.tms_cstime = cputime_to_clock_t(cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
                        return -EFAULT;
        }
        return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
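
/*
 * Illustrative userspace sketch, not part of this file: the return
 * value is in clock ticks from an arbitrary origin, so only
 * differences are meaningful, scaled by sysconf(_SC_CLK_TCK).
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/times.h>

int main(void)
{
        struct tms t;
        clock_t stamp = times(&t);
        long hz = sysconf(_SC_CLK_TCK);

        printf("user %.2fs system %.2fs (tick stamp %ld, %ld Hz)\n",
               (double)t.tms_utime / hz, (double)t.tms_stime / hz,
               (long)stamp, hz);
        return 0;
}
#endif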

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
        struct task_struct *p;
        struct task_struct *group_leader = current->group_leader;
        int err = -EINVAL;

        if (!pid)
                pid = group_leader->pid;
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;

        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
         */
        write_lock_irq(&tasklist_lock);

        err = -ESRCH;
        p = find_task_by_pid(pid);
        if (!p)
                goto out;

        err = -EINVAL;
        if (!thread_group_leader(p))
                goto out;

        if (p->real_parent == group_leader) {
                err = -EPERM;
                if (p->signal->session != group_leader->signal->session)
                        goto out;
                err = -EACCES;
                if (p->did_exec)
                        goto out;
        } else {
                err = -ESRCH;
                if (p != group_leader)
                        goto out;
        }

        err = -EPERM;
        if (p->signal->leader)
                goto out;

        if (pgid != pid) {
                struct task_struct *p;

                do_each_task_pid(pgid, PIDTYPE_PGID, p) {
                        if (p->signal->session == group_leader->signal->session)
                                goto ok_pgid;
                } while_each_task_pid(pgid, PIDTYPE_PGID, p);
                goto out;
        }

ok_pgid:
        err = security_task_setpgid(p, pgid);
        if (err)
                goto out;

        if (process_group(p) != pgid) {
                detach_pid(p, PIDTYPE_PGID);
                p->signal->pgrp = pgid;
                attach_pid(p, PIDTYPE_PGID, pgid);
        }

        err = 0;
out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
        return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
        if (!pid)
                return process_group(current);
        else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getpgid(p);
                        if (!retval)
                                retval = process_group(p);
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
        /* SMP - assuming writes are word atomic this is fine */
        return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
        if (!pid)
                return current->signal->session;
        else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getsid(p);
                        if (!retval)
                                retval = p->signal->session;
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}

asmlinkage long sys_setsid(void)
{
        struct task_struct *group_leader = current->group_leader;
        pid_t session;
        int err = -EPERM;

        mutex_lock(&tty_mutex);
        write_lock_irq(&tasklist_lock);

        /* Fail if I am already a session leader */
        if (group_leader->signal->leader)
                goto out;

        session = group_leader->pid;
        /* Fail if a process group id already exists that equals the
         * proposed session id.
         *
         * Don't check if session id == 1 because kernel threads use this
         * session id and so the check will always fail and make it so
         * init cannot successfully call setsid.
         */
        if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
                goto out;

        group_leader->signal->leader = 1;
        __set_special_pids(session, session);
        group_leader->signal->tty = NULL;
        group_leader->signal->tty_old_pgrp = 0;
        err = process_group(group_leader);
out:
        write_unlock_irq(&tasklist_lock);
        mutex_unlock(&tty_mutex);
        return err;
}

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
        struct group_info *group_info;
        int nblocks;
        int i;

        nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
        /* Make sure we always allocate at least one indirect block pointer */
        nblocks = nblocks ? : 1;
        group_info = kmalloc(sizeof(*group_info) + nblocks * sizeof(gid_t *), GFP_USER);
        if (!group_info)
                return NULL;
        group_info->ngroups = gidsetsize;
        group_info->nblocks = nblocks;
        atomic_set(&group_info->usage, 1);

        if (gidsetsize <= NGROUPS_SMALL)
                group_info->blocks[0] = group_info->small_block;
        else {
                for (i = 0; i < nblocks; i++) {
                        gid_t *b;

                        b = (void *)__get_free_page(GFP_USER);
                        if (!b)
                                goto out_undo_partial_alloc;
                        group_info->blocks[i] = b;
                }
        }
        return group_info;

out_undo_partial_alloc:
        while (--i >= 0)
                free_page((unsigned long)group_info->blocks[i]);
        kfree(group_info);
        return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
        if (group_info->blocks[0] != group_info->small_block) {
                int i;

                for (i = 0; i < group_info->nblocks; i++)
                        free_page((unsigned long)group_info->blocks[i]);
        }
        kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
                          struct group_info *group_info)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_to_user(grouplist + off, group_info->blocks[i], len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
                            gid_t __user *grouplist)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_from_user(group_info->blocks[i], grouplist + off, len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
        int base, max, stride;
        int gidsetsize = group_info->ngroups;

        for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
                ; /* nothing */
        stride /= 3;

        while (stride) {
                max = gidsetsize - stride;
                for (base = 0; base < max; base++) {
                        int left = base;
                        int right = left + stride;
                        gid_t tmp = GROUP_AT(group_info, right);

                        while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
                                GROUP_AT(group_info, right) =
                                        GROUP_AT(group_info, left);
                                right = left;
                                left -= stride;
                        }
                        GROUP_AT(group_info, right) = tmp;
                }
                stride /= 3;
        }
}
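
/*
 * The stride loop in groups_sort() above generates the Knuth gap
 * sequence h = 3h + 1 (1, 4, 13, 40, ...); each pass is then an
 * insertion sort over elements one stride apart, and the final pass
 * at stride 1 is an ordinary insertion sort, which guarantees the
 * array ends up fully sorted.
 */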

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
        unsigned int left, right;

        if (!group_info)
                return 0;

        left = 0;
        right = group_info->ngroups;
        while (left < right) {
                unsigned int mid = (left + right) / 2;
                int cmp = grp - GROUP_AT(group_info, mid);

                if (cmp > 0)
                        left = mid + 1;
                else if (cmp < 0)
                        right = mid;
                else
                        return 1;
        }
        return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
        int retval;
        struct group_info *old_info;

        retval = security_task_setgroups(group_info);
        if (retval)
                return retval;

        groups_sort(group_info);
        get_group_info(group_info);

        task_lock(current);
        old_info = current->group_info;
        current->group_info = group_info;
        task_unlock(current);

        put_group_info(old_info);

        return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
        int i = 0;

        /*
         * SMP: Nobody else can change our grouplist. Thus we are
         * safe.
         */

        if (gidsetsize < 0)
                return -EINVAL;

        /* no need to grab task_lock here; it cannot change */
        i = current->group_info->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize) {
                        i = -EINVAL;
                        goto out;
                }
                if (groups_to_user(grouplist, current->group_info)) {
                        i = -EFAULT;
                        goto out;
                }
        }
out:
        return i;
}

/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
        struct group_info *group_info;
        int retval;

        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;

        group_info = groups_alloc(gidsetsize);
        if (!group_info)
                return -ENOMEM;
        retval = groups_from_user(group_info, grouplist);
        if (retval) {
                put_group_info(group_info);
                return retval;
        }

        retval = set_current_groups(group_info);
        put_group_info(group_info);

        return retval;
}
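
/*
 * Illustrative userspace sketch, not part of this file: setgroups()
 * needs CAP_SETGID, but any process may list its own supplementary
 * groups; calling getgroups(0, NULL) first returns the count so the
 * buffer can be sized.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int print_groups(void)
{
        int i, n = getgroups(0, NULL);
        gid_t *list;

        if (n <= 0)
                return n;
        list = malloc(n * sizeof(*list));
        if (!list || getgroups(n, list) < 0)
                return -1;
        for (i = 0; i < n; i++)
                printf("%d\n", (int)list[i]);
        free(list);
        return 0;
}
#endif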

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
        int retval = 1;

        if (grp != current->fsgid)
                retval = groups_search(current->group_info, grp);
        return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
        int retval = 1;

        if (grp != current->egid)
                retval = groups_search(current->group_info, grp);
        return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user *name)
{
        int errno = 0;

        down_read(&uts_sem);
        if (copy_to_user(name, &system_utsname, sizeof(*name)))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;
        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(system_utsname.nodename, tmp, len);
                system_utsname.nodename[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
        int i, errno;

        if (len < 0)
                return -EINVAL;
        down_read(&uts_sem);
        i = 1 + strlen(system_utsname.nodename);
        if (i > len)
                i = len;
        errno = 0;
        if (copy_to_user(name, system_utsname.nodename, i))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;

        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(system_utsname.domainname, tmp, len);
                system_utsname.domainname[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        else {
                struct rlimit value;

                task_lock(current->group_leader);
                value = current->signal->rlim[resource];
                task_unlock(current->group_leader);
                return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
        }
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit x;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;

        task_lock(current->group_leader);
        x = current->signal->rlim[resource];
        task_unlock(current->group_leader);
        if (x.rlim_cur > 0x7FFFFFFF)
                x.rlim_cur = 0x7FFFFFFF;
        if (x.rlim_max > 0x7FFFFFFF)
                x.rlim_max = 0x7FFFFFFF;
        return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit new_rlim, *old_rlim;
        unsigned long it_prof_secs;
        int retval;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
        if (new_rlim.rlim_cur > new_rlim.rlim_max)
                return -EINVAL;
        old_rlim = current->signal->rlim + resource;
        if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
        if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
                return -EPERM;

        retval = security_task_setrlimit(resource, &new_rlim);
        if (retval)
                return retval;

        task_lock(current->group_leader);
        *old_rlim = new_rlim;
        task_unlock(current->group_leader);

        if (resource != RLIMIT_CPU)
                goto out;

        /*
         * RLIMIT_CPU handling. Note that the kernel fails to return an error
         * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
         * very long-standing error, and fixing it now risks breakage of
         * applications, so we live with it
         */
        if (new_rlim.rlim_cur == RLIM_INFINITY)
                goto out;

        it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
        if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
                unsigned long rlim_cur = new_rlim.rlim_cur;
                cputime_t cputime;

                if (rlim_cur == 0) {
                        /*
                         * The caller is asking for an immediate RLIMIT_CPU
                         * expiry. But we use the zero value to mean "it was
                         * never set". So let's cheat and make it one second
                         * instead
                         */
                        rlim_cur = 1;
                }
                cputime = secs_to_cputime(rlim_cur);
                read_lock(&tasklist_lock);
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
                read_unlock(&tasklist_lock);
        }
out:
        return 0;
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded and
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
        struct task_struct *t;
        unsigned long flags;
        cputime_t utime, stime;

        memset(r, 0, sizeof(*r));
        utime = stime = cputime_zero;

        rcu_read_lock();
        if (!lock_task_sighand(p, &flags)) {
                rcu_read_unlock();
                return;
        }

        switch (who) {
        case RUSAGE_BOTH:
        case RUSAGE_CHILDREN:
                utime = p->signal->cutime;
                stime = p->signal->cstime;
                r->ru_nvcsw = p->signal->cnvcsw;
                r->ru_nivcsw = p->signal->cnivcsw;
                r->ru_minflt = p->signal->cmin_flt;
                r->ru_majflt = p->signal->cmaj_flt;

                if (who == RUSAGE_CHILDREN)
                        break;

        case RUSAGE_SELF:
                utime = cputime_add(utime, p->signal->utime);
                stime = cputime_add(stime, p->signal->stime);
                r->ru_nvcsw += p->signal->nvcsw;
                r->ru_nivcsw += p->signal->nivcsw;
                r->ru_minflt += p->signal->min_flt;
                r->ru_majflt += p->signal->maj_flt;
                t = p;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        r->ru_nvcsw += t->nvcsw;
                        r->ru_nivcsw += t->nivcsw;
                        r->ru_minflt += t->min_flt;
                        r->ru_majflt += t->maj_flt;
                        t = next_thread(t);
                } while (t != p);
                break;

        default:
                BUG();
        }

        unlock_task_sighand(p, &flags);
        rcu_read_unlock();

        cputime_to_timeval(utime, &r->ru_utime);
        cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
        struct rusage r;

        k_getrusage(p, who, &r);
        return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
        if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
                return -EINVAL;
        return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
        mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
        return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5)
{
        long error;

        error = security_task_prctl(option, arg2, arg3, arg4, arg5);
        if (error)
                return error;

        switch (option) {
        case PR_SET_PDEATHSIG:
                if (!valid_signal(arg2)) {
                        error = -EINVAL;
                        break;
                }
                current->pdeath_signal = arg2;
                break;
        case PR_GET_PDEATHSIG:
                error = put_user(current->pdeath_signal, (int __user *)arg2);
                break;
        case PR_GET_DUMPABLE:
                error = current->mm->dumpable;
                break;
        case PR_SET_DUMPABLE:
                if (arg2 > 1) {
                        error = -EINVAL;
                        break;
                }
                current->mm->dumpable = arg2;
                break;

        case PR_SET_UNALIGN:
                error = SET_UNALIGN_CTL(current, arg2);
                break;
        case PR_GET_UNALIGN:
                error = GET_UNALIGN_CTL(current, arg2);
                break;
        case PR_SET_FPEMU:
                error = SET_FPEMU_CTL(current, arg2);
                break;
        case PR_GET_FPEMU:
                error = GET_FPEMU_CTL(current, arg2);
                break;
        case PR_SET_FPEXC:
                error = SET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_FPEXC:
                error = GET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_TIMING:
                error = PR_TIMING_STATISTICAL;
                break;
        case PR_SET_TIMING:
                if (arg2 == PR_TIMING_STATISTICAL)
                        error = 0;
                else
                        error = -EINVAL;
                break;

        case PR_GET_KEEPCAPS:
                if (current->keep_capabilities)
                        error = 1;
                break;
        case PR_SET_KEEPCAPS:
                if (arg2 != 0 && arg2 != 1) {
                        error = -EINVAL;
                        break;
                }
                current->keep_capabilities = arg2;
                break;
        case PR_SET_NAME: {
                struct task_struct *me = current;
                unsigned char ncomm[sizeof(me->comm)];

                ncomm[sizeof(me->comm) - 1] = 0;
                if (strncpy_from_user(ncomm, (char __user *)arg2,
                                      sizeof(me->comm) - 1) < 0)
                        return -EFAULT;
                set_task_comm(me, ncomm);
                return 0;
        }
        case PR_GET_NAME: {
                struct task_struct *me = current;
                unsigned char tcomm[sizeof(me->comm)];

                get_task_comm(tcomm, me);
                if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
                        return -EFAULT;
                return 0;
        }
        case PR_GET_ENDIAN:
                error = GET_ENDIAN(current, arg2);
                break;
        case PR_SET_ENDIAN:
                error = SET_ENDIAN(current, arg2);
                break;

        default:
                error = -EINVAL;
                break;
        }
        return error;
}
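
/*
 * Illustrative userspace sketch, not part of this file: PR_SET_NAME
 * stores at most 15 bytes plus the NUL terminator, matching the
 * sizeof(me->comm) - 1 bound in the handler above.
 */
#if 0
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        char name[16];

        prctl(PR_SET_NAME, "worker-thread");    /* silently truncated at 15 */
        prctl(PR_GET_NAME, name);
        printf("comm=%s\n", name);
        return 0;
}
#endif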

asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
                           struct getcpu_cache __user *cache)
{
        int err = 0;
        int cpu = raw_smp_processor_id();

        if (cpup)
                err |= put_user(cpu, cpup);
        if (nodep)
                err |= put_user(cpu_to_node(cpu), nodep);
        if (cache) {
                /*
                 * The cache is not needed for this implementation,
                 * but make sure user programs pass something
                 * valid. vsyscall implementations can instead make
                 * good use of the cache. Only use t0 and t1 because
                 * these are available in both 32bit and 64bit ABI (no
                 * need for a compat_getcpu). 32bit has enough
                 * padding
                 */
                unsigned long t0, t1;

                get_user(t0, &cache->blob[0]);
                get_user(t1, &cache->blob[1]);
                t0++;
                t1++;
                put_user(t0, &cache->blob[0]);
                put_user(t1, &cache->blob[1]);
        }
        return err ? -EFAULT : 0;
}
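
/*
 * Illustrative userspace sketch, not part of this file: glibc of this
 * era shipped no getcpu() wrapper, so the raw syscall is used; the
 * third (cache) argument may be NULL, which the code above tolerates.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned cpu, node;

        if (syscall(SYS_getcpu, &cpu, &node, NULL) < 0)
                return 1;
        printf("cpu=%u node=%u\n", cpu, node);
        return 0;
}
#endif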