/* Copyright (C) 1991, 1992 Linus Torvalds */
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b) (-EINVAL)
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b) (-EINVAL)
# define SET_FPEMU_CTL(a,b) (-EINVAL)
# define GET_FPEMU_CTL(a,b) (-EINVAL)
# define SET_FPEXC_CTL(a,b) (-EINVAL)
# define GET_FPEXC_CTL(a,b) (-EINVAL)
# define GET_ENDIAN(a,b) (-EINVAL)
# define SET_ENDIAN(a,b) (-EINVAL)
# define GET_TSC_CTL(a) (-EINVAL)
# define SET_TSC_CTL(a) (-EINVAL)
* this is where the system-wide overflow UID and GID are defined, for
* architectures that now have 32-bit UID/GID but didn't in the past
int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
* the same as above, but for filesystems which can only store a 16-bit
* UID and GID. As such, this is needed on all architectures
int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
* this indicates whether you can reboot with ctrl-alt-del: the default is yes
EXPORT_SYMBOL(cad_pid);
* If set, this is used for preparing the system to power off.
void (*pm_power_off_prepare)(void);
* Returns true if current's euid is same as p's uid or euid,
* or has CAP_SYS_NICE to p's user_ns.
* Called with rcu_read_lock, creds are safe
static bool set_one_prio_perm(struct task_struct *p)
const struct cred *cred = current_cred(), *pcred = __task_cred(p);
if (pcred->user->user_ns == cred->user->user_ns &&
(pcred->uid == cred->euid ||
pcred->euid == cred->euid))
if (ns_capable(pcred->user->user_ns, CAP_SYS_NICE))
* set the priority of a task
* - the caller must hold the RCU read lock
static int set_one_prio(struct task_struct *p, int niceval, int error)
if (!set_one_prio_perm(p)) {
if (niceval < task_nice(p) && !can_nice(p, niceval)) {
no_nice = security_task_setnice(p, niceval);
set_user_nice(p, niceval);
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
struct task_struct *g, *p;
struct user_struct *user;
const struct cred *cred = current_cred();
if (which > PRIO_USER || which < PRIO_PROCESS)
/* normalize: avoid signed division (rounding problems) */
read_lock(&tasklist_lock);
p = find_task_by_vpid(who);
error = set_one_prio(p, niceval, error);
pgrp = find_vpid(who);
pgrp = task_pgrp(current);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
error = set_one_prio(p, niceval, error);
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
user = (struct user_struct *) cred->user;
else if ((who != cred->uid) &&
!(user = find_user(who)))
goto out_unlock; /* No processes for this user */
do_each_thread(g, p) {
if (__task_cred(p)->uid == who)
error = set_one_prio(p, niceval, error);
} while_each_thread(g, p);
if (who != cred->uid)
free_uid(user); /* For find_user() */
read_unlock(&tasklist_lock);
* Ugh. To avoid negative return values, "getpriority()" will
* not return the normal nice-value, but a negated value that
* has been offset by 20 (ie it returns 40..1 instead of -20..19)
* to stay compatible.
SYSCALL_DEFINE2(getpriority, int, which, int, who)
struct task_struct *g, *p;
struct user_struct *user;
const struct cred *cred = current_cred();
long niceval, retval = -ESRCH;
if (which > PRIO_USER || which < PRIO_PROCESS)
read_lock(&tasklist_lock);
p = find_task_by_vpid(who);
niceval = 20 - task_nice(p);
if (niceval > retval)
pgrp = find_vpid(who);
pgrp = task_pgrp(current);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
niceval = 20 - task_nice(p);
if (niceval > retval)
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
user = (struct user_struct *) cred->user;
else if ((who != cred->uid) &&
!(user = find_user(who)))
goto out_unlock; /* No processes for this user */
do_each_thread(g, p) {
if (__task_cred(p)->uid == who) {
niceval = 20 - task_nice(p);
if (niceval > retval)
} while_each_thread(g, p);
if (who != cred->uid)
free_uid(user); /* for find_user() */
read_unlock(&tasklist_lock);
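/*
 * Illustrative example (not kernel code): from userspace the raw syscall
 * return value carries the +20 offset described above, while the glibc
 * getpriority() wrapper converts it back to the usual -20..19 range.
 * A minimal sketch:
 *
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *	#include <unistd.h>
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice_value = 20 - raw;	// e.g. raw == 20 means nice 0
 *
 * glibc performs the same "20 - raw" conversion internally.
 */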
* emergency_restart - reboot the system
* Without shutting down any hardware or taking any locks
* reboot the system. This is called when we know we are in
* trouble so this is our best effort to reboot. This is
* safe to call in interrupt context.
void emergency_restart(void)
kmsg_dump(KMSG_DUMP_EMERG);
machine_emergency_restart();
EXPORT_SYMBOL_GPL(emergency_restart);
void kernel_restart_prepare(char *cmd)
blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
system_state = SYSTEM_RESTART;
usermodehelper_disable();
* register_reboot_notifier - Register function to be called at reboot time
* @nb: Info about notifier function to be called
* Registers a function with the list of functions
* to be called at reboot time.
* Currently always returns zero, as blocking_notifier_chain_register()
* always returns zero.
int register_reboot_notifier(struct notifier_block *nb)
return blocking_notifier_chain_register(&reboot_notifier_list, nb);
EXPORT_SYMBOL(register_reboot_notifier);
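/*
 * Illustrative sketch (not part of this file): a driver or module would
 * typically register a reboot notifier like this; the handler then runs
 * from the blocking notifier chain invoked by kernel_restart_prepare()
 * and kernel_shutdown_prepare() above.
 *
 *	static int example_reboot_handler(struct notifier_block *nb,
 *					  unsigned long action, void *data)
 *	{
 *		// action is SYS_RESTART, SYS_HALT or SYS_POWER_OFF
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call = example_reboot_handler,
 *	};
 *
 *	// module init:	register_reboot_notifier(&example_reboot_nb);
 *	// module exit:	unregister_reboot_notifier(&example_reboot_nb);
 *
 * The "example_*" names are placeholders, not symbols defined elsewhere.
 */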
* unregister_reboot_notifier - Unregister previously registered reboot notifier
* @nb: Hook to be unregistered
* Unregisters a previously registered reboot notifier.
* Returns zero on success, or %-ENOENT on failure.
int unregister_reboot_notifier(struct notifier_block *nb)
return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
EXPORT_SYMBOL(unregister_reboot_notifier);
* kernel_restart - reboot the system
* @cmd: pointer to buffer containing command to execute for restart
* Shutdown everything and perform a clean reboot.
* This is not safe to call in interrupt context.
void kernel_restart(char *cmd)
kernel_restart_prepare(cmd);
disable_nonboot_cpus();
printk(KERN_EMERG "Restarting system.\n");
printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
kmsg_dump(KMSG_DUMP_RESTART);
machine_restart(cmd);
EXPORT_SYMBOL_GPL(kernel_restart);
static void kernel_shutdown_prepare(enum system_states state)
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
system_state = state;
usermodehelper_disable();
* kernel_halt - halt the system
* Shutdown everything and perform a clean system halt.
void kernel_halt(void)
kernel_shutdown_prepare(SYSTEM_HALT);
disable_nonboot_cpus();
printk(KERN_EMERG "System halted.\n");
kmsg_dump(KMSG_DUMP_HALT);
EXPORT_SYMBOL_GPL(kernel_halt);
* kernel_power_off - power_off the system
* Shutdown everything and perform a clean system power_off.
void kernel_power_off(void)
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
if (pm_power_off_prepare)
pm_power_off_prepare();
disable_nonboot_cpus();
printk(KERN_EMERG "Power down.\n");
kmsg_dump(KMSG_DUMP_POWEROFF);
EXPORT_SYMBOL_GPL(kernel_power_off);
static DEFINE_MUTEX(reboot_mutex);
* Reboot system call: for obvious reasons only root may call it,
* and even root needs to set up some magic numbers in the registers
* so that some mistake won't make this reboot the whole machine.
* You can also set the meaning of the ctrl-alt-del-key here.
* reboot doesn't sync: do that yourself before calling this.
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
/* We only trust the superuser with rebooting the system. */
if (!capable(CAP_SYS_BOOT))
/* For safety, we require "magic" arguments. */
if (magic1 != LINUX_REBOOT_MAGIC1 ||
(magic2 != LINUX_REBOOT_MAGIC2 &&
magic2 != LINUX_REBOOT_MAGIC2A &&
magic2 != LINUX_REBOOT_MAGIC2B &&
magic2 != LINUX_REBOOT_MAGIC2C))
/* Instead of trying to make the power_off code look like
* halt when pm_power_off is not set, do it the easy way.
if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
cmd = LINUX_REBOOT_CMD_HALT;
mutex_lock(&reboot_mutex);
case LINUX_REBOOT_CMD_RESTART:
kernel_restart(NULL);
case LINUX_REBOOT_CMD_CAD_ON:
case LINUX_REBOOT_CMD_CAD_OFF:
case LINUX_REBOOT_CMD_HALT:
panic("cannot halt");
case LINUX_REBOOT_CMD_POWER_OFF:
case LINUX_REBOOT_CMD_RESTART2:
if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
buffer[sizeof(buffer) - 1] = '\0';
kernel_restart(buffer);
case LINUX_REBOOT_CMD_KEXEC:
ret = kernel_kexec();
#ifdef CONFIG_HIBERNATION
case LINUX_REBOOT_CMD_SW_SUSPEND:
mutex_unlock(&reboot_mutex);
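/*
 * Illustrative example (not kernel code): from userspace the magic
 * numbers checked above must be passed explicitly when using the raw
 * syscall; the glibc reboot() wrapper supplies them for the simple
 * commands. A minimal sketch of a privileged power-off:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();		// as the comment says, reboot doesn't sync for you
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_POWER_OFF, NULL);
 *
 * This requires CAP_SYS_BOOT, and wrong magic values return -EINVAL.
 */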
static void deferred_cad(struct work_struct *dummy)
kernel_restart(NULL);
* This function gets called by ctrl-alt-del - ie the keyboard interrupt.
* As it's called within an interrupt, it may NOT sync: the only choice
* is whether to reboot at once, or just ignore the ctrl-alt-del.
void ctrl_alt_del(void)
static DECLARE_WORK(cad_work, deferred_cad);
schedule_work(&cad_work);
kill_cad_pid(SIGINT, 1);
* Unprivileged users may change the real gid to the effective gid
* or vice versa. (BSD-style)
* If you set the real gid at all, or set the effective gid to a value not
* equal to the real gid, then the saved gid is set to the new effective gid.
* This makes it possible for a setgid program to completely drop its
* privileges, which is often a useful assertion to make when you are doing
* a security audit over a program.
* The general idea is that a program which uses just setregid() will be
* 100% compatible with BSD. A program which uses just setgid() will be
* 100% compatible with POSIX with saved IDs.
* SMP: There are no races, the GIDs are checked only by filesystem
* operations (as far as semantic preservation is concerned).
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
const struct cred *old;
new = prepare_creds();
old = current_cred();
if (rgid != (gid_t) -1) {
if (old->gid == rgid ||
nsown_capable(CAP_SETGID))
if (egid != (gid_t) -1) {
if (old->gid == egid ||
nsown_capable(CAP_SETGID))
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old->gid))
new->sgid = new->egid;
new->fsgid = new->egid;
return commit_creds(new);
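/*
 * Illustrative example (not kernel code): how a setgid helper would use
 * the rule above to drop its group privilege permanently - changing the
 * real gid (or setting an effective gid different from the real one)
 * also overwrites the saved gid:
 *
 *	gid_t rgid = getgid();		// real (invoking user's) gid
 *	if (setregid(rgid, rgid) != 0)	// egid and sgid both become rgid
 *		_exit(1);
 *
 * After this the elevated group id can no longer be regained.
 */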
* setgid() is implemented like SysV w/ SAVED_IDS
* SMP: Same implicit races as above.
SYSCALL_DEFINE1(setgid, gid_t, gid)
const struct cred *old;
new = prepare_creds();
old = current_cred();
if (nsown_capable(CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = gid;
else if (gid == old->gid || gid == old->sgid)
new->egid = new->fsgid = gid;
return commit_creds(new);
* change the user struct in a credentials set to match the new UID
static int set_user(struct cred *new)
struct user_struct *new_user;
new_user = alloc_uid(current_user_ns(), new->uid);
* We don't fail in case of NPROC limit excess here because too many
* poorly written programs don't check set*uid() return code, assuming
* it never fails if called by root. We may still enforce NPROC limit
* for programs doing set*uid()+execve() by harmlessly deferring the
* failure to the execve() stage.
if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
new_user != INIT_USER)
current->flags |= PF_NPROC_EXCEEDED;
current->flags &= ~PF_NPROC_EXCEEDED;
new->user = new_user;
* Unprivileged users may change the real uid to the effective uid
* or vice versa. (BSD-style)
* If you set the real uid at all, or set the effective uid to a value not
* equal to the real uid, then the saved uid is set to the new effective uid.
* This makes it possible for a setuid program to completely drop its
* privileges, which is often a useful assertion to make when you are doing
* a security audit over a program.
* The general idea is that a program which uses just setreuid() will be
* 100% compatible with BSD. A program which uses just setuid() will be
* 100% compatible with POSIX with saved IDs.
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
const struct cred *old;
new = prepare_creds();
old = current_cred();
if (ruid != (uid_t) -1) {
if (old->uid != ruid &&
!nsown_capable(CAP_SETUID))
if (euid != (uid_t) -1) {
if (old->uid != euid &&
!nsown_capable(CAP_SETUID))
if (new->uid != old->uid) {
retval = set_user(new);
if (ruid != (uid_t) -1 ||
(euid != (uid_t) -1 && euid != old->uid))
new->suid = new->euid;
new->fsuid = new->euid;
retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
return commit_creds(new);
* setuid() is implemented like SysV with SAVED_IDS
* Note that SAVED_IDS is deficient in that a setuid root program
* like sendmail, for example, cannot set its uid to be a normal
* user and then switch back, because if you're root, setuid() sets
* the saved uid too. If you don't like this, blame the bright people
* in the POSIX committee and/or USG. Note that the BSD-style setreuid()
* will allow a root program to temporarily drop privileges and be able to
* regain them by swapping the real and effective uid.
SYSCALL_DEFINE1(setuid, uid_t, uid)
const struct cred *old;
new = prepare_creds();
old = current_cred();
if (nsown_capable(CAP_SETUID)) {
new->suid = new->uid = uid;
if (uid != old->uid) {
retval = set_user(new);
} else if (uid != old->uid && uid != new->suid) {
new->fsuid = new->euid = uid;
retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
return commit_creds(new);
* This function implements a generic ability to update ruid, euid,
* and suid. This allows you to implement the 4.4BSD-compatible seteuid().
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
const struct cred *old;
new = prepare_creds();
old = current_cred();
if (!nsown_capable(CAP_SETUID)) {
if (ruid != (uid_t) -1 && ruid != old->uid &&
ruid != old->euid && ruid != old->suid)
if (euid != (uid_t) -1 && euid != old->uid &&
euid != old->euid && euid != old->suid)
if (suid != (uid_t) -1 && suid != old->uid &&
suid != old->euid && suid != old->suid)
if (ruid != (uid_t) -1) {
if (ruid != old->uid) {
retval = set_user(new);
if (euid != (uid_t) -1)
if (suid != (uid_t) -1)
new->fsuid = new->euid;
retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
return commit_creds(new);
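/*
 * Illustrative example (not kernel code): the comment above notes that
 * setresuid() is general enough to build seteuid() on top of. A sketch
 * of a setuid-root program temporarily dropping and later regaining
 * privilege:
 *
 *	uid_t ruid = getuid();
 *	setresuid(-1, ruid, -1);	// act as the real user (seteuid(ruid))
 *	// ... do unprivileged work ...
 *	setresuid(-1, 0, -1);		// regain euid 0 from the saved uid
 *
 * The second call only succeeds because the saved uid still holds 0.
 */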
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
const struct cred *cred = current_cred();
if (!(retval = put_user(cred->uid, ruid)) &&
!(retval = put_user(cred->euid, euid)))
retval = put_user(cred->suid, suid);
* Same as above, but for rgid, egid, sgid.
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
const struct cred *old;
new = prepare_creds();
old = current_cred();
if (!nsown_capable(CAP_SETGID)) {
if (rgid != (gid_t) -1 && rgid != old->gid &&
rgid != old->egid && rgid != old->sgid)
if (egid != (gid_t) -1 && egid != old->gid &&
egid != old->egid && egid != old->sgid)
if (sgid != (gid_t) -1 && sgid != old->gid &&
sgid != old->egid && sgid != old->sgid)
if (rgid != (gid_t) -1)
if (egid != (gid_t) -1)
if (sgid != (gid_t) -1)
new->fsgid = new->egid;
return commit_creds(new);
SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
const struct cred *cred = current_cred();
if (!(retval = put_user(cred->gid, rgid)) &&
!(retval = put_user(cred->egid, egid)))
retval = put_user(cred->sgid, sgid);
* "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
* is used for "access()" and for the NFS daemon (letting nfsd stay at
* whatever uid it wants to). It normally shadows "euid", except when
* explicitly set by setfsuid() or for access..
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
const struct cred *old;
new = prepare_creds();
return current_fsuid();
old = current_cred();
old_fsuid = old->fsuid;
if (uid == old->uid || uid == old->euid ||
uid == old->suid || uid == old->fsuid ||
nsown_capable(CAP_SETUID)) {
if (uid != old_fsuid) {
if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
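/*
 * Illustrative example (not kernel code): a privileged file-server style
 * use of setfsuid() - change only the uid used for filesystem permission
 * checks while leaving the real/effective/saved uids (and hence signal
 * permissions) untouched:
 *
 *	uid_t prev = setfsuid(client_uid);	// returns the previous fsuid
 *	// ... perform filesystem access on behalf of client_uid ...
 *	setfsuid(prev);				// restore
 *
 * "client_uid" is a placeholder for the id being impersonated. Because
 * setfsuid() returns the previous fsuid even on failure, callers
 * traditionally verify the change with a second call.
 */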
* Same as setfsuid(), but for the filesystem gid. ("Samma på svenska.")
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
const struct cred *old;
new = prepare_creds();
return current_fsgid();
old = current_cred();
old_fsgid = old->fsgid;
if (gid == old->gid || gid == old->egid ||
gid == old->sgid || gid == old->fsgid ||
nsown_capable(CAP_SETGID)) {
if (gid != old_fsgid) {
void do_sys_times(struct tms *tms)
cputime_t tgutime, tgstime, cutime, cstime;
spin_lock_irq(&current->sighand->siglock);
thread_group_times(current, &tgutime, &tgstime);
cutime = current->signal->cutime;
cstime = current->signal->cstime;
spin_unlock_irq(&current->sighand->siglock);
tms->tms_utime = cputime_to_clock_t(tgutime);
tms->tms_stime = cputime_to_clock_t(tgstime);
tms->tms_cutime = cputime_to_clock_t(cutime);
tms->tms_cstime = cputime_to_clock_t(cstime);
SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
force_successful_syscall_return();
return (long) jiffies_64_to_clock_t(get_jiffies_64());
* This needs some heavy checking ...
* I just haven't the stomach for it. I also don't fully
* understand sessions/pgrp etc. Let somebody who does explain it.
* OK, I think I have the protection semantics right.... this is really
* only important on a multi-user system anyway, to make sure one user
* can't send a signal to a process owned by another. -TYT, 12/12/91
* Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
struct task_struct *p;
struct task_struct *group_leader = current->group_leader;
pid = task_pid_vnr(group_leader);
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
write_lock_irq(&tasklist_lock);
p = find_task_by_vpid(pid);
if (!thread_group_leader(p))
if (same_thread_group(p->real_parent, group_leader)) {
if (task_session(p) != task_session(group_leader))
if (p != group_leader)
if (p->signal->leader)
struct task_struct *g;
pgrp = find_vpid(pgid);
g = pid_task(pgrp, PIDTYPE_PGID);
if (!g || task_session(g) != task_session(group_leader))
err = security_task_setpgid(p, pgid);
if (task_pgrp(p) != pgrp)
change_pid(p, PIDTYPE_PGID, pgrp);
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
SYSCALL_DEFINE1(getpgid, pid_t, pid)
struct task_struct *p;
grp = task_pgrp(current);
p = find_task_by_vpid(pid);
retval = security_task_getpgid(p);
retval = pid_vnr(grp);
#ifdef __ARCH_WANT_SYS_GETPGRP
SYSCALL_DEFINE0(getpgrp)
return sys_getpgid(0);
SYSCALL_DEFINE1(getsid, pid_t, pid)
struct task_struct *p;
sid = task_session(current);
p = find_task_by_vpid(pid);
sid = task_session(p);
retval = security_task_getsid(p);
retval = pid_vnr(sid);
SYSCALL_DEFINE0(setsid)
struct task_struct *group_leader = current->group_leader;
struct pid *sid = task_pid(group_leader);
pid_t session = pid_vnr(sid);
write_lock_irq(&tasklist_lock);
/* Fail if I am already a session leader */
if (group_leader->signal->leader)
/* Fail if a process group id already exists that equals the
* proposed session id.
if (pid_task(sid, PIDTYPE_PGID))
group_leader->signal->leader = 1;
__set_special_pids(sid);
proc_clear_tty(group_leader);
write_unlock_irq(&tasklist_lock);
proc_sid_connector(group_leader);
sched_autogroup_create_attach(group_leader);
DECLARE_RWSEM(uts_sem);
#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
(personality(current->personality) == PER_LINUX32 && \
copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
sizeof(COMPAT_UTS_MACHINE)))
#define override_architecture(name) 0
* Work around broken programs that cannot handle "Linux 3.0".
* Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
static int override_release(char __user *release, size_t len)
if (current->personality & UNAME26) {
const char *rest = UTS_RELEASE;
char buf[65] = { 0 };
if (*rest == '.' && ++ndots >= 3)
if (!isdigit(*rest) && *rest != '.')
v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
copy = clamp_t(size_t, len, 1, sizeof(buf));
copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
ret = copy_to_user(release, buf, copy + 1);
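/*
 * For illustration: with the UNAME26 personality bit set, the release
 * string reported by uname(2) on a 3.x kernel is rewritten roughly as
 * sketched below (the minor version is added to 40, and any suffix
 * starting at the first non-digit, or at the third dot, is kept as-is):
 *
 *	"3.0"        ->  "2.6.40"
 *	"3.2.0-rc1"  ->  "2.6.42-rc1"
 *
 * Userspace can opt in with: personality(PER_LINUX | UNAME26);
 */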
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
down_read(&uts_sem);
if (copy_to_user(name, utsname(), sizeof *name))
if (!errno && override_release(name->release, sizeof(name->release)))
if (!errno && override_architecture(name))
#ifdef __ARCH_WANT_SYS_OLD_UNAME
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
down_read(&uts_sem);
if (copy_to_user(name, utsname(), sizeof(*name)))
if (!error && override_release(name->release, sizeof(name->release)))
if (!error && override_architecture(name))
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
down_read(&uts_sem);
error = __copy_to_user(&name->sysname, &utsname()->sysname,
error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
error |= __copy_to_user(&name->nodename, &utsname()->nodename,
error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
error |= __copy_to_user(&name->release, &utsname()->release,
error |= __put_user(0, name->release + __OLD_UTS_LEN);
error |= __copy_to_user(&name->version, &utsname()->version,
error |= __put_user(0, name->version + __OLD_UTS_LEN);
error |= __copy_to_user(&name->machine, &utsname()->machine,
error |= __put_user(0, name->machine + __OLD_UTS_LEN);
if (!error && override_architecture(name))
if (!error && override_release(name->release, sizeof(name->release)))
return error ? -EFAULT : 0;
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
char tmp[__NEW_UTS_LEN];
if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
if (len < 0 || len > __NEW_UTS_LEN)
down_write(&uts_sem);
if (!copy_from_user(tmp, name, len)) {
struct new_utsname *u = utsname();
memcpy(u->nodename, tmp, len);
memset(u->nodename + len, 0, sizeof(u->nodename) - len);
uts_proc_notify(UTS_PROC_HOSTNAME);
#ifdef __ARCH_WANT_SYS_GETHOSTNAME
SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
struct new_utsname *u;
down_read(&uts_sem);
i = 1 + strlen(u->nodename);
if (copy_to_user(name, u->nodename, i))
* Only setdomainname; getdomainname can be implemented by calling
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
char tmp[__NEW_UTS_LEN];
if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
if (len < 0 || len > __NEW_UTS_LEN)
down_write(&uts_sem);
if (!copy_from_user(tmp, name, len)) {
struct new_utsname *u = utsname();
memcpy(u->domainname, tmp, len);
memset(u->domainname + len, 0, sizeof(u->domainname) - len);
uts_proc_notify(UTS_PROC_DOMAINNAME);
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
struct rlimit value;
ret = do_prlimit(current, resource, NULL, &value);
ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
* Back compatibility for getrlimit. Needed for some apps.
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
struct rlimit __user *, rlim)
if (resource >= RLIM_NLIMITS)
task_lock(current->group_leader);
x = current->signal->rlim[resource];
task_unlock(current->group_leader);
if (x.rlim_cur > 0x7FFFFFFF)
x.rlim_cur = 0x7FFFFFFF;
if (x.rlim_max > 0x7FFFFFFF)
x.rlim_max = 0x7FFFFFFF;
return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
static inline bool rlim64_is_infinity(__u64 rlim64)
#if BITS_PER_LONG < 64
return rlim64 >= ULONG_MAX;
return rlim64 == RLIM64_INFINITY;
static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
if (rlim->rlim_cur == RLIM_INFINITY)
rlim64->rlim_cur = RLIM64_INFINITY;
rlim64->rlim_cur = rlim->rlim_cur;
if (rlim->rlim_max == RLIM_INFINITY)
rlim64->rlim_max = RLIM64_INFINITY;
rlim64->rlim_max = rlim->rlim_max;
static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
if (rlim64_is_infinity(rlim64->rlim_cur))
rlim->rlim_cur = RLIM_INFINITY;
rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
if (rlim64_is_infinity(rlim64->rlim_max))
rlim->rlim_max = RLIM_INFINITY;
rlim->rlim_max = (unsigned long)rlim64->rlim_max;
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
struct rlimit *new_rlim, struct rlimit *old_rlim)
struct rlimit *rlim;
if (resource >= RLIM_NLIMITS)
if (new_rlim->rlim_cur > new_rlim->rlim_max)
if (resource == RLIMIT_NOFILE &&
new_rlim->rlim_max > sysctl_nr_open)
/* protect tsk->signal and tsk->sighand from disappearing */
read_lock(&tasklist_lock);
if (!tsk->sighand) {
rlim = tsk->signal->rlim + resource;
task_lock(tsk->group_leader);
/* Keep the capable check against init_user_ns until
cgroups can contain all limits */
if (new_rlim->rlim_max > rlim->rlim_max &&
!capable(CAP_SYS_RESOURCE))
retval = security_task_setrlimit(tsk->group_leader,
resource, new_rlim);
if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
* The caller is asking for an immediate RLIMIT_CPU
* expiry. But we use the zero value to mean "it was
* never set". So let's cheat and make it one second
new_rlim->rlim_cur = 1;
task_unlock(tsk->group_leader);
* RLIMIT_CPU handling. Note that the kernel fails to return an error
* code if it rejected the user's attempt to set RLIMIT_CPU. This is a
* very long-standing error, and fixing it now risks breakage of
* applications, so we live with it
if (!retval && new_rlim && resource == RLIMIT_CPU &&
new_rlim->rlim_cur != RLIM_INFINITY)
update_rlimit_cpu(tsk, new_rlim->rlim_cur);
read_unlock(&tasklist_lock);
/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
const struct cred *cred = current_cred(), *tcred;
if (current == task)
tcred = __task_cred(task);
if (cred->user->user_ns == tcred->user->user_ns &&
(cred->uid == tcred->euid &&
cred->uid == tcred->suid &&
cred->uid == tcred->uid &&
cred->gid == tcred->egid &&
cred->gid == tcred->sgid &&
cred->gid == tcred->gid))
if (ns_capable(tcred->user->user_ns, CAP_SYS_RESOURCE))
SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
const struct rlimit64 __user *, new_rlim,
struct rlimit64 __user *, old_rlim)
struct rlimit64 old64, new64;
struct rlimit old, new;
struct task_struct *tsk;
if (copy_from_user(&new64, new_rlim, sizeof(new64)))
rlim64_to_rlim(&new64, &new);
tsk = pid ? find_task_by_vpid(pid) : current;
ret = check_prlimit_permission(tsk);
get_task_struct(tsk);
ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
old_rlim ? &old : NULL);
if (!ret && old_rlim) {
rlim_to_rlim64(&old, &old64);
if (copy_to_user(old_rlim, &old64, sizeof(old64)))
put_task_struct(tsk);
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
struct rlimit new_rlim;
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
return do_prlimit(current, resource, &new_rlim, NULL);
* It would make sense to put struct rusage in the task_struct,
* except that would make the task_struct be *really big*. After
* task_struct gets moved into malloc'ed memory, it would
* make sense to do this. It will make moving the rest of the information
* a lot simpler! (Which we're not doing right now because we're not
* measuring them yet).
* When sampling multiple threads for RUSAGE_SELF, under SMP we might have
* races with threads incrementing their own counters. But since word
* reads are atomic, we either get new values or old values and we don't
* care which for the sums. We always take the siglock to protect reading
* the c* fields from p->signal from races with exit.c updating those
* fields when reaping, so a sample either gets all the additions of a
* given child after it's reaped, or none so this sample is before reaping.
* We need to take the siglock for CHILDREN, SELF and BOTH
* for the cases current multithreaded, non-current single threaded
* non-current multithreaded. Thread traversal is now safe with
* Strictly speaking, we do not need to take the siglock if we are current and
* single threaded, as no one else can take our signal_struct away, no one
* else can reap the children to update signal->c* counters, and no one else
* can race with the signal-> fields. If we do not take any lock, the
* signal-> fields could be read out of order while another thread was just
* exiting. So we should place a read memory barrier when we avoid the lock.
* On the writer side, write memory barrier is implied in __exit_signal
* as __exit_signal releases the siglock spinlock after updating the signal->
* fields. But we don't do this yet to keep things simple.
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
r->ru_nvcsw += t->nvcsw;
r->ru_nivcsw += t->nivcsw;
r->ru_minflt += t->min_flt;
r->ru_majflt += t->maj_flt;
r->ru_inblock += task_io_get_inblock(t);
r->ru_oublock += task_io_get_oublock(t);
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
struct task_struct *t;
unsigned long flags;
cputime_t tgutime, tgstime, utime, stime;
unsigned long maxrss = 0;
memset((char *) r, 0, sizeof *r);
utime = stime = cputime_zero;
if (who == RUSAGE_THREAD) {
task_times(current, &utime, &stime);
accumulate_thread_rusage(p, r);
maxrss = p->signal->maxrss;
if (!lock_task_sighand(p, &flags))
case RUSAGE_CHILDREN:
utime = p->signal->cutime;
stime = p->signal->cstime;
r->ru_nvcsw = p->signal->cnvcsw;
r->ru_nivcsw = p->signal->cnivcsw;
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
r->ru_inblock = p->signal->cinblock;
r->ru_oublock = p->signal->coublock;
maxrss = p->signal->cmaxrss;
if (who == RUSAGE_CHILDREN)
thread_group_times(p, &tgutime, &tgstime);
utime = cputime_add(utime, tgutime);
stime = cputime_add(stime, tgstime);
r->ru_nvcsw += p->signal->nvcsw;
r->ru_nivcsw += p->signal->nivcsw;
r->ru_minflt += p->signal->min_flt;
r->ru_majflt += p->signal->maj_flt;
r->ru_inblock += p->signal->inblock;
r->ru_oublock += p->signal->oublock;
if (maxrss < p->signal->maxrss)
maxrss = p->signal->maxrss;
accumulate_thread_rusage(t, r);
unlock_task_sighand(p, &flags);
cputime_to_timeval(utime, &r->ru_utime);
cputime_to_timeval(stime, &r->ru_stime);
if (who != RUSAGE_CHILDREN) {
struct mm_struct *mm = get_task_mm(p);
setmax_mm_hiwater_rss(&maxrss, mm);
r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
k_getrusage(p, who, &r);
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
who != RUSAGE_THREAD)
return getrusage(current, who, ru);
SYSCALL_DEFINE1(umask, int, mask)
mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
struct task_struct *me = current;
unsigned char comm[sizeof(me->comm)];
error = security_task_prctl(option, arg2, arg3, arg4, arg5);
if (error != -ENOSYS)
case PR_SET_PDEATHSIG:
if (!valid_signal(arg2)) {
me->pdeath_signal = arg2;
case PR_GET_PDEATHSIG:
error = put_user(me->pdeath_signal, (int __user *)arg2);
case PR_GET_DUMPABLE:
error = get_dumpable(me->mm);
case PR_SET_DUMPABLE:
if (arg2 < 0 || arg2 > 1) {
set_dumpable(me->mm, arg2);
case PR_SET_UNALIGN:
error = SET_UNALIGN_CTL(me, arg2);
case PR_GET_UNALIGN:
error = GET_UNALIGN_CTL(me, arg2);
error = SET_FPEMU_CTL(me, arg2);
error = GET_FPEMU_CTL(me, arg2);
error = SET_FPEXC_CTL(me, arg2);
error = GET_FPEXC_CTL(me, arg2);
error = PR_TIMING_STATISTICAL;
if (arg2 != PR_TIMING_STATISTICAL)
comm[sizeof(me->comm)-1] = 0;
if (strncpy_from_user(comm, (char __user *)arg2,
sizeof(me->comm) - 1) < 0)
set_task_comm(me, comm);
proc_comm_connector(me);
get_task_comm(comm, me);
if (copy_to_user((char __user *)arg2, comm,
error = GET_ENDIAN(me, arg2);
error = SET_ENDIAN(me, arg2);
case PR_GET_SECCOMP:
error = prctl_get_seccomp();
case PR_SET_SECCOMP:
error = prctl_set_seccomp(arg2);
error = GET_TSC_CTL(arg2);
error = SET_TSC_CTL(arg2);
case PR_TASK_PERF_EVENTS_DISABLE:
error = perf_event_task_disable();
case PR_TASK_PERF_EVENTS_ENABLE:
error = perf_event_task_enable();
case PR_GET_TIMERSLACK:
error = current->timer_slack_ns;
case PR_SET_TIMERSLACK:
current->timer_slack_ns =
current->default_timer_slack_ns;
current->timer_slack_ns = arg2;
case PR_MCE_KILL_CLEAR:
current->flags &= ~PF_MCE_PROCESS;
case PR_MCE_KILL_SET:
current->flags |= PF_MCE_PROCESS;
if (arg3 == PR_MCE_KILL_EARLY)
current->flags |= PF_MCE_EARLY;
else if (arg3 == PR_MCE_KILL_LATE)
current->flags &= ~PF_MCE_EARLY;
else if (arg3 == PR_MCE_KILL_DEFAULT)
~(PF_MCE_EARLY|PF_MCE_PROCESS);
case PR_MCE_KILL_GET:
if (arg2 | arg3 | arg4 | arg5)
if (current->flags & PF_MCE_PROCESS)
error = (current->flags & PF_MCE_EARLY) ?
PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
error = PR_MCE_KILL_DEFAULT;
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
struct getcpu_cache __user *, unused)
int cpu = raw_smp_processor_id();
err |= put_user(cpu, cpup);
err |= put_user(cpu_to_node(cpu), nodep);
return err ? -EFAULT : 0;
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
static void argv_cleanup(struct subprocess_info *info)
argv_free(info->argv);
* orderly_poweroff - Trigger an orderly system poweroff
* @force: force poweroff if command execution fails
* This may be called from any context to trigger a system shutdown.
* If the orderly shutdown fails, it will force an immediate shutdown.
int orderly_poweroff(bool force)
char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
static char *envp[] = {
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
struct subprocess_info *info;
printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
__func__, poweroff_cmd);
info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
printk(KERN_WARNING "Failed to start orderly shutdown: "
"forcing the issue\n");
/* I guess this should try to kick off some daemon to
sync and poweroff asap. Or not even bother syncing
if we're doing an emergency shutdown? */
EXPORT_SYMBOL_GPL(orderly_poweroff);
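/*
 * Illustrative sketch (not part of this file): a typical caller, e.g. a
 * thermal or UPS driver noticing a critical condition, would ask for an
 * orderly shutdown and fall back to a forced power off if the userspace
 * helper cannot be run:
 *
 *	if (temperature >= critical_threshold)
 *		orderly_poweroff(true);	// force == true: power off anyway
 *
 * "temperature" and "critical_threshold" are placeholders for whatever
 * condition the calling driver monitors.
 */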