00001
00002
00003
00004
00005
00006
00007 #include <linux/module.h>
00008 #include <linux/mm.h>
00009 #include <linux/utsname.h>
00010 #include <linux/mman.h>
00011 #include <linux/smp_lock.h>
00012 #include <linux/notifier.h>
00013 #include <linux/reboot.h>
00014 #include <linux/prctl.h>
00015 #include <linux/highuid.h>
00016 #include <linux/fs.h>
00017 #include <linux/resource.h>
00018 #include <linux/kernel.h>
00019 #include <linux/kexec.h>
00020 #include <linux/workqueue.h>
00021 #include <linux/capability.h>
00022 #include <linux/device.h>
00023 #include <linux/key.h>
00024 #include <linux/times.h>
00025 #include <linux/posix-timers.h>
00026 #include <linux/security.h>
00027 #include <linux/dcookies.h>
00028 #include <linux/suspend.h>
00029 #include <linux/tty.h>
00030 #include <linux/signal.h>
00031 #include <linux/cn_proc.h>
00032 #include <linux/getcpu.h>
00033 #include <linux/task_io_accounting_ops.h>
00034 #include <linux/seccomp.h>
00035 #include <linux/cpu.h>
00036 #include <linux/ptrace.h>
00037
00038 #include <linux/compat.h>
00039 #include <linux/syscalls.h>
00040 #include <linux/kprobes.h>
00041 #include <linux/user_namespace.h>
00042
00043 #include <asm/uaccess.h>
00044 #include <asm/io.h>
00045 #include <asm/unistd.h>
00046
00047 #ifndef SET_UNALIGN_CTL
00048 # define SET_UNALIGN_CTL(a,b) (-EINVAL)
00049 #endif
00050 #ifndef GET_UNALIGN_CTL
00051 # define GET_UNALIGN_CTL(a,b) (-EINVAL)
00052 #endif
00053 #ifndef SET_FPEMU_CTL
00054 # define SET_FPEMU_CTL(a,b) (-EINVAL)
00055 #endif
00056 #ifndef GET_FPEMU_CTL
00057 # define GET_FPEMU_CTL(a,b) (-EINVAL)
00058 #endif
00059 #ifndef SET_FPEXC_CTL
00060 # define SET_FPEXC_CTL(a,b) (-EINVAL)
00061 #endif
00062 #ifndef GET_FPEXC_CTL
00063 # define GET_FPEXC_CTL(a,b) (-EINVAL)
00064 #endif
00065 #ifndef GET_ENDIAN
00066 # define GET_ENDIAN(a,b) (-EINVAL)
00067 #endif
00068 #ifndef SET_ENDIAN
00069 # define SET_ENDIAN(a,b) (-EINVAL)
00070 #endif
00071 #ifndef GET_TSC_CTL
00072 # define GET_TSC_CTL(a) (-EINVAL)
00073 #endif
00074 #ifndef SET_TSC_CTL
00075 # define SET_TSC_CTL(a) (-EINVAL)
00076 #endif
00077
00078 #ifndef DDE_LINUX
00079
00080
00081
00082
00083
00084 int overflowuid = DEFAULT_OVERFLOWUID;
00085 int overflowgid = DEFAULT_OVERFLOWGID;
00086
00087 #ifdef CONFIG_UID16
00088 EXPORT_SYMBOL(overflowuid);
00089 EXPORT_SYMBOL(overflowgid);
00090 #endif
00091
00092
00093
00094
00095
00096
00097 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
00098 int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;
00099
00100 EXPORT_SYMBOL(fs_overflowuid);
00101 EXPORT_SYMBOL(fs_overflowgid);
00102
00103
00104
00105
00106
00107 int C_A_D = 1;
00108 #endif
00109 struct pid *cad_pid;
00110 EXPORT_SYMBOL(cad_pid);
00111
00112

/*
 * Optional hook invoked by kernel_power_off() just before the machine is
 * powered down; may be NULL if no preparation is needed.
 */
void (*pm_power_off_prepare)(void);
00117
00118 #ifndef DDE_LINUX
00119
00120
00121
00122
/*
 * Apply nice value @niceval to task @p after permission checks.
 *
 * @error carries the caller's accumulated status: it is left untouched
 * on a permission/security failure, and an incoming -ESRCH (meaning
 * "no match found yet") is cleared to 0 once a task is actually niced.
 * Returns the updated error value.
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
	int no_nice;

	/* may change another task's priority only if uids match or CAP_SYS_NICE */
	if (pcred->uid != cred->euid &&
	    pcred->euid != cred->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	/* lowering the nice value (raising priority) needs rlimit/cap headroom */
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	/* at least one task matched: -ESRCH no longer applies */
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
00148
/*
 * setpriority(2): set the nice value of a process (PRIO_PROCESS), process
 * group (PRIO_PGRP) or all processes of a user (PRIO_USER).  @who == 0
 * means the current process/group/user.  @niceval is clamped to [-20, 19].
 */
SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* -ESRCH until at least one matching task has been niced */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = (struct user_struct *) cred->user;
		if (!who)
			who = cred->uid;
		else if ((who != cred->uid) &&
			 !(user = find_user(who)))
			goto out_unlock;	/* no such uid */

		do_each_thread(g, p)
			if (__task_cred(p)->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		/* drop the reference taken by find_user() */
		if (who != cred->uid)
			free_uid(user);
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
00207
00208
00209
00210
00211
00212
00213
/*
 * getpriority(2): return the highest priority (lowest nice value) among
 * the selected tasks, encoded as 20 - nice, i.e. in the range [1, 40],
 * so that negative return values remain available for errors.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = (struct user_struct *) cred->user;
		if (!who)
			who = cred->uid;
		else if ((who != cred->uid) &&
			 !(user = find_user(who)))
			goto out_unlock;	/* no such uid */

		do_each_thread(g, p)
			if (__task_cred(p)->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		/* drop the reference taken by find_user() */
		if (who != cred->uid)
			free_uid(user);
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
00273
00274
00275
00276
00277
00278
00279
00280
00281
/*
 * Restart the machine immediately, bypassing reboot notifiers and device
 * shutdown; for use when the system is too broken for a clean restart.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
00287
/*
 * Run the reboot notifier chain and shut devices down in preparation for
 * a restart.  @cmd is the optional command string passed to notifiers.
 */
void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
	sysdev_shutdown();
}
00295
00296
00297
00298
00299
00300
00301
00302
00303
00304 void kernel_restart(char *cmd)
00305 {
00306 kernel_restart_prepare(cmd);
00307 if (!cmd)
00308 printk(KERN_EMERG "Restarting system.\n");
00309 else
00310 printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
00311 machine_restart(cmd);
00312 }
00313 EXPORT_SYMBOL_GPL(kernel_restart);
00314
00315 static void kernel_shutdown_prepare(enum system_states state)
00316 {
00317 blocking_notifier_call_chain(&reboot_notifier_list,
00318 (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
00319 system_state = state;
00320 device_shutdown();
00321 }
00322
00323
00324
00325
00326
/*
 * Shut down everything and perform a clean system halt (the machine
 * stops but stays powered).  Not safe to call in interrupt context.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	sysdev_shutdown();
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);
00336
00337
00338
00339
00340
00341
/*
 * Shut down everything and power the machine off: run the optional
 * pm_power_off_prepare hook, take down non-boot CPUs, then cut power.
 * Not safe to call in interrupt context.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	sysdev_shutdown();
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
00353
00354
00355
00356
00357
00358
00359
00360
/*
 * reboot(2): reboot, halt or power off the system, or toggle the
 * Ctrl-Alt-Del behaviour.  Requires CAP_SYS_BOOT and the magic numbers
 * (a historical safeguard against stray calls).  The halt/power-off
 * paths never return: they terminate the calling process via do_exit().
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	                magic2 != LINUX_REBOOT_MAGIC2A &&
			magic2 != LINUX_REBOOT_MAGIC2B &&
	                magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces/power management cannot power off, degrade a
	 * power-off request to a halt so the machine at least stops.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);	/* does not return */
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);	/* does not return */
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		/* copy the user-supplied restart command, NUL-terminated */
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		{
			int ret;
			ret = kernel_kexec();
			unlock_kernel();
			return ret;
		}
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
00446
/* Workqueue callback: perform the Ctrl-Alt-Del restart in process context. */
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}
00451
00452
00453
00454
00455
00456
00457 void ctrl_alt_del(void)
00458 {
00459 static DECLARE_WORK(cad_work, deferred_cad);
00460
00461 if (C_A_D)
00462 schedule_work(&cad_work);
00463 else
00464 kill_cad_pid(SIGINT, 1);
00465 }
00466
00467
00468
00469
00470
00471
00472
00473
00474
00475
00476
00477
00478
00479
00480
00481
00482
00483
00484
/*
 * setregid(2): set real and/or effective gid; -1 leaves a value alone.
 * An unprivileged caller may set the real gid to its real or effective
 * gid, and the effective gid to its real, effective or saved gid.  The
 * saved gid follows the effective gid per SUSv3, and fsgid tracks egid.
 */
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		goto error;

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (old->gid == rgid ||
		    old->egid == rgid ||
		    capable(CAP_SETGID))
			new->gid = rgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (old->gid == egid ||
		    old->egid == egid ||
		    old->sgid == egid ||
		    capable(CAP_SETGID))
			new->egid = egid;
		else
			goto error;
	}

	/* update saved gid whenever the real gid changed or egid diverged */
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old->gid))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
00530
00531
00532
00533
00534
00535
/*
 * setgid(2): with CAP_SETGID all four gids (real/effective/saved/fs) are
 * set; otherwise only the effective and fs gids may be set, and only to
 * the caller's real or saved gid (SMP-safe against concurrent changes
 * because we operate on a private copy of the credentials).
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		goto error;

	retval = -EPERM;
	if (capable(CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = gid;
	else if (gid == old->gid || gid == old->sgid)
		new->egid = new->fsgid = gid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
00565
00566
00567
00568
/*
 * Switch the user_struct in @new to match new->uid, enforcing the
 * RLIMIT_NPROC process limit for the target user.  On success the old
 * user_struct reference is dropped and replaced; on failure @new is
 * left untouched.
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(current_user_ns(), new->uid);
	if (!new_user)
		return -EAGAIN;

	if (!task_can_switch_user(new_user, current)) {
		free_uid(new_user);
		return -EINVAL;
	}

	/* refuse the switch if it would exceed the target user's NPROC limit */
	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != INIT_USER) {
		free_uid(new_user);
		return -EAGAIN;
	}

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
00593
00594
00595
00596
00597
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
/*
 * setreuid(2): set real and/or effective uid; -1 leaves a value alone.
 * Unprivileged callers may only shuffle among their existing real,
 * effective and saved uids.  The saved uid follows the effective uid
 * when the real uid changes or egid diverges, and fsuid tracks euid.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		goto error;

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = ruid;
		if (old->uid != ruid &&
		    old->euid != ruid &&
		    !capable(CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = euid;
		if (old->uid != euid &&
		    old->euid != euid &&
		    old->suid != euid &&
		    !capable(CAP_SETUID))
			goto error;
	}

	/* a real-uid change must also move to the new user_struct */
	if (new->uid != old->uid) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old->uid))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
00662
00663
00664
00665
00666
00667
00668
00669
00670
00671
00672
00673
/*
 * setuid(2): with CAP_SETUID, sets real, effective, saved and fs uids
 * (a full, irreversible identity change); without it, only the
 * effective/fs uids may be set, and only to the real or saved uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		goto error;

	retval = -EPERM;
	if (capable(CAP_SETUID)) {
		new->suid = new->uid = uid;
		if (uid != old->uid) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (uid != old->uid && uid != new->suid) {
		goto error;
	}

	new->fsuid = new->euid = uid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
00713
00714
00715
00716
00717
00718
/*
 * setresuid(2): set real, effective and saved uids independently; -1
 * leaves a value alone.  Unprivileged callers may only assign values
 * already held as one of their current real/effective/saved uids.
 * fsuid always tracks the resulting effective uid.
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		goto error;
	old = current_cred();

	retval = -EPERM;
	if (!capable(CAP_SETUID)) {
		if (ruid != (uid_t) -1 && ruid != old->uid &&
		    ruid != old->euid  && ruid != old->suid)
			goto error;
		if (euid != (uid_t) -1 && euid != old->uid &&
		    euid != old->euid  && euid != old->suid)
			goto error;
		if (suid != (uid_t) -1 && suid != old->uid &&
		    suid != old->euid  && suid != old->suid)
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = ruid;
		/* a real-uid change must also move to the new user_struct */
		if (ruid != old->uid) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = euid;
	if (suid != (uid_t) -1)
		new->suid = suid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
00771
00772 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
00773 {
00774 const struct cred *cred = current_cred();
00775 int retval;
00776
00777 if (!(retval = put_user(cred->uid, ruid)) &&
00778 !(retval = put_user(cred->euid, euid)))
00779 retval = put_user(cred->suid, suid);
00780
00781 return retval;
00782 }
00783
00784
00785
00786
/*
 * setresgid(2): set real, effective and saved gids independently; -1
 * leaves a value alone.  Unprivileged callers may only assign values
 * already held as one of their current real/effective/saved gids.
 * fsgid always tracks the resulting effective gid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	const struct cred *old;
	struct cred *new;
	int retval;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		goto error;

	retval = -EPERM;
	if (!capable(CAP_SETGID)) {
		if (rgid != (gid_t) -1 && rgid != old->gid &&
		    rgid != old->egid  && rgid != old->sgid)
			goto error;
		if (egid != (gid_t) -1 && egid != old->gid &&
		    egid != old->egid  && egid != old->sgid)
			goto error;
		if (sgid != (gid_t) -1 && sgid != old->gid &&
		    sgid != old->egid  && sgid != old->sgid)
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = rgid;
	if (egid != (gid_t) -1)
		new->egid = egid;
	if (sgid != (gid_t) -1)
		new->sgid = sgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
00829
00830 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
00831 {
00832 const struct cred *cred = current_cred();
00833 int retval;
00834
00835 if (!(retval = put_user(cred->gid, rgid)) &&
00836 !(retval = put_user(cred->egid, egid)))
00837 retval = put_user(cred->sgid, sgid);
00838
00839 return retval;
00840 }
00841
00842
00843
00844
00845
00846
00847
00848
/*
 * setfsuid(2): set the uid used for filesystem access checks.  Always
 * returns the *previous* fsuid, even on failure — callers detect errors
 * by comparing the return value with the requested uid.  Unprivileged
 * callers may only pick one of their current real/effective/saved/fs uids.
 */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;

	new = prepare_creds();
	if (!new)
		return current_fsuid();	/* best effort: report current fsuid */
	old = current_cred();
	old_fsuid = old->fsuid;

	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
		goto error;

	if (uid == old->uid  || uid == old->euid  ||
	    uid == old->suid || uid == old->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			new->fsuid = uid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

error:
	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
00882
00883
00884
00885
/*
 * setfsgid(2): set the gid used for filesystem access checks.  Mirrors
 * setfsuid(): always returns the previous fsgid, and unprivileged
 * callers may only pick one of their current real/effective/saved/fs gids.
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;

	new = prepare_creds();
	if (!new)
		return current_fsgid();	/* best effort: report current fsgid */
	old = current_cred();
	old_fsgid = old->fsgid;

	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		goto error;

	if (gid == old->gid  || gid == old->egid  ||
	    gid == old->sgid || gid == old->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			new->fsgid = gid;
			goto change_okay;
		}
	}

error:
	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
00918
00919 void do_sys_times(struct tms *tms)
00920 {
00921 struct task_cputime cputime;
00922 cputime_t cutime, cstime;
00923
00924 thread_group_cputime(current, &cputime);
00925 spin_lock_irq(¤t->sighand->siglock);
00926 cutime = current->signal->cutime;
00927 cstime = current->signal->cstime;
00928 spin_unlock_irq(¤t->sighand->siglock);
00929 tms->tms_utime = cputime_to_clock_t(cputime.utime);
00930 tms->tms_stime = cputime_to_clock_t(cputime.stime);
00931 tms->tms_cutime = cputime_to_clock_t(cutime);
00932 tms->tms_cstime = cputime_to_clock_t(cstime);
00933 }
00934
/*
 * times(2): optionally copy process times to @tbuf, then return the
 * current uptime in clock ticks.  The returned tick count may look like
 * a negative errno, hence force_successful_syscall_return().
 */
SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
00947
00948
00949
00950
00951
00952
00953
00954
00955
00956
00957
00958
00959
/*
 * setpgid(2): move process @pid (0 = self) into process group @pgid
 * (0 = a new group with the process's own pid).  Only the process
 * itself or its parent may do this, the target group must be in the
 * caller's session, and session leaders may never be moved.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/*
	 * From this point the tasklist lock pins pids and the
	 * process-group topology against concurrent changes.
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		/* a parent may only move a child within its own session ... */
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		/* ... and only before the child has exec'd */
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		/* the target group must exist and be in our session */
		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp) {
		change_pid(p, PIDTYPE_PGID, pgrp);
		set_task_pgrp(p, pid_nr(pgrp));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
01030
/*
 * getpgid(2): return the process-group id of @pid (0 = self), as seen
 * from the caller's pid namespace.
 */
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}
01058
#ifdef __ARCH_WANT_SYS_GETPGRP

/* getpgrp(2): historical shorthand for getpgid(0). */
SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif
01067
/*
 * getsid(2): return the session id of @pid (0 = self), as seen from the
 * caller's pid namespace.
 */
SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}
01095
/*
 * setsid(2): make the calling process a session leader in a new session
 * and new process group, detached from any controlling tty.  Fails with
 * -EPERM if the caller is already a session leader or if a process
 * group with the caller's pid already exists.
 */
SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail fatally if we are already the session leader */
	if (group_leader->signal->leader)
		goto out;

	/*
	 * Fail if a process group with our pid exists: we may not
	 * shadow an existing group's id with a session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	/* a new session has no controlling terminal */
	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
01124
01125
01126
01127
01128
01129
/* The empty supplementary-group list shared by init and kernel threads. */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
01131
/*
 * Allocate a group_info holding @gidsetsize gids (refcount 1).  Small
 * sets live in the structure's embedded small_block; larger sets get
 * one page per NGROUPS_PER_BLOCK gids.  Returns NULL on allocation
 * failure; free with put_group_info()/groups_free().
 */
struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* release the pages allocated before the failure */
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);
01170
/*
 * Free a group_info allocated by groups_alloc().  Per-page blocks are
 * released individually; the embedded small_block needs no freeing.
 */
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
01182
01183
/*
 * Copy the gids in @group_info to the user-space array @grouplist,
 * block by block.  Returns 0 or -EFAULT; export the group_info to a
 * user-space array.
 */
static int groups_to_user(gid_t __user *grouplist,
			  const struct group_info *group_info)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		/* the final block may be only partially filled */
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist, group_info->blocks[i], len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}
01202
01203
/*
 * Fill @group_info from the user-space gid array @grouplist, block by
 * block.  Returns 0 or -EFAULT.
 */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		/* the final block may be only partially filled */
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist, len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}
01222
01223
/*
 * Sort @group_info in ascending gid order using a Shell sort with the
 * 3n+1 gap sequence — simple, in-place, and fast enough for the small
 * sets involved.  Required so groups_search() can binary-search.
 */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	/* grow the stride to the largest 3n+1 value below gidsetsize */
	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			/* insertion within the stride-spaced subsequence */
			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
01251
01252
01253 int groups_search(const struct group_info *group_info, gid_t grp)
01254 {
01255 unsigned int left, right;
01256
01257 if (!group_info)
01258 return 0;
01259
01260 left = 0;
01261 right = group_info->ngroups;
01262 while (left < right) {
01263 unsigned int mid = (left+right)/2;
01264 int cmp = grp - GROUP_AT(group_info, mid);
01265 if (cmp > 0)
01266 left = mid + 1;
01267 else if (cmp < 0)
01268 right = mid;
01269 else
01270 return 1;
01271 }
01272 return 0;
01273 }
01274
01275
01276
01277
01278
01279
01280
01281
01282
/**
 * set_groups - Change a group subscription in a set of credentials
 * @new: The newly prepared set of credentials to alter
 * @group_info: The group list to install (sorted in place; a reference
 *	is taken, replacing the one previously held by @new)
 */
int set_groups(struct cred *new, struct group_info *group_info)
{
	int retval;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	put_group_info(new->group_info);
	groups_sort(group_info);
	get_group_info(group_info);
	new->group_info = group_info;
	return 0;
}

EXPORT_SYMBOL(set_groups);
01299
01300
01301
01302
01303
01304
01305
01306
/**
 * set_current_groups - Change current's group subscription
 * @group_info: The group list to impose
 *
 * Prepares a fresh credential set, installs @group_info via
 * set_groups(), and commits it for the current task.
 */
int set_current_groups(struct group_info *group_info)
{
	struct cred *new;
	int ret;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	ret = set_groups(new, group_info);
	if (ret < 0) {
		abort_creds(new);
		return ret;
	}

	return commit_creds(new);
}

EXPORT_SYMBOL(set_current_groups);
01326
/*
 * getgroups(2): return the number of supplementary groups; if
 * @gidsetsize is non-zero, also copy them to @grouplist (failing with
 * -EINVAL if the buffer is too small).  @gidsetsize == 0 is the
 * standard way to query the required size.
 */
SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
{
	const struct cred *cred = current_cred();
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = cred->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, cred->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
01350
01351
01352
01353
01354
01355
/*
 * setgroups(2): replace the caller's supplementary group list.
 * Requires CAP_SETGID.  The list is copied from user space, sorted and
 * installed via set_current_groups(); our local reference is dropped
 * regardless of outcome.
 */
SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
01380
01381
01382
01383
01384 int in_group_p(gid_t grp)
01385 {
01386 const struct cred *cred = current_cred();
01387 int retval = 1;
01388
01389 if (grp != cred->fsgid)
01390 retval = groups_search(cred->group_info, grp);
01391 return retval;
01392 }
01393
01394 EXPORT_SYMBOL(in_group_p);
01395
01396 int in_egroup_p(gid_t grp)
01397 {
01398 const struct cred *cred = current_cred();
01399 int retval = 1;
01400
01401 if (grp != cred->egid)
01402 retval = groups_search(cred->group_info, grp);
01403 return retval;
01404 }
01405
01406 EXPORT_SYMBOL(in_egroup_p);
01407
/* Serializes readers/writers of the utsname data below. */
DECLARE_RWSEM(uts_sem);
01409
/*
 * uname(2): copy the current namespace's utsname structure to user
 * space under the uts_sem read lock.
 */
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}
01420
/*
 * sethostname(2): set the nodename field of utsname.  Requires
 * CAP_SYS_ADMIN; the new name is copied through a kernel buffer and the
 * remainder of the field is zero-padded.
 */
SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		/* zero-pad the rest so the field stays NUL-terminated */
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
01442
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

/*
 * gethostname(2): copy the nodename (including its NUL terminator when
 * it fits) into the user buffer, truncating to @len bytes.
 */
SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);	/* include the trailing NUL */
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
01465
01466
01467
01468
01469
01470 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
01471 {
01472 int errno;
01473 char tmp[__NEW_UTS_LEN];
01474
01475 if (!capable(CAP_SYS_ADMIN))
01476 return -EPERM;
01477 if (len < 0 || len > __NEW_UTS_LEN)
01478 return -EINVAL;
01479
01480 down_write(&uts_sem);
01481 errno = -EFAULT;
01482 if (!copy_from_user(tmp, name, len)) {
01483 struct new_utsname *u = utsname();
01484
01485 memcpy(u->domainname, tmp, len);
01486 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
01487 errno = 0;
01488 }
01489 up_write(&uts_sem);
01490 return errno;
01491 }
01492
01493 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
01494 {
01495 if (resource >= RLIM_NLIMITS)
01496 return -EINVAL;
01497 else {
01498 struct rlimit value;
01499 task_lock(current->group_leader);
01500 value = current->signal->rlim[resource];
01501 task_unlock(current->group_leader);
01502 return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
01503 }
01504 }
01505
01506 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
01507
01508
01509
01510
01511
01512 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
01513 struct rlimit __user *, rlim)
01514 {
01515 struct rlimit x;
01516 if (resource >= RLIM_NLIMITS)
01517 return -EINVAL;
01518
01519 task_lock(current->group_leader);
01520 x = current->signal->rlim[resource];
01521 task_unlock(current->group_leader);
01522 if (x.rlim_cur > 0x7FFFFFFF)
01523 x.rlim_cur = 0x7FFFFFFF;
01524 if (x.rlim_max > 0x7FFFFFFF)
01525 x.rlim_max = 0x7FFFFFFF;
01526 return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
01527 }
01528
01529 #endif
01530
/*
 * setrlimit(2): validate and install a new resource limit for the calling
 * process, then (for RLIMIT_CPU) kick the POSIX CPU timer machinery so the
 * new cap takes effect.  Raising a hard limit requires CAP_SYS_RESOURCE.
 * Returns 0 on success, -EINVAL/-EFAULT/-EPERM or a security-module error.
 */
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim, *old_rlim;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	/* Soft limit may never exceed the hard limit. */
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	/* NOTE(review): old_rlim->rlim_max is read here without the task
	 * lock taken below for the write — presumably tolerated as a benign
	 * race; confirm against upstream history. */
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	/* NOFILE hard limit is additionally capped by sysctl nr_open. */
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
		return -EPERM;

	/* Give the security module a chance to veto the change. */
	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * A zero CPU soft limit would fire immediately; bump it to
		 * one second so the process at least gets a SIGXCPU it can
		 * observe.
		 */
		new_rlim.rlim_cur = 1;
	}

	/* Publish the new limit under the group leader's task lock. */
	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * An infinite CPU limit needs no timer update; otherwise propagate
	 * the new soft limit to the per-process CPU timers.
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	update_rlimit_cpu(new_rlim.rlim_cur);
out:
	return 0;
}
01583
01584
01585
01586
01587
01588
01589
01590
01591
01592
01593
01594
01595
01596
01597
01598
01599
01600
01601
01602
01603
01604
01605
01606
01607
01608
01609
01610
01611
01612
01613
01614
01615
01616
01617 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
01618 {
01619 r->ru_nvcsw += t->nvcsw;
01620 r->ru_nivcsw += t->nivcsw;
01621 r->ru_minflt += t->min_flt;
01622 r->ru_majflt += t->maj_flt;
01623 r->ru_inblock += task_io_get_inblock(t);
01624 r->ru_oublock += task_io_get_oublock(t);
01625 }
01626
/*
 * Fill *r with resource usage for task @p according to @who (RUSAGE_SELF,
 * RUSAGE_CHILDREN, RUSAGE_BOTH or RUSAGE_THREAD).  Thread-group-wide
 * figures are gathered under the sighand lock; on lock failure (task
 * exiting) *r is left zeroed.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;
	struct task_cputime cputime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (who == RUSAGE_THREAD) {
		/* Single-thread case: no sighand lock needed.  NOTE(review):
		 * times come from current while counters come from @p —
		 * presumably callers always pass p == current here; confirm. */
		utime = task_utime(current);
		stime = task_stime(current);
		accumulate_thread_rusage(p, r);
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		/* Reaped-children totals accumulated at wait() time. */
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;

		if (who == RUSAGE_CHILDREN)
			break;

		/* fall through: RUSAGE_BOTH adds the group's own usage */
	case RUSAGE_SELF:
		thread_group_cputime(p, &cputime);
		utime = cputime_add(utime, cputime.utime);
		stime = cputime_add(stime, cputime.stime);
		/* Counters banked in signal_struct by threads that exited. */
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		/* Plus the live counters of every thread still running. */
		t = p;
		do {
			accumulate_thread_rusage(t, r);
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
01688
01689 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
01690 {
01691 struct rusage r;
01692 k_getrusage(p, who, &r);
01693 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
01694 }
01695
01696 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
01697 {
01698 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
01699 who != RUSAGE_THREAD)
01700 return -EINVAL;
01701 return getrusage(current, who, ru);
01702 }
01703
01704 SYSCALL_DEFINE1(umask, int, mask)
01705 {
01706 mask = xchg(¤t->fs->umask, mask & S_IRWXUGO);
01707 return mask;
01708 }
01709
/*
 * prctl(2): per-process control operations on the calling task.  The
 * security module gets first refusal; any result other than -ENOSYS is
 * returned verbatim, otherwise the option is dispatched below.  Unknown
 * options yield -EINVAL.
 */
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	/* LSM hook: -ENOSYS means "no opinion, fall through". */
	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		/* Signal delivered to this task when its parent dies. */
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		error = 0;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		/* NOTE(review): arg2 is unsigned long, so "arg2 < 0" can
		 * never be true; the range check effectively is arg2 > 1. */
		if (arg2 < 0 || arg2 > 1) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		error = 0;
		break;

	/* Arch-specific controls; stub macros return -EINVAL when the
	 * architecture does not implement them (see top of file). */
	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		/* Only statistical process timing is supported. */
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		else
			error = 0;
		break;

	case PR_SET_NAME:
		/* Copy at most comm-1 bytes; slot was pre-terminated above. */
		comm[sizeof(me->comm)-1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		return 0;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm,
				 sizeof(comm)))
			return -EFAULT;
		return 0;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;

	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		/* Zero (arg2 is unsigned, so <= 0 means == 0) restores the
		 * task's default timer slack. */
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		error = 0;
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
01823
01824 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
01825 struct getcpu_cache __user *, unused)
01826 {
01827 int err = 0;
01828 int cpu = raw_smp_processor_id();
01829 if (cpup)
01830 err |= put_user(cpu, cpup);
01831 if (nodep)
01832 err |= put_user(cpu_to_node(cpu), nodep);
01833 return err ? -EFAULT : 0;
01834 }
01835
/* User-space helper run by orderly_poweroff(); overridable via sysctl. */
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
01837
/*
 * Cleanup callback for the usermode helper: free the argv array built in
 * orderly_poweroff().  envp is static there, so nothing to free for it.
 */
static void argv_cleanup(char **argv, char **envp)
{
	argv_free(argv);
}
01842
01843
01844
01845
01846
01847
01848
01849
/*
 * Request an orderly shutdown by launching the user-space poweroff_cmd
 * helper (without waiting for it).  If the helper cannot be started and
 * @force is true, fall back to an emergency sync followed by an immediate
 * kernel power-off.  Returns 0 if the helper was launched, else -errno.
 */
int orderly_poweroff(bool force)
{
	int argc;
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret = -ENOMEM;
	struct subprocess_info *info;

	if (argv == NULL) {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		goto out;
	}

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
	if (info == NULL) {
		/* Setup failed, so argv is still ours to free. */
		argv_free(argv);
		goto out;
	}

	/* From here on argv is freed by the helper's cleanup callback. */
	call_usermodehelper_setcleanup(info, argv_cleanup);

	/* Fire and forget; exec takes ownership of info. */
	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

  out:
	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");

		/* Best effort: flush dirty data, then power off directly. */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
01893 #endif