Diffstat (limited to 'kernel/sys.c')
-rw-r--r--	kernel/sys.c	168
1 file changed, 75 insertions(+), 93 deletions(-)
diff --git a/kernel/sys.c b/kernel/sys.c
index 14e97282eb6c..31deba8f7d16 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -169,9 +169,9 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
 			pgrp = find_vpid(who);
 		else
 			pgrp = task_pgrp(current);
-		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 			error = set_one_prio(p, niceval, error);
-		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 		break;
 	case PRIO_USER:
 		user = current->user;
@@ -229,11 +229,11 @@ asmlinkage long sys_getpriority(int which, int who)
 			pgrp = find_vpid(who);
 		else
 			pgrp = task_pgrp(current);
-		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
 			niceval = 20 - task_nice(p);
 			if (niceval > retval)
 				retval = niceval;
-		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
 		break;
 	case PRIO_USER:
 		user = current->user;
@@ -274,7 +274,7 @@ void emergency_restart(void)
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
 
-static void kernel_restart_prepare(char *cmd)
+void kernel_restart_prepare(char *cmd)
 {
 	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;
@@ -301,26 +301,6 @@ void kernel_restart(char *cmd)
 }
 EXPORT_SYMBOL_GPL(kernel_restart);
 
-/**
- * kernel_kexec - reboot the system
- *
- * Move into place and start executing a preloaded standalone
- * executable. If nothing was preloaded return an error.
- */
-static void kernel_kexec(void)
-{
-#ifdef CONFIG_KEXEC
-	struct kimage *image;
-	image = xchg(&kexec_image, NULL);
-	if (!image)
-		return;
-	kernel_restart_prepare(NULL);
-	printk(KERN_EMERG "Starting new kernel\n");
-	machine_shutdown();
-	machine_kexec(image);
-#endif
-}
-
 static void kernel_shutdown_prepare(enum system_states state)
 {
 	blocking_notifier_call_chain(&reboot_notifier_list,
@@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
 		kernel_restart(buffer);
 		break;
 
+#ifdef CONFIG_KEXEC
 	case LINUX_REBOOT_CMD_KEXEC:
-		kernel_kexec();
-		unlock_kernel();
-		return -EINVAL;
+	{
+		int ret;
+		ret = kernel_kexec();
+		unlock_kernel();
+		return ret;
+	}
+#endif
 
 #ifdef CONFIG_HIBERNATION
 	case LINUX_REBOOT_CMD_SW_SUSPEND:
@@ -868,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid)
 	return old_fsgid;
 }
 
+void do_sys_times(struct tms *tms)
+{
+	struct task_cputime cputime;
+	cputime_t cutime, cstime;
+
+	spin_lock_irq(&current->sighand->siglock);
+	thread_group_cputime(current, &cputime);
+	cutime = current->signal->cutime;
+	cstime = current->signal->cstime;
+	spin_unlock_irq(&current->sighand->siglock);
+	tms->tms_utime = cputime_to_clock_t(cputime.utime);
+	tms->tms_stime = cputime_to_clock_t(cputime.stime);
+	tms->tms_cutime = cputime_to_clock_t(cutime);
+	tms->tms_cstime = cputime_to_clock_t(cstime);
+}
+
 asmlinkage long sys_times(struct tms __user * tbuf)
 {
-	/*
-	 * In the SMP world we might just be unlucky and have one of
-	 * the times increment as we use it. Since the value is an
-	 * atomically safe type this is just fine. Conceptually its
-	 * as if the syscall took an instant longer to occur.
-	 */
 	if (tbuf) {
 		struct tms tmp;
-		struct task_struct *tsk = current;
-		struct task_struct *t;
-		cputime_t utime, stime, cutime, cstime;
-
-		spin_lock_irq(&tsk->sighand->siglock);
-		utime = tsk->signal->utime;
-		stime = tsk->signal->stime;
-		t = tsk;
-		do {
-			utime = cputime_add(utime, t->utime);
-			stime = cputime_add(stime, t->stime);
-			t = next_thread(t);
-		} while (t != tsk);
-
-		cutime = tsk->signal->cutime;
-		cstime = tsk->signal->cstime;
-		spin_unlock_irq(&tsk->sighand->siglock);
-
-		tmp.tms_utime = cputime_to_clock_t(utime);
-		tmp.tms_stime = cputime_to_clock_t(stime);
-		tmp.tms_cutime = cputime_to_clock_t(cutime);
-		tmp.tms_cstime = cputime_to_clock_t(cstime);
+
+		do_sys_times(&tmp);
 		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
 			return -EFAULT;
 	}
@@ -1075,9 +1050,7 @@ asmlinkage long sys_setsid(void)
 	group_leader->signal->leader = 1;
 	__set_special_pids(sid);
 
-	spin_lock(&group_leader->sighand->siglock);
-	group_leader->signal->tty = NULL;
-	spin_unlock(&group_leader->sighand->siglock);
+	proc_clear_tty(group_leader);
 
 	err = session;
 out:
@@ -1343,8 +1316,6 @@ EXPORT_SYMBOL(in_egroup_p);
 
 DECLARE_RWSEM(uts_sem);
 
-EXPORT_SYMBOL(uts_sem);
-
 asmlinkage long sys_newuname(struct new_utsname __user * name)
 {
 	int errno = 0;
@@ -1368,8 +1339,10 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		memcpy(utsname()->nodename, tmp, len);
-		utsname()->nodename[len] = 0;
+		struct new_utsname *u = utsname();
+
+		memcpy(u->nodename, tmp, len);
+		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
 		errno = 0;
 	}
 	up_write(&uts_sem);
@@ -1381,15 +1354,17 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 asmlinkage long sys_gethostname(char __user *name, int len)
 {
 	int i, errno;
+	struct new_utsname *u;
 
 	if (len < 0)
 		return -EINVAL;
 	down_read(&uts_sem);
-	i = 1 + strlen(utsname()->nodename);
+	u = utsname();
+	i = 1 + strlen(u->nodename);
 	if (i > len)
 		i = len;
 	errno = 0;
-	if (copy_to_user(name, utsname()->nodename, i))
+	if (copy_to_user(name, u->nodename, i))
 		errno = -EFAULT;
 	up_read(&uts_sem);
 	return errno;
@@ -1414,8 +1389,10 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
 	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		memcpy(utsname()->domainname, tmp, len);
-		utsname()->domainname[len] = 0;
+		struct new_utsname *u = utsname();
+
+		memcpy(u->domainname, tmp, len);
+		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
 		errno = 0;
 	}
 	up_write(&uts_sem);
@@ -1462,21 +1439,28 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 {
 	struct rlimit new_rlim, *old_rlim;
-	unsigned long it_prof_secs;
 	int retval;
 
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
 		return -EFAULT;
-	if (new_rlim.rlim_cur > new_rlim.rlim_max)
-		return -EINVAL;
 	old_rlim = current->signal->rlim + resource;
 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
 	    !capable(CAP_SYS_RESOURCE))
 		return -EPERM;
-	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
-		return -EPERM;
+
+	if (resource == RLIMIT_NOFILE) {
+		if (new_rlim.rlim_max == RLIM_INFINITY)
+			new_rlim.rlim_max = sysctl_nr_open;
+		if (new_rlim.rlim_cur == RLIM_INFINITY)
+			new_rlim.rlim_cur = sysctl_nr_open;
+		if (new_rlim.rlim_max > sysctl_nr_open)
+			return -EPERM;
+	}
+
+	if (new_rlim.rlim_cur > new_rlim.rlim_max)
+		return -EINVAL;
 
 	retval = security_task_setrlimit(resource, &new_rlim);
 	if (retval)
@@ -1508,18 +1492,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 	if (new_rlim.rlim_cur == RLIM_INFINITY)
 		goto out;
 
-	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
-	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
-		unsigned long rlim_cur = new_rlim.rlim_cur;
-		cputime_t cputime;
-
-		cputime = secs_to_cputime(rlim_cur);
-		read_lock(&tasklist_lock);
-		spin_lock_irq(&current->sighand->siglock);
-		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-		spin_unlock_irq(&current->sighand->siglock);
-		read_unlock(&tasklist_lock);
-	}
+	update_rlimit_cpu(new_rlim.rlim_cur);
 out:
 	return 0;
 }
@@ -1557,11 +1530,8 @@ out:
  *
  */
 
-static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
-				     cputime_t *utimep, cputime_t *stimep)
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
 {
-	*utimep = cputime_add(*utimep, t->utime);
-	*stimep = cputime_add(*stimep, t->stime);
 	r->ru_nvcsw += t->nvcsw;
 	r->ru_nivcsw += t->nivcsw;
 	r->ru_minflt += t->min_flt;
@@ -1575,12 +1545,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 	struct task_struct *t;
 	unsigned long flags;
 	cputime_t utime, stime;
+	struct task_cputime cputime;
 
 	memset((char *) r, 0, sizeof *r);
 	utime = stime = cputime_zero;
 
 	if (who == RUSAGE_THREAD) {
-		accumulate_thread_rusage(p, r, &utime, &stime);
+		accumulate_thread_rusage(p, r);
 		goto out;
 	}
 
@@ -1603,8 +1574,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 		break;
 
 	case RUSAGE_SELF:
-		utime = cputime_add(utime, p->signal->utime);
-		stime = cputime_add(stime, p->signal->stime);
+		thread_group_cputime(p, &cputime);
+		utime = cputime_add(utime, cputime.utime);
+		stime = cputime_add(stime, cputime.stime);
 		r->ru_nvcsw += p->signal->nvcsw;
 		r->ru_nivcsw += p->signal->nivcsw;
 		r->ru_minflt += p->signal->min_flt;
@@ -1613,7 +1585,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 		r->ru_oublock += p->signal->oublock;
 		t = p;
 		do {
-			accumulate_thread_rusage(t, r, &utime, &stime);
+			accumulate_thread_rusage(t, r);
 			t = next_thread(t);
 		} while (t != p);
 		break;
@@ -1744,6 +1716,16 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 	case PR_SET_TSC:
 		error = SET_TSC_CTL(arg2);
 		break;
+	case PR_GET_TIMERSLACK:
+		error = current->timer_slack_ns;
+		break;
+	case PR_SET_TIMERSLACK:
+		if (arg2 <= 0)
+			current->timer_slack_ns =
+					current->default_timer_slack_ns;
+		else
+			current->timer_slack_ns = arg2;
+		break;
 	default:
 		error = -EINVAL;
 		break;
@@ -1795,7 +1777,7 @@ int orderly_poweroff(bool force)
 		goto out;
 	}
 
-	info = call_usermodehelper_setup(argv[0], argv, envp);
+	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
 	if (info == NULL) {
 		argv_free(argv);
 		goto out;