Diffstat (limited to 'kernel')
 kernel/exit.c           | 11
 kernel/fork.c           | 66
 kernel/kmod.c           | 11
 kernel/kprobes.c        |  3
 kernel/pid.c            | 14
 kernel/power/snapshot.c |  4
 kernel/ptrace.c         | 44
 kernel/rcutree.c        |  2
 kernel/resource.c       |  1
 kernel/sys.c            | 18
 10 files changed, 107 insertions(+), 67 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 6a057750ebbb..fafe75d9e6f6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -312,17 +312,6 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
 
-void __set_special_pids(struct pid *pid)
-{
-	struct task_struct *curr = current->group_leader;
-
-	if (task_session(curr) != pid)
-		change_pid(curr, PIDTYPE_SID, pid);
-
-	if (task_pgrp(curr) != pid)
-		change_pid(curr, PIDTYPE_PGID, pid);
-}
-
 /*
  * Let kernel threads use this to say that they allow a certain signal.
  * Must not be used if kthread was cloned with CLONE_SIGHAND.
diff --git a/kernel/fork.c b/kernel/fork.c
index 987b28a1f01b..6e6a1c11b3e5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1121,6 +1121,12 @@ static void posix_cpu_timers_init(struct task_struct *tsk)
 	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
 }
 
+static inline void
+init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
+{
+	task->pids[type].pid = pid;
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1199,8 +1205,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	retval = -EAGAIN;
 	if (atomic_read(&p->real_cred->user->processes) >=
 			task_rlimit(p, RLIMIT_NPROC)) {
-		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
-		    p->real_cred->user != INIT_USER)
+		if (p->real_cred->user != INIT_USER &&
+		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
 			goto bad_fork_free;
 	}
 	current->flags &= ~PF_NPROC_EXCEEDED;
@@ -1354,11 +1360,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_io;
 	}
 
-	p->pid = pid_nr(pid);
-	p->tgid = p->pid;
-	if (clone_flags & CLONE_THREAD)
-		p->tgid = current->tgid;
-
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
 	/*
 	 * Clear TID on mm_release()?
@@ -1394,12 +1395,19 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	clear_all_latency_tracing(p);
 
 	/* ok, now we should be set up.. */
-	if (clone_flags & CLONE_THREAD)
+	p->pid = pid_nr(pid);
+	if (clone_flags & CLONE_THREAD) {
 		p->exit_signal = -1;
-	else if (clone_flags & CLONE_PARENT)
-		p->exit_signal = current->group_leader->exit_signal;
-	else
-		p->exit_signal = (clone_flags & CSIGNAL);
+		p->group_leader = current->group_leader;
+		p->tgid = current->tgid;
+	} else {
+		if (clone_flags & CLONE_PARENT)
+			p->exit_signal = current->group_leader->exit_signal;
+		else
+			p->exit_signal = (clone_flags & CSIGNAL);
+		p->group_leader = p;
+		p->tgid = p->pid;
+	}
 
 	p->pdeath_signal = 0;
 	p->exit_state = 0;
@@ -1408,15 +1416,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
 	p->dirty_paused_when = 0;
 
-	/*
-	 * Ok, make it visible to the rest of the system.
-	 * We dont wake it up yet.
-	 */
-	p->group_leader = p;
 	INIT_LIST_HEAD(&p->thread_group);
 	p->task_works = NULL;
 
-	/* Need tasklist lock for parent etc handling! */
+	/*
+	 * Make it visible to the rest of the system, but dont wake it up yet.
+	 * Need tasklist lock for parent etc handling!
+	 */
 	write_lock_irq(&tasklist_lock);
 
 	/* CLONE_PARENT re-uses the old parent */
@@ -1446,18 +1452,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_free_pid;
 	}
 
-	if (clone_flags & CLONE_THREAD) {
-		current->signal->nr_threads++;
-		atomic_inc(&current->signal->live);
-		atomic_inc(&current->signal->sigcnt);
-		p->group_leader = current->group_leader;
-		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
-	}
-
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
 
+		init_task_pid(p, PIDTYPE_PID, pid);
 		if (thread_group_leader(p)) {
+			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
+			init_task_pid(p, PIDTYPE_SID, task_session(current));
+
 			if (is_child_reaper(pid)) {
 				ns_of_pid(pid)->child_reaper = p;
 				p->signal->flags |= SIGNAL_UNKILLABLE;
@@ -1465,13 +1467,19 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 			p->signal->leader_pid = pid;
 			p->signal->tty = tty_kref_get(current->signal->tty);
-			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
-			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail(&p->sibling, &p->real_parent->children);
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
+			attach_pid(p, PIDTYPE_PGID);
+			attach_pid(p, PIDTYPE_SID);
 			__this_cpu_inc(process_counts);
+		} else {
+			current->signal->nr_threads++;
+			atomic_inc(&current->signal->live);
+			atomic_inc(&current->signal->sigcnt);
+			list_add_tail_rcu(&p->thread_group,
+					  &p->group_leader->thread_group);
 		}
-		attach_pid(p, PIDTYPE_PID, pid);
+		attach_pid(p, PIDTYPE_PID);
 		nr_threads++;
 	}
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 8241906c4b61..fb326365b694 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -147,6 +147,9 @@ int __request_module(bool wait, const char *fmt, ...)
 	 */
 	WARN_ON_ONCE(wait && current_is_async());
 
+	if (!modprobe_path[0])
+		return 0;
+
 	va_start(args, fmt);
 	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
 	va_end(args);
@@ -569,14 +572,6 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 	int retval = 0;
 
 	helper_lock();
-	if (!sub_info->path) {
-		retval = -EINVAL;
-		goto out;
-	}
-
-	if (sub_info->path[0] == '\0')
-		goto out;
-
 	if (!khelper_wq || usermodehelper_disabled) {
 		retval = -EBUSY;
 		goto out;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index bddf3b201a48..6e33498d665c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2332,6 +2332,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	if (copy_from_user(buf, user_buf, buf_size))
 		return -EFAULT;
 
+	buf[buf_size] = '\0';
 	switch (buf[0]) {
 	case 'y':
 	case 'Y':
@@ -2343,6 +2344,8 @@ static ssize_t write_enabled_file_bool(struct file *file,
 	case '0':
 		disarm_all_kprobes();
 		break;
+	default:
+		return -EINVAL;
 	}
 
 	return count;
diff --git a/kernel/pid.c b/kernel/pid.c
index 0db3e791a06d..66505c1dfc51 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -75,6 +75,7 @@ struct pid_namespace init_pid_ns = {
 		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
 	},
 	.last_pid = 0,
+	.nr_hashed = PIDNS_HASH_ADDING,
 	.level = 0,
 	.child_reaper = &init_task,
 	.user_ns = &init_user_ns,
@@ -373,14 +374,10 @@ EXPORT_SYMBOL_GPL(find_vpid);
 /*
  * attach_pid() must be called with the tasklist_lock write-held.
  */
-void attach_pid(struct task_struct *task, enum pid_type type,
-		struct pid *pid)
+void attach_pid(struct task_struct *task, enum pid_type type)
 {
-	struct pid_link *link;
-
-	link = &task->pids[type];
-	link->pid = pid;
-	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
+	struct pid_link *link = &task->pids[type];
+	hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
 }
 
 static void __change_pid(struct task_struct *task, enum pid_type type,
@@ -412,7 +409,7 @@ void change_pid(struct task_struct *task, enum pid_type type,
 		struct pid *pid)
 {
 	__change_pid(task, type, pid);
-	attach_pid(task, type, pid);
+	attach_pid(task, type);
 }
 
 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
@@ -594,7 +591,6 @@ void __init pidmap_init(void)
 	/* Reserve PID 0. We never call free_pidmap(0) */
 	set_bit(0, init_pid_ns.pidmap[0].page);
 	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
-	init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;
 
 	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
 			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 7872a35eafe7..349587bb03e1 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1652,7 +1652,7 @@ unsigned long snapshot_get_image_size(void)
 static int init_header(struct swsusp_info *info)
 {
 	memset(info, 0, sizeof(struct swsusp_info));
-	info->num_physpages = num_physpages;
+	info->num_physpages = get_num_physpages();
 	info->image_pages = nr_copy_pages;
 	info->pages = snapshot_get_image_size();
 	info->size = info->pages;
@@ -1796,7 +1796,7 @@ static int check_header(struct swsusp_info *info)
 	char *reason;
 
 	reason = check_image_kernel(info);
-	if (!reason && info->num_physpages != num_physpages)
+	if (!reason && info->num_physpages != get_num_physpages())
 		reason = "memory size";
 	if (reason) {
 		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 335a7ae697f5..ba5e6cea181a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -844,6 +844,47 @@ int ptrace_request(struct task_struct *child, long request,
 		ret = ptrace_setsiginfo(child, &siginfo);
 		break;
 
+	case PTRACE_GETSIGMASK:
+		if (addr != sizeof(sigset_t)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+			ret = -EFAULT;
+		else
+			ret = 0;
+
+		break;
+
+	case PTRACE_SETSIGMASK: {
+		sigset_t new_set;
+
+		if (addr != sizeof(sigset_t)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+		/*
+		 * Every thread does recalc_sigpending() after resume, so
+		 * retarget_shared_pending() and recalc_sigpending() are not
+		 * called here.
+		 */
+		spin_lock_irq(&child->sighand->siglock);
+		child->blocked = new_set;
+		spin_unlock_irq(&child->sighand->siglock);
+
+		ret = 0;
+		break;
+	}
+
 	case PTRACE_INTERRUPT:
 		/*
 		 * Stop tracee without any side-effect on signal or job
@@ -948,8 +989,7 @@ int ptrace_request(struct task_struct *child, long request,
 
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
 	case PTRACE_GETREGSET:
-	case PTRACE_SETREGSET:
-	{
+	case PTRACE_SETREGSET: {
 		struct iovec kiov;
 		struct iovec __user *uiov = datavp;
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index cf3adc6fe001..e08abb9461ac 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -3026,7 +3026,7 @@ static int __init rcu_spawn_gp_kthread(void)
 	struct task_struct *t;
 
 	for_each_rcu_flavor(rsp) {
-		t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
+		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
 		BUG_ON(IS_ERR(t));
 		rnp = rcu_get_root(rsp);
 		raw_spin_lock_irqsave(&rnp->lock, flags);
diff --git a/kernel/resource.c b/kernel/resource.c
index 77bf11a86c7d..3f285dce9347 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -449,7 +449,6 @@ static int __find_resource(struct resource *root, struct resource *old,
 	struct resource *this = root->child;
 	struct resource tmp = *new, avail, alloc;
 
-	tmp.flags = new->flags;
 	tmp.start = root->start;
 	/*
 	 * Skip past an allocated resource that starts at 0, since the assignment
diff --git a/kernel/sys.c b/kernel/sys.c
index 2bbd9a73b54c..071de900c824 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -511,7 +511,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 	case LINUX_REBOOT_CMD_HALT:
 		kernel_halt();
 		do_exit(0);
-		panic("cannot halt");
+		panic("cannot halt.\n");
 
 	case LINUX_REBOOT_CMD_POWER_OFF:
 		kernel_power_off();
@@ -1309,6 +1309,17 @@ out:
 	return retval;
 }
 
+static void set_special_pids(struct pid *pid)
+{
+	struct task_struct *curr = current->group_leader;
+
+	if (task_session(curr) != pid)
+		change_pid(curr, PIDTYPE_SID, pid);
+
+	if (task_pgrp(curr) != pid)
+		change_pid(curr, PIDTYPE_PGID, pid);
+}
+
 SYSCALL_DEFINE0(setsid)
 {
 	struct task_struct *group_leader = current->group_leader;
@@ -1328,7 +1339,7 @@ SYSCALL_DEFINE0(setsid)
 		goto out;
 
 	group_leader->signal->leader = 1;
-	__set_special_pids(sid);
+	set_special_pids(sid);
 
 	proc_clear_tty(group_leader);
 
@@ -2355,8 +2366,7 @@ static int do_sysinfo(struct sysinfo *info)
 
 	memset(info, 0, sizeof(struct sysinfo));
 
-	ktime_get_ts(&tp);
-	monotonic_to_bootbased(&tp);
+	get_monotonic_boottime(&tp);
 	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
 	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);