path: root/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 21:34:42 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-10 21:34:42 -0500
commit    b6da0076bab5a12afb19312ffee41c95490af2a0 (patch)
tree      52a5675b9c2ff95d88b981d5b9a3822f6073c112 /kernel
parent    cbfe0de303a55ed96d8831c2d5f56f8131cd6612 (diff)
parent    a53b831549141aa060a8b54b76e3a42870d74cc0 (diff)
Merge branch 'akpm' (patchbomb from Andrew)
Merge first patchbomb from Andrew Morton:

 - a few minor cifs fixes
 - dma-debug updates
 - ocfs2
 - slab
 - about half of MM
 - procfs
 - kernel/exit.c
 - panic.c tweaks
 - printk updates
 - lib/ updates
 - checkpatch updates
 - fs/binfmt updates
 - the drivers/rtc tree
 - nilfs
 - kmod fixes
 - more kernel/exit.c
 - various other misc tweaks and fixes

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (190 commits)
  exit: pidns: fix/update the comments in zap_pid_ns_processes()
  exit: pidns: alloc_pid() leaks pid_namespace if child_reaper is exiting
  exit: exit_notify: re-use "dead" list to autoreap current
  exit: reparent: call forget_original_parent() under tasklist_lock
  exit: reparent: avoid find_new_reaper() if no children
  exit: reparent: introduce find_alive_thread()
  exit: reparent: introduce find_child_reaper()
  exit: reparent: document the ->has_child_subreaper checks
  exit: reparent: s/while_each_thread/for_each_thread/ in find_new_reaper()
  exit: reparent: fix the cross-namespace PR_SET_CHILD_SUBREAPER reparenting
  exit: reparent: fix the dead-parent PR_SET_CHILD_SUBREAPER reparenting
  exit: proc: don't try to flush /proc/tgid/task/tgid
  exit: release_task: fix the comment about group leader accounting
  exit: wait: drop tasklist_lock before psig->c* accounting
  exit: wait: don't use zombie->real_parent
  exit: wait: cleanup the ptrace_reparented() checks
  usermodehelper: kill the kmod_thread_locker logic
  usermodehelper: don't use CLONE_VFORK for ____call_usermodehelper()
  fs/hfs/catalog.c: fix comparison bug in hfs_cat_keycmp
  nilfs2: fix the nilfs_iget() vs. nilfs_new_inode() races
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile        |   1
-rw-r--r--  kernel/exit.c          | 245
-rw-r--r--  kernel/kmod.c          |  43
-rw-r--r--  kernel/panic.c         |  13
-rw-r--r--  kernel/pid.c           |   2
-rw-r--r--  kernel/pid_namespace.c |  28
-rw-r--r--  kernel/printk/printk.c |  49
-rw-r--r--  kernel/ptrace.c        |  23
-rw-r--r--  kernel/res_counter.c   | 211
-rw-r--r--  kernel/sched/core.c    |   4
-rw-r--r--  kernel/sysctl.c        |   9
-rw-r--r--  kernel/sysctl_binary.c |   1
12 files changed, 205 insertions, 424 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 17ea6d4a9a24..a59481a3fa6c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -57,7 +57,6 @@ obj-$(CONFIG_UTS_NS) += utsname.o
 obj-$(CONFIG_USER_NS) += user_namespace.o
 obj-$(CONFIG_PID_NS) += pid_namespace.o
 obj-$(CONFIG_IKCONFIG) += configs.o
-obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
 obj-$(CONFIG_SMP) += stop_machine.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
diff --git a/kernel/exit.c b/kernel/exit.c
index 232c4bc8bcc9..8714e5ded8b4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -118,13 +118,10 @@ static void __exit_signal(struct task_struct *tsk)
118 } 118 }
119 119
120 /* 120 /*
121 * Accumulate here the counters for all threads but the group leader 121 * Accumulate here the counters for all threads as they die. We could
122 * as they die, so they can be added into the process-wide totals 122 * skip the group leader because it is the last user of signal_struct,
123 * when those are taken. The group leader stays around as a zombie as 123 * but we want to avoid the race with thread_group_cputime() which can
124 * long as there are other threads. When it gets reaped, the exit.c 124 * see the empty ->thread_head list.
125 * code will add its counts into these totals. We won't ever get here
126 * for the group leader, since it will have been the last reference on
127 * the signal_struct.
128 */ 125 */
129 task_cputime(tsk, &utime, &stime); 126 task_cputime(tsk, &utime, &stime);
130 write_seqlock(&sig->stats_lock); 127 write_seqlock(&sig->stats_lock);
@@ -462,6 +459,44 @@ static void exit_mm(struct task_struct *tsk)
462 clear_thread_flag(TIF_MEMDIE); 459 clear_thread_flag(TIF_MEMDIE);
463} 460}
464 461
462static struct task_struct *find_alive_thread(struct task_struct *p)
463{
464 struct task_struct *t;
465
466 for_each_thread(p, t) {
467 if (!(t->flags & PF_EXITING))
468 return t;
469 }
470 return NULL;
471}
472
473static struct task_struct *find_child_reaper(struct task_struct *father)
474 __releases(&tasklist_lock)
475 __acquires(&tasklist_lock)
476{
477 struct pid_namespace *pid_ns = task_active_pid_ns(father);
478 struct task_struct *reaper = pid_ns->child_reaper;
479
480 if (likely(reaper != father))
481 return reaper;
482
483 reaper = find_alive_thread(father);
484 if (reaper) {
485 pid_ns->child_reaper = reaper;
486 return reaper;
487 }
488
489 write_unlock_irq(&tasklist_lock);
490 if (unlikely(pid_ns == &init_pid_ns)) {
491 panic("Attempted to kill init! exitcode=0x%08x\n",
492 father->signal->group_exit_code ?: father->exit_code);
493 }
494 zap_pid_ns_processes(pid_ns);
495 write_lock_irq(&tasklist_lock);
496
497 return father;
498}
499
465/* 500/*
466 * When we die, we re-parent all our children, and try to: 501 * When we die, we re-parent all our children, and try to:
467 * 1. give them to another thread in our thread group, if such a member exists 502 * 1. give them to another thread in our thread group, if such a member exists
@@ -469,58 +504,36 @@ static void exit_mm(struct task_struct *tsk)
469 * child_subreaper for its children (like a service manager) 504 * child_subreaper for its children (like a service manager)
470 * 3. give it to the init process (PID 1) in our pid namespace 505 * 3. give it to the init process (PID 1) in our pid namespace
471 */ 506 */
472static struct task_struct *find_new_reaper(struct task_struct *father) 507static struct task_struct *find_new_reaper(struct task_struct *father,
473 __releases(&tasklist_lock) 508 struct task_struct *child_reaper)
474 __acquires(&tasklist_lock)
475{ 509{
476 struct pid_namespace *pid_ns = task_active_pid_ns(father); 510 struct task_struct *thread, *reaper;
477 struct task_struct *thread;
478 511
479 thread = father; 512 thread = find_alive_thread(father);
480 while_each_thread(father, thread) { 513 if (thread)
481 if (thread->flags & PF_EXITING)
482 continue;
483 if (unlikely(pid_ns->child_reaper == father))
484 pid_ns->child_reaper = thread;
485 return thread; 514 return thread;
486 }
487
488 if (unlikely(pid_ns->child_reaper == father)) {
489 write_unlock_irq(&tasklist_lock);
490 if (unlikely(pid_ns == &init_pid_ns)) {
491 panic("Attempted to kill init! exitcode=0x%08x\n",
492 father->signal->group_exit_code ?:
493 father->exit_code);
494 }
495
496 zap_pid_ns_processes(pid_ns);
497 write_lock_irq(&tasklist_lock);
498 } else if (father->signal->has_child_subreaper) {
499 struct task_struct *reaper;
500 515
516 if (father->signal->has_child_subreaper) {
501 /* 517 /*
502 * Find the first ancestor marked as child_subreaper. 518 * Find the first ->is_child_subreaper ancestor in our pid_ns.
503 * Note that the code below checks same_thread_group(reaper, 519 * We start from father to ensure we can not look into another
504 * pid_ns->child_reaper). This is what we need to DTRT in a 520 * namespace, this is safe because all its threads are dead.
505 * PID namespace. However we still need the check above, see
506 * http://marc.info/?l=linux-kernel&m=131385460420380
507 */ 521 */
508 for (reaper = father->real_parent; 522 for (reaper = father;
509 reaper != &init_task; 523 !same_thread_group(reaper, child_reaper);
510 reaper = reaper->real_parent) { 524 reaper = reaper->real_parent) {
511 if (same_thread_group(reaper, pid_ns->child_reaper)) 525 /* call_usermodehelper() descendants need this check */
526 if (reaper == &init_task)
512 break; 527 break;
513 if (!reaper->signal->is_child_subreaper) 528 if (!reaper->signal->is_child_subreaper)
514 continue; 529 continue;
515 thread = reaper; 530 thread = find_alive_thread(reaper);
516 do { 531 if (thread)
517 if (!(thread->flags & PF_EXITING)) 532 return thread;
518 return reaper;
519 } while_each_thread(reaper, thread);
520 } 533 }
521 } 534 }
522 535
523 return pid_ns->child_reaper; 536 return child_reaper;
524} 537}
525 538
526/* 539/*
@@ -529,15 +542,7 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
529static void reparent_leader(struct task_struct *father, struct task_struct *p, 542static void reparent_leader(struct task_struct *father, struct task_struct *p,
530 struct list_head *dead) 543 struct list_head *dead)
531{ 544{
532 list_move_tail(&p->sibling, &p->real_parent->children); 545 if (unlikely(p->exit_state == EXIT_DEAD))
533
534 if (p->exit_state == EXIT_DEAD)
535 return;
536 /*
537 * If this is a threaded reparent there is no need to
538 * notify anyone anything has happened.
539 */
540 if (same_thread_group(p->real_parent, father))
541 return; 546 return;
542 547
543 /* We don't want people slaying init. */ 548 /* We don't want people slaying init. */
@@ -548,49 +553,53 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
548 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { 553 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
549 if (do_notify_parent(p, p->exit_signal)) { 554 if (do_notify_parent(p, p->exit_signal)) {
550 p->exit_state = EXIT_DEAD; 555 p->exit_state = EXIT_DEAD;
551 list_move_tail(&p->sibling, dead); 556 list_add(&p->ptrace_entry, dead);
552 } 557 }
553 } 558 }
554 559
555 kill_orphaned_pgrp(p, father); 560 kill_orphaned_pgrp(p, father);
556} 561}
557 562
558static void forget_original_parent(struct task_struct *father) 563/*
564 * This does two things:
565 *
566 * A. Make init inherit all the child processes
567 * B. Check to see if any process groups have become orphaned
568 * as a result of our exiting, and if they have any stopped
569 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
570 */
571static void forget_original_parent(struct task_struct *father,
572 struct list_head *dead)
559{ 573{
560 struct task_struct *p, *n, *reaper; 574 struct task_struct *p, *t, *reaper;
561 LIST_HEAD(dead_children);
562 575
563 write_lock_irq(&tasklist_lock); 576 if (unlikely(!list_empty(&father->ptraced)))
564 /* 577 exit_ptrace(father, dead);
565 * Note that exit_ptrace() and find_new_reaper() might
566 * drop tasklist_lock and reacquire it.
567 */
568 exit_ptrace(father);
569 reaper = find_new_reaper(father);
570 578
571 list_for_each_entry_safe(p, n, &father->children, sibling) { 579 /* Can drop and reacquire tasklist_lock */
572 struct task_struct *t = p; 580 reaper = find_child_reaper(father);
581 if (list_empty(&father->children))
582 return;
573 583
574 do { 584 reaper = find_new_reaper(father, reaper);
585 list_for_each_entry(p, &father->children, sibling) {
586 for_each_thread(p, t) {
575 t->real_parent = reaper; 587 t->real_parent = reaper;
576 if (t->parent == father) { 588 BUG_ON((!t->ptrace) != (t->parent == father));
577 BUG_ON(t->ptrace); 589 if (likely(!t->ptrace))
578 t->parent = t->real_parent; 590 t->parent = t->real_parent;
579 }
580 if (t->pdeath_signal) 591 if (t->pdeath_signal)
581 group_send_sig_info(t->pdeath_signal, 592 group_send_sig_info(t->pdeath_signal,
582 SEND_SIG_NOINFO, t); 593 SEND_SIG_NOINFO, t);
583 } while_each_thread(p, t); 594 }
584 reparent_leader(father, p, &dead_children); 595 /*
585 } 596 * If this is a threaded reparent there is no need to
586 write_unlock_irq(&tasklist_lock); 597 * notify anyone anything has happened.
587 598 */
588 BUG_ON(!list_empty(&father->children)); 599 if (!same_thread_group(reaper, father))
589 600 reparent_leader(father, p, dead);
590 list_for_each_entry_safe(p, n, &dead_children, sibling) {
591 list_del_init(&p->sibling);
592 release_task(p);
593 } 601 }
602 list_splice_tail_init(&father->children, &reaper->children);
594} 603}
595 604
596/* 605/*
@@ -600,18 +609,12 @@ static void forget_original_parent(struct task_struct *father)
600static void exit_notify(struct task_struct *tsk, int group_dead) 609static void exit_notify(struct task_struct *tsk, int group_dead)
601{ 610{
602 bool autoreap; 611 bool autoreap;
603 612 struct task_struct *p, *n;
604 /* 613 LIST_HEAD(dead);
605 * This does two things:
606 *
607 * A. Make init inherit all the child processes
608 * B. Check to see if any process groups have become orphaned
609 * as a result of our exiting, and if they have any stopped
610 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
611 */
612 forget_original_parent(tsk);
613 614
614 write_lock_irq(&tasklist_lock); 615 write_lock_irq(&tasklist_lock);
616 forget_original_parent(tsk, &dead);
617
615 if (group_dead) 618 if (group_dead)
616 kill_orphaned_pgrp(tsk->group_leader, NULL); 619 kill_orphaned_pgrp(tsk->group_leader, NULL);
617 620
@@ -629,15 +632,18 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
629 } 632 }
630 633
631 tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE; 634 tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
635 if (tsk->exit_state == EXIT_DEAD)
636 list_add(&tsk->ptrace_entry, &dead);
632 637
633 /* mt-exec, de_thread() is waiting for group leader */ 638 /* mt-exec, de_thread() is waiting for group leader */
634 if (unlikely(tsk->signal->notify_count < 0)) 639 if (unlikely(tsk->signal->notify_count < 0))
635 wake_up_process(tsk->signal->group_exit_task); 640 wake_up_process(tsk->signal->group_exit_task);
636 write_unlock_irq(&tasklist_lock); 641 write_unlock_irq(&tasklist_lock);
637 642
638 /* If the process is dead, release it - nobody will wait for it */ 643 list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
639 if (autoreap) 644 list_del_init(&p->ptrace_entry);
640 release_task(tsk); 645 release_task(p);
646 }
641} 647}
642 648
643#ifdef CONFIG_DEBUG_STACK_USAGE 649#ifdef CONFIG_DEBUG_STACK_USAGE
@@ -982,8 +988,7 @@ static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
982 */ 988 */
983static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) 989static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
984{ 990{
985 unsigned long state; 991 int state, retval, status;
986 int retval, status, traced;
987 pid_t pid = task_pid_vnr(p); 992 pid_t pid = task_pid_vnr(p);
988 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); 993 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
989 struct siginfo __user *infop; 994 struct siginfo __user *infop;
@@ -1008,21 +1013,25 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1008 } 1013 }
1009 return wait_noreap_copyout(wo, p, pid, uid, why, status); 1014 return wait_noreap_copyout(wo, p, pid, uid, why, status);
1010 } 1015 }
1011
1012 traced = ptrace_reparented(p);
1013 /* 1016 /*
1014 * Move the task's state to DEAD/TRACE, only one thread can do this. 1017 * Move the task's state to DEAD/TRACE, only one thread can do this.
1015 */ 1018 */
1016 state = traced && thread_group_leader(p) ? EXIT_TRACE : EXIT_DEAD; 1019 state = (ptrace_reparented(p) && thread_group_leader(p)) ?
1020 EXIT_TRACE : EXIT_DEAD;
1017 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) 1021 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
1018 return 0; 1022 return 0;
1019 /* 1023 /*
1020 * It can be ptraced but not reparented, check 1024 * We own this thread, nobody else can reap it.
1021 * thread_group_leader() to filter out sub-threads.
1022 */ 1025 */
1023 if (likely(!traced) && thread_group_leader(p)) { 1026 read_unlock(&tasklist_lock);
1024 struct signal_struct *psig; 1027 sched_annotate_sleep();
1025 struct signal_struct *sig; 1028
1029 /*
1030 * Check thread_group_leader() to exclude the traced sub-threads.
1031 */
1032 if (state == EXIT_DEAD && thread_group_leader(p)) {
1033 struct signal_struct *sig = p->signal;
1034 struct signal_struct *psig = current->signal;
1026 unsigned long maxrss; 1035 unsigned long maxrss;
1027 cputime_t tgutime, tgstime; 1036 cputime_t tgutime, tgstime;
1028 1037
@@ -1034,21 +1043,20 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1034 * accumulate in the parent's signal_struct c* fields. 1043 * accumulate in the parent's signal_struct c* fields.
1035 * 1044 *
1036 * We don't bother to take a lock here to protect these 1045 * We don't bother to take a lock here to protect these
1037 * p->signal fields, because they are only touched by 1046 * p->signal fields because the whole thread group is dead
1038 * __exit_signal, which runs with tasklist_lock 1047 * and nobody can change them.
1039 * write-locked anyway, and so is excluded here. We do 1048 *
1040 * need to protect the access to parent->signal fields, 1049 * psig->stats_lock also protects us from our sub-theads
1041 * as other threads in the parent group can be right 1050 * which can reap other children at the same time. Until
1042 * here reaping other children at the same time. 1051 * we change k_getrusage()-like users to rely on this lock
1052 * we have to take ->siglock as well.
1043 * 1053 *
1044 * We use thread_group_cputime_adjusted() to get times for 1054 * We use thread_group_cputime_adjusted() to get times for
1045 * the thread group, which consolidates times for all threads 1055 * the thread group, which consolidates times for all threads
1046 * in the group including the group leader. 1056 * in the group including the group leader.
1047 */ 1057 */
1048 thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1058 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1049 spin_lock_irq(&p->real_parent->sighand->siglock); 1059 spin_lock_irq(&current->sighand->siglock);
1050 psig = p->real_parent->signal;
1051 sig = p->signal;
1052 write_seqlock(&psig->stats_lock); 1060 write_seqlock(&psig->stats_lock);
1053 psig->cutime += tgutime + sig->cutime; 1061 psig->cutime += tgutime + sig->cutime;
1054 psig->cstime += tgstime + sig->cstime; 1062 psig->cstime += tgstime + sig->cstime;
@@ -1073,16 +1081,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1073 task_io_accounting_add(&psig->ioac, &p->ioac); 1081 task_io_accounting_add(&psig->ioac, &p->ioac);
1074 task_io_accounting_add(&psig->ioac, &sig->ioac); 1082 task_io_accounting_add(&psig->ioac, &sig->ioac);
1075 write_sequnlock(&psig->stats_lock); 1083 write_sequnlock(&psig->stats_lock);
1076 spin_unlock_irq(&p->real_parent->sighand->siglock); 1084 spin_unlock_irq(&current->sighand->siglock);
1077 } 1085 }
1078 1086
1079 /*
1080 * Now we are sure this task is interesting, and no other
1081 * thread can reap it because we its state == DEAD/TRACE.
1082 */
1083 read_unlock(&tasklist_lock);
1084 sched_annotate_sleep();
1085
1086 retval = wo->wo_rusage 1087 retval = wo->wo_rusage
1087 ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; 1088 ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
1088 status = (p->signal->flags & SIGNAL_GROUP_EXIT) 1089 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
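The reparenting rework above changes how orphaned children are handed to a PR_SET_CHILD_SUBREAPER ancestor. As a quick illustration of the userspace semantics involved, here is a minimal sketch (not part of this series; the file name and output are purely illustrative):

/* subreaper_demo.c - hypothetical userspace sketch, not from this series */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/prctl.h>
#include <sys/wait.h>

int main(void)
{
	/* Mark ourselves as a child subreaper (like a service manager). */
	if (prctl(PR_SET_CHILD_SUBREAPER, 1) != 0) {
		perror("prctl");
		return 1;
	}

	pid_t child = fork();
	if (child == 0) {
		if (fork() == 0) {
			/* Grandchild: outlive the middle process. */
			sleep(1);
			printf("orphan %d reparented to %d\n",
			       (int)getpid(), (int)getppid());
			_exit(0);
		}
		_exit(0);	/* Middle process exits, orphaning the grandchild. */
	}

	waitpid(child, NULL, 0);	/* Reap the middle process. */
	sleep(2);
	waitpid(-1, NULL, 0);		/* The orphan is now our child; reap it. */
	return 0;
}

After the middle process exits, the orphan is reparented to the nearest live subreaper (this process) rather than to init, which is exactly the path find_new_reaper() walks above.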
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 80f7a6d00519..2777f40a9c7b 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -47,13 +47,6 @@ extern int max_threads;
 
 static struct workqueue_struct *khelper_wq;
 
-/*
- * kmod_thread_locker is used for deadlock avoidance. There is no explicit
- * locking to protect this global - it is private to the singleton khelper
- * thread and should only ever be modified by that thread.
- */
-static const struct task_struct *kmod_thread_locker;
-
 #define CAP_BSET	(void *)1
 #define CAP_PI		(void *)2
 
@@ -223,7 +216,6 @@ static void umh_complete(struct subprocess_info *sub_info)
 static int ____call_usermodehelper(void *data)
 {
 	struct subprocess_info *sub_info = data;
-	int wait = sub_info->wait & ~UMH_KILLABLE;
 	struct cred *new;
 	int retval;
 
@@ -267,20 +259,13 @@ static int ____call_usermodehelper(void *data)
 out:
 	sub_info->retval = retval;
 	/* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
-	if (wait != UMH_WAIT_PROC)
+	if (!(sub_info->wait & UMH_WAIT_PROC))
 		umh_complete(sub_info);
 	if (!retval)
 		return 0;
 	do_exit(0);
 }
 
-static int call_helper(void *data)
-{
-	/* Worker thread started blocking khelper thread. */
-	kmod_thread_locker = current;
-	return ____call_usermodehelper(data);
-}
-
 /* Keventd can't block, but this (a child) can. */
 static int wait_for_helper(void *data)
 {
@@ -323,21 +308,14 @@ static void __call_usermodehelper(struct work_struct *work)
 {
 	struct subprocess_info *sub_info =
 		container_of(work, struct subprocess_info, work);
-	int wait = sub_info->wait & ~UMH_KILLABLE;
 	pid_t pid;
 
-	/* CLONE_VFORK: wait until the usermode helper has execve'd
-	 * successfully We need the data structures to stay around
-	 * until that is done. */
-	if (wait == UMH_WAIT_PROC)
+	if (sub_info->wait & UMH_WAIT_PROC)
 		pid = kernel_thread(wait_for_helper, sub_info,
 				    CLONE_FS | CLONE_FILES | SIGCHLD);
-	else {
-		pid = kernel_thread(call_helper, sub_info,
-				    CLONE_VFORK | SIGCHLD);
-		/* Worker thread stopped blocking khelper thread. */
-		kmod_thread_locker = NULL;
-	}
+	else
+		pid = kernel_thread(____call_usermodehelper, sub_info,
+				    SIGCHLD);
 
 	if (pid < 0) {
 		sub_info->retval = pid;
@@ -571,17 +549,6 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 		goto out;
 	}
 	/*
-	 * Worker thread must not wait for khelper thread at below
-	 * wait_for_completion() if the thread was created with CLONE_VFORK
-	 * flag, for khelper thread is already waiting for the thread at
-	 * wait_for_completion() in do_fork().
-	 */
-	if (wait != UMH_NO_WAIT && current == kmod_thread_locker) {
-		retval = -EBUSY;
-		goto out;
-	}
-
-	/*
 	 * Set the completion pointer only if there is a waiter.
 	 * This makes it possible to use umh_complete to free
 	 * the data structure in case of UMH_NO_WAIT.
diff --git a/kernel/panic.c b/kernel/panic.c
index cf80672b7924..4d8d6f906dec 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -33,6 +33,7 @@ static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 static bool crash_kexec_post_notifiers;
+int panic_on_warn __read_mostly;
 
 int panic_timeout = CONFIG_PANIC_TIMEOUT;
 EXPORT_SYMBOL_GPL(panic_timeout);
@@ -428,6 +429,17 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
 	if (args)
 		vprintk(args->fmt, args->args);
 
+	if (panic_on_warn) {
+		/*
+		 * This thread may hit another WARN() in the panic path.
+		 * Resetting this prevents additional WARN() from panicking the
+		 * system on this thread.  Other threads are blocked by the
+		 * panic_mutex in panic().
+		 */
+		panic_on_warn = 0;
+		panic("panic_on_warn set ...\n");
+	}
+
 	print_modules();
 	dump_stack();
 	print_oops_end_marker();
@@ -485,6 +497,7 @@ EXPORT_SYMBOL(__stack_chk_fail);
 
 core_param(panic, panic_timeout, int, 0644);
 core_param(pause_on_oops, pause_on_oops, int, 0644);
+core_param(panic_on_warn, panic_on_warn, int, 0644);
 
 static int __init setup_crash_kexec_post_notifiers(char *s)
 {
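A minimal sketch of how the new knob behaves (hypothetical test module, not part of this series): with kernel.panic_on_warn=1 set via sysctl, or panic_on_warn on the kernel command line, the first WARN() that reaches warn_slowpath_common() now panics the machine instead of only logging a backtrace.

/* warn_panic_demo.c - hypothetical module for exercising panic_on_warn */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>

static int __init warn_panic_demo_init(void)
{
	/* With panic_on_warn set, this WARN() escalates to panic(). */
	WARN(1, "panic_on_warn demo\n");
	return 0;
}

static void __exit warn_panic_demo_exit(void)
{
}

module_init(warn_panic_demo_init);
module_exit(warn_panic_demo_exit);
MODULE_LICENSE("GPL");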
diff --git a/kernel/pid.c b/kernel/pid.c
index 9b9a26698144..82430c858d69 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -341,6 +341,8 @@ out:
 
 out_unlock:
 	spin_unlock_irq(&pidmap_lock);
+	put_pid_ns(ns);
+
 out_free:
 	while (++i <= ns->level)
 		free_pidmap(pid->numbers + i);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index db95d8eb761b..bc6d6a89b6e6 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -190,7 +190,11 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 	/* Don't allow any more processes into the pid namespace */
 	disable_pid_allocation(pid_ns);
 
-	/* Ignore SIGCHLD causing any terminated children to autoreap */
+	/*
+	 * Ignore SIGCHLD causing any terminated children to autoreap.
+	 * This speeds up the namespace shutdown, plus see the comment
+	 * below.
+	 */
 	spin_lock_irq(&me->sighand->siglock);
 	me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
 	spin_unlock_irq(&me->sighand->siglock);
@@ -223,15 +227,31 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
 	}
 	read_unlock(&tasklist_lock);
 
-	/* Firstly reap the EXIT_ZOMBIE children we may have. */
+	/*
+	 * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
+	 * sys_wait4() will also block until our children traced from the
+	 * parent namespace are detached and become EXIT_DEAD.
+	 */
 	do {
 		clear_thread_flag(TIF_SIGPENDING);
 		rc = sys_wait4(-1, NULL, __WALL, NULL);
 	} while (rc != -ECHILD);
 
 	/*
-	 * sys_wait4() above can't reap the TASK_DEAD children.
-	 * Make sure they all go away, see free_pid().
+	 * sys_wait4() above can't reap the EXIT_DEAD children but we do not
+	 * really care, we could reparent them to the global init. We could
+	 * exit and reap ->child_reaper even if it is not the last thread in
+	 * this pid_ns, free_pid(nr_hashed == 0) calls proc_cleanup_work(),
+	 * pid_ns can not go away until proc_kill_sb() drops the reference.
+	 *
+	 * But this ns can also have other tasks injected by setns()+fork().
+	 * Again, ignoring the user visible semantics we do not really need
+	 * to wait until they are all reaped, but they can be reparented to
+	 * us and thus we need to ensure that pid->child_reaper stays valid
+	 * until they all go away. See free_pid()->wake_up_process().
+	 *
+	 * We rely on ignored SIGCHLD, an injected zombie must be autoreaped
+	 * if reparented.
 	 */
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
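For context on what zap_pid_ns_processes() is cleaning up after, a hedged userspace sketch (hypothetical, requires CAP_SYS_ADMIN): once the init task of a PID namespace exits, every remaining task in that namespace is SIGKILLed and reaped along the path described in the comments above.

/* pidns_zap_demo.c - hypothetical sketch, needs CAP_SYS_ADMIN */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	if (unshare(CLONE_NEWPID) != 0) {
		perror("unshare");
		return 1;
	}

	pid_t ns_init = fork();	/* First child becomes PID 1 of the new namespace. */
	if (ns_init == 0) {
		if (fork() == 0) {
			pause();	/* Killed when the namespace init exits. */
			_exit(0);
		}
		_exit(0);	/* init exits -> zap_pid_ns_processes() kills the rest. */
	}

	waitpid(ns_init, NULL, 0);
	return 0;
}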
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index c8755e7e1dba..ea27c019655a 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -62,9 +62,6 @@ int console_printk[4] = {
 	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
 };
 
-/* Deferred messaged from sched code are marked by this special level */
-#define SCHED_MESSAGE_LOGLEVEL -2
-
 /*
  * Low level drivers may need that to know if they can schedule in
  * their unblank() callback or not. So let's export it.
@@ -1259,7 +1256,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	bool clear = false;
-	static int saved_console_loglevel = -1;
+	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
 	int error;
 
 	error = check_syslog_permissions(type, from_file);
@@ -1316,15 +1313,15 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 		break;
 	/* Disable logging to console */
 	case SYSLOG_ACTION_CONSOLE_OFF:
-		if (saved_console_loglevel == -1)
+		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
 			saved_console_loglevel = console_loglevel;
 		console_loglevel = minimum_console_loglevel;
 		break;
 	/* Enable logging to console */
 	case SYSLOG_ACTION_CONSOLE_ON:
-		if (saved_console_loglevel != -1) {
+		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
 			console_loglevel = saved_console_loglevel;
-			saved_console_loglevel = -1;
+			saved_console_loglevel = LOGLEVEL_DEFAULT;
 		}
 		break;
 	/* Set level of messages printed to console */
@@ -1336,7 +1333,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 			len = minimum_console_loglevel;
 		console_loglevel = len;
 		/* Implicitly re-enable logging to console */
-		saved_console_loglevel = -1;
+		saved_console_loglevel = LOGLEVEL_DEFAULT;
 		error = 0;
 		break;
 	/* Number of chars in the log buffer */
@@ -1627,10 +1624,10 @@ asmlinkage int vprintk_emit(int facility, int level,
 	int printed_len = 0;
 	bool in_sched = false;
 	/* cpu currently holding logbuf_lock in this function */
-	static volatile unsigned int logbuf_cpu = UINT_MAX;
+	static unsigned int logbuf_cpu = UINT_MAX;
 
-	if (level == SCHED_MESSAGE_LOGLEVEL) {
-		level = -1;
+	if (level == LOGLEVEL_SCHED) {
+		level = LOGLEVEL_DEFAULT;
 		in_sched = true;
 	}
 
@@ -1695,8 +1692,9 @@ asmlinkage int vprintk_emit(int facility, int level,
 		const char *end_of_header = printk_skip_level(text);
 		switch (kern_level) {
 		case '0' ... '7':
-			if (level == -1)
+			if (level == LOGLEVEL_DEFAULT)
 				level = kern_level - '0';
+			/* fallthrough */
 		case 'd':	/* KERN_DEFAULT */
 			lflags |= LOG_PREFIX;
 		}
@@ -1710,7 +1708,7 @@ asmlinkage int vprintk_emit(int facility, int level,
 		}
 	}
 
-	if (level == -1)
+	if (level == LOGLEVEL_DEFAULT)
 		level = default_message_loglevel;
 
 	if (dict)
@@ -1788,7 +1786,7 @@ EXPORT_SYMBOL(vprintk_emit);
 
 asmlinkage int vprintk(const char *fmt, va_list args)
 {
-	return vprintk_emit(0, -1, NULL, 0, fmt, args);
+	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
 }
 EXPORT_SYMBOL(vprintk);
 
@@ -1842,7 +1840,7 @@ asmlinkage __visible int printk(const char *fmt, ...)
 	}
 #endif
 	va_start(args, fmt);
-	r = vprintk_emit(0, -1, NULL, 0, fmt, args);
+	r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
 	va_end(args);
 
 	return r;
@@ -1881,23 +1879,20 @@ static size_t cont_print_text(char *text, size_t size) { return 0; }
 #ifdef CONFIG_EARLY_PRINTK
 struct console *early_console;
 
-void early_vprintk(const char *fmt, va_list ap)
-{
-	if (early_console) {
-		char buf[512];
-		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-
-		early_console->write(early_console, buf, n);
-	}
-}
-
 asmlinkage __visible void early_printk(const char *fmt, ...)
 {
 	va_list ap;
+	char buf[512];
+	int n;
+
+	if (!early_console)
+		return;
 
 	va_start(ap, fmt);
-	early_vprintk(fmt, ap);
+	n = vscnprintf(buf, sizeof(buf), fmt, ap);
 	va_end(ap);
+
+	early_console->write(early_console, buf, n);
 }
 #endif
 
@@ -2634,7 +2629,7 @@ int printk_deferred(const char *fmt, ...)
 
 	preempt_disable();
 	va_start(args, fmt);
-	r = vprintk_emit(0, SCHED_MESSAGE_LOGLEVEL, NULL, 0, fmt, args);
+	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
 	va_end(args);
 
 	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
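A hedged usage sketch (the caller and message are hypothetical): printk_deferred() passes LOGLEVEL_SCHED so that vprintk_emit() falls back to LOGLEVEL_DEFAULT for the record itself and defers console output via the printk_pending irq_work, which is what makes it usable from scheduler context where the console locks must not be taken.

	/* e.g. from code running under the runqueue lock */
	printk_deferred(KERN_WARNING "sched: hypothetical diagnostic on CPU %d\n",
			smp_processor_id());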
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 54e75226c2c4..1eb9d90c3af9 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -485,36 +485,19 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
 
 /*
  * Detach all tasks we were using ptrace on. Called with tasklist held
- * for writing, and returns with it held too. But note it can release
- * and reacquire the lock.
+ * for writing.
  */
-void exit_ptrace(struct task_struct *tracer)
-	__releases(&tasklist_lock)
-	__acquires(&tasklist_lock)
+void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
 {
 	struct task_struct *p, *n;
-	LIST_HEAD(ptrace_dead);
-
-	if (likely(list_empty(&tracer->ptraced)))
-		return;
 
 	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
 		if (unlikely(p->ptrace & PT_EXITKILL))
 			send_sig_info(SIGKILL, SEND_SIG_FORCED, p);
 
 		if (__ptrace_detach(tracer, p))
-			list_add(&p->ptrace_entry, &ptrace_dead);
-	}
-
-	write_unlock_irq(&tasklist_lock);
-	BUG_ON(!list_empty(&tracer->ptraced));
-
-	list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
-		list_del_init(&p->ptrace_entry);
-		release_task(p);
+			list_add(&p->ptrace_entry, dead);
 	}
-
-	write_lock_irq(&tasklist_lock);
 }
 
 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
deleted file mode 100644
index e791130f85a7..000000000000
--- a/kernel/res_counter.c
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * resource cgroups
3 *
4 * Copyright 2007 OpenVZ SWsoft Inc
5 *
6 * Author: Pavel Emelianov <xemul@openvz.org>
7 *
8 */
9
10#include <linux/types.h>
11#include <linux/parser.h>
12#include <linux/fs.h>
13#include <linux/res_counter.h>
14#include <linux/uaccess.h>
15#include <linux/mm.h>
16
17void res_counter_init(struct res_counter *counter, struct res_counter *parent)
18{
19 spin_lock_init(&counter->lock);
20 counter->limit = RES_COUNTER_MAX;
21 counter->soft_limit = RES_COUNTER_MAX;
22 counter->parent = parent;
23}
24
25static u64 res_counter_uncharge_locked(struct res_counter *counter,
26 unsigned long val)
27{
28 if (WARN_ON(counter->usage < val))
29 val = counter->usage;
30
31 counter->usage -= val;
32 return counter->usage;
33}
34
35static int res_counter_charge_locked(struct res_counter *counter,
36 unsigned long val, bool force)
37{
38 int ret = 0;
39
40 if (counter->usage + val > counter->limit) {
41 counter->failcnt++;
42 ret = -ENOMEM;
43 if (!force)
44 return ret;
45 }
46
47 counter->usage += val;
48 if (counter->usage > counter->max_usage)
49 counter->max_usage = counter->usage;
50 return ret;
51}
52
53static int __res_counter_charge(struct res_counter *counter, unsigned long val,
54 struct res_counter **limit_fail_at, bool force)
55{
56 int ret, r;
57 unsigned long flags;
58 struct res_counter *c, *u;
59
60 r = ret = 0;
61 *limit_fail_at = NULL;
62 local_irq_save(flags);
63 for (c = counter; c != NULL; c = c->parent) {
64 spin_lock(&c->lock);
65 r = res_counter_charge_locked(c, val, force);
66 spin_unlock(&c->lock);
67 if (r < 0 && !ret) {
68 ret = r;
69 *limit_fail_at = c;
70 if (!force)
71 break;
72 }
73 }
74
75 if (ret < 0 && !force) {
76 for (u = counter; u != c; u = u->parent) {
77 spin_lock(&u->lock);
78 res_counter_uncharge_locked(u, val);
79 spin_unlock(&u->lock);
80 }
81 }
82 local_irq_restore(flags);
83
84 return ret;
85}
86
87int res_counter_charge(struct res_counter *counter, unsigned long val,
88 struct res_counter **limit_fail_at)
89{
90 return __res_counter_charge(counter, val, limit_fail_at, false);
91}
92
93int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
94 struct res_counter **limit_fail_at)
95{
96 return __res_counter_charge(counter, val, limit_fail_at, true);
97}
98
99u64 res_counter_uncharge_until(struct res_counter *counter,
100 struct res_counter *top,
101 unsigned long val)
102{
103 unsigned long flags;
104 struct res_counter *c;
105 u64 ret = 0;
106
107 local_irq_save(flags);
108 for (c = counter; c != top; c = c->parent) {
109 u64 r;
110 spin_lock(&c->lock);
111 r = res_counter_uncharge_locked(c, val);
112 if (c == counter)
113 ret = r;
114 spin_unlock(&c->lock);
115 }
116 local_irq_restore(flags);
117 return ret;
118}
119
120u64 res_counter_uncharge(struct res_counter *counter, unsigned long val)
121{
122 return res_counter_uncharge_until(counter, NULL, val);
123}
124
125static inline unsigned long long *
126res_counter_member(struct res_counter *counter, int member)
127{
128 switch (member) {
129 case RES_USAGE:
130 return &counter->usage;
131 case RES_MAX_USAGE:
132 return &counter->max_usage;
133 case RES_LIMIT:
134 return &counter->limit;
135 case RES_FAILCNT:
136 return &counter->failcnt;
137 case RES_SOFT_LIMIT:
138 return &counter->soft_limit;
139 };
140
141 BUG();
142 return NULL;
143}
144
145ssize_t res_counter_read(struct res_counter *counter, int member,
146 const char __user *userbuf, size_t nbytes, loff_t *pos,
147 int (*read_strategy)(unsigned long long val, char *st_buf))
148{
149 unsigned long long *val;
150 char buf[64], *s;
151
152 s = buf;
153 val = res_counter_member(counter, member);
154 if (read_strategy)
155 s += read_strategy(*val, s);
156 else
157 s += sprintf(s, "%llu\n", *val);
158 return simple_read_from_buffer((void __user *)userbuf, nbytes,
159 pos, buf, s - buf);
160}
161
162#if BITS_PER_LONG == 32
163u64 res_counter_read_u64(struct res_counter *counter, int member)
164{
165 unsigned long flags;
166 u64 ret;
167
168 spin_lock_irqsave(&counter->lock, flags);
169 ret = *res_counter_member(counter, member);
170 spin_unlock_irqrestore(&counter->lock, flags);
171
172 return ret;
173}
174#else
175u64 res_counter_read_u64(struct res_counter *counter, int member)
176{
177 return *res_counter_member(counter, member);
178}
179#endif
180
181int res_counter_memparse_write_strategy(const char *buf,
182 unsigned long long *resp)
183{
184 char *end;
185 unsigned long long res;
186
187 /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
188 if (*buf == '-') {
189 int rc = kstrtoull(buf + 1, 10, &res);
190
191 if (rc)
192 return rc;
193 if (res != 1)
194 return -EINVAL;
195 *resp = RES_COUNTER_MAX;
196 return 0;
197 }
198
199 res = memparse(buf, &end);
200 if (*end != '\0')
201 return -EINVAL;
202
203 if (PAGE_ALIGN(res) >= res)
204 res = PAGE_ALIGN(res);
205 else
206 res = RES_COUNTER_MAX;
207
208 *resp = res;
209
210 return 0;
211}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index bb398c0c5f08..b5797b78add6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4527,8 +4527,10 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
 #endif
+	ppid = 0;
 	rcu_read_lock();
-	ppid = task_pid_nr(rcu_dereference(p->real_parent));
+	if (pid_alive(p))
+		ppid = task_pid_nr(rcu_dereference(p->real_parent));
 	rcu_read_unlock();
 	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
 		task_pid_nr(p), ppid,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 15f2511a1b7c..7c54ff79afd7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1104,6 +1104,15 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec,
 	},
 #endif
+	{
+		.procname	= "panic_on_warn",
+		.data		= &panic_on_warn,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 	{ }
 };
 
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 9a4f750a2963..7e7746a42a62 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -137,6 +137,7 @@ static const struct bin_table bin_kern_table[] = {
 	{ CTL_INT,	KERN_COMPAT_LOG,		"compat-log" },
 	{ CTL_INT,	KERN_MAX_LOCK_DEPTH,		"max_lock_depth" },
 	{ CTL_INT,	KERN_PANIC_ON_NMI,		"panic_on_unrecovered_nmi" },
+	{ CTL_INT,	KERN_PANIC_ON_WARN,		"panic_on_warn" },
 	{}
 };
 