Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/hw_breakpoint.c  |  4
-rw-r--r--  kernel/fork.c                  | 65
-rw-r--r--  kernel/hung_task.c             | 11
-rw-r--r--  kernel/irq/autoprobe.c         |  4
-rw-r--r--  kernel/irq/chip.c              | 42
-rw-r--r--  kernel/irq/internals.h         |  2
-rw-r--r--  kernel/irq/manage.c            | 46
-rw-r--r--  kernel/kprobes.c               | 12
-rw-r--r--  kernel/pid.c                   |  4
-rw-r--r--  kernel/printk.c                |  6
-rw-r--r--  kernel/sched/core.c            |  1
-rw-r--r--  kernel/sched/fair.c            |  2
12 files changed, 144 insertions, 55 deletions
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b7971d6f38bf..ee706ce44aa0 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -651,10 +651,10 @@ int __init init_hw_breakpoint(void)
 
 err_alloc:
 	for_each_possible_cpu(err_cpu) {
-		if (err_cpu == cpu)
-			break;
 		for (i = 0; i < TYPE_MAX; i++)
 			kfree(per_cpu(nr_task_bp_pinned[i], cpu));
+		if (err_cpu == cpu)
+			break;
 	}
 
 	return -ENOMEM;
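
The reordering matters: allocation can fail partway through a CPU's own per-type allocations, and the old code broke out of the cleanup loop before freeing anything for that failing CPU, leaking its partial allocations. A minimal userspace sketch of the corrected free-through-the-failing-slot pattern (illustrative names, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 8

int main(void)
{
	void *buf[NSLOTS] = { NULL };
	int i, failed_at = 5;			/* pretend slot 5 failed mid-setup */

	for (i = 0; i < NSLOTS; i++) {
		buf[i] = malloc(64);
		if (!buf[i] || i == failed_at)	/* simulate the failure */
			goto err_alloc;
	}
	return 0;

err_alloc:
	/* Free up to *and including* the failing slot, as the fix does:
	 * clean up first, then check whether this was the last slot touched.
	 * free(NULL) is a harmless no-op. */
	for (i = 0; i < NSLOTS; i++) {
		free(buf[i]);
		if (i == failed_at)
			break;
	}
	return 1;
}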
diff --git a/kernel/fork.c b/kernel/fork.c
index b77fd559c78e..26a7a6707fa7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -66,6 +66,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/oom.h>
 #include <linux/khugepaged.h>
+#include <linux/signalfd.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -667,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 	return mm;
 }
 
+static void complete_vfork_done(struct task_struct *tsk)
+{
+	struct completion *vfork;
+
+	task_lock(tsk);
+	vfork = tsk->vfork_done;
+	if (likely(vfork)) {
+		tsk->vfork_done = NULL;
+		complete(vfork);
+	}
+	task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+				struct completion *vfork)
+{
+	int killed;
+
+	freezer_do_not_count();
+	killed = wait_for_completion_killable(vfork);
+	freezer_count();
+
+	if (killed) {
+		task_lock(child);
+		child->vfork_done = NULL;
+		task_unlock(child);
+	}
+
+	put_task_struct(child);
+	return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -682,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-	struct completion *vfork_done = tsk->vfork_done;
-
 	/* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
 	if (unlikely(tsk->robust_list)) {
@@ -703,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	/* Get rid of any cached register state */
 	deactivate_mm(tsk, mm);
 
-	/* notify parent sleeping on vfork() */
-	if (vfork_done) {
-		tsk->vfork_done = NULL;
-		complete(vfork_done);
-	}
+	if (tsk->vfork_done)
+		complete_vfork_done(tsk);
 
 	/*
 	 * If we're exiting normally, clear a user-space tid field if
 	 * requested. We leave this alone when dying by signal, to leave
 	 * the value intact in a core dump, and to save the unnecessary
-	 * trouble otherwise. Userland only wants this done for a sys_exit.
+	 * trouble, say, a killed vfork parent shouldn't touch this mm.
+	 * Userland only wants this done for a sys_exit.
 	 */
 	if (tsk->clear_child_tid) {
 		if (!(tsk->flags & PF_SIGNALED) &&
@@ -935,8 +964,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-	if (atomic_dec_and_test(&sighand->count))
+	if (atomic_dec_and_test(&sighand->count)) {
+		signalfd_cleanup(sighand);
 		kmem_cache_free(sighand_cachep, sighand);
+	}
 }
 
 
@@ -1015,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
 	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
 	new_flags |= PF_FORKNOEXEC;
-	new_flags |= PF_STARTING;
 	p->flags = new_flags;
 }
 
@@ -1545,16 +1575,9 @@ long do_fork(unsigned long clone_flags,
 		if (clone_flags & CLONE_VFORK) {
 			p->vfork_done = &vfork;
 			init_completion(&vfork);
+			get_task_struct(p);
 		}
 
-		/*
-		 * We set PF_STARTING at creation in case tracing wants to
-		 * use this to distinguish a fully live task from one that
-		 * hasn't finished SIGSTOP raising yet. Now we clear it
-		 * and set the child going.
-		 */
-		p->flags &= ~PF_STARTING;
-
 		wake_up_new_task(p);
 
 		/* forking complete and child started to run, tell ptracer */
@@ -1562,10 +1585,8 @@ long do_fork(unsigned long clone_flags,
 			ptrace_event(trace, nr);
 
 		if (clone_flags & CLONE_VFORK) {
-			freezer_do_not_count();
-			wait_for_completion(&vfork);
-			freezer_count();
-			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+			if (!wait_for_vfork_done(p, &vfork))
+				ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
 		}
 	} else {
 		nr = PTR_ERR(p);
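
The fork.c changes replace an unconditional, unkillable wait with wait_for_completion_killable() plus a locked handshake: the child (from mm_release()) and a killed parent both race to clear tsk->vfork_done, and whoever takes task_lock() first wins, so the child can never complete() a stack-allocated completion its parent has already abandoned. A rough pthreads analogue of that handshake (userspace sketch only; the mutex stands in for task_lock()):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* plays tsk->alloc_lock / task_lock() */
static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
/* plays tsk->vfork_done; NULL means "nobody is waiting (any more)" */
static struct completion *vfork_done;

static void complete_vfork_done(void)	/* child side, as in mm_release() */
{
	pthread_mutex_lock(&task_lock);
	if (vfork_done) {		/* a killed parent may have detached it */
		pthread_mutex_lock(&vfork_done->lock);
		vfork_done->done = 1;
		pthread_cond_signal(&vfork_done->cond);
		pthread_mutex_unlock(&vfork_done->lock);
		vfork_done = NULL;
	}
	pthread_mutex_unlock(&task_lock);
}

static void *child(void *arg)
{
	complete_vfork_done();		/* child exec()s or exits */
	return NULL;
}

int main(void)			/* parent side, the wait_for_vfork_done() shape */
{
	struct completion vfork = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	pthread_t tid;

	vfork_done = &vfork;			/* p->vfork_done = &vfork */
	pthread_create(&tid, NULL, child, NULL);

	pthread_mutex_lock(&vfork.lock);
	while (!vfork.done)			/* kernel: killable + freezable */
		pthread_cond_wait(&vfork.cond, &vfork.lock);
	pthread_mutex_unlock(&vfork.lock);

	/* Had the wait been interrupted ("killed"), we would instead take
	 * task_lock and set vfork_done = NULL so the child skips complete(). */
	pthread_join(tid, NULL);
	puts("vfork done");
	return 0;
}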
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 2e48ec0c2e91..c21449f85a2a 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+	bool can_cont;
+
 	get_task_struct(g);
 	get_task_struct(t);
 	rcu_read_unlock();
 	cond_resched();
 	rcu_read_lock();
+	can_cont = pid_alive(g) && pid_alive(t);
 	put_task_struct(t);
 	put_task_struct(g);
+
+	return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 			goto unlock;
 		if (!--batch_count) {
 			batch_count = HUNG_TASK_BATCHING;
-			rcu_lock_break(g, t);
-			/* Exit if t or g was unhashed during refresh. */
-			if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+			if (!rcu_lock_break(g, t))
 				goto unlock;
 		}
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
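
The pid_alive() check is the substantive fix: a task can be unhashed from the thread list before its ->state ever reads TASK_DEAD (an autoreaped task is released from exit_notify() while still TASK_RUNNING), so the old test could miss a stale list cursor. An annotated copy of the new helper (the comments here are editorial, not part of the patch):

static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	get_task_struct(g);	/* pin both task_structs across the break */
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();		/* for classic RCU this ends the grace period */
	rcu_read_lock();
	/* pid_alive() goes false as soon as the task is unhashed, even if
	 * its ->state never passed through TASK_DEAD while we were away */
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}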
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 342d8f44e401..0119b9d467ae 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void)
 			if (desc->irq_data.chip->irq_set_type)
 				desc->irq_data.chip->irq_set_type(&desc->irq_data,
 							 IRQ_TYPE_PROBE);
-			irq_startup(desc);
+			irq_startup(desc, false);
 		}
 		raw_spin_unlock_irq(&desc->lock);
 	}
@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void)
 		raw_spin_lock_irq(&desc->lock);
 		if (!desc->action && irq_settings_can_probe(desc)) {
 			desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
-			if (irq_startup(desc))
+			if (irq_startup(desc, false))
 				desc->istate |= IRQS_PENDING;
 		}
 		raw_spin_unlock_irq(&desc->lock);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f7c543a801d9..fb7db75ee0c8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -157,19 +157,22 @@ static void irq_state_set_masked(struct irq_desc *desc)
 	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 }
 
-int irq_startup(struct irq_desc *desc)
+int irq_startup(struct irq_desc *desc, bool resend)
 {
+	int ret = 0;
+
 	irq_state_clr_disabled(desc);
 	desc->depth = 0;
 
 	if (desc->irq_data.chip->irq_startup) {
-		int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
+		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
 		irq_state_clr_masked(desc);
-		return ret;
+	} else {
+		irq_enable(desc);
 	}
-
-	irq_enable(desc);
-	return 0;
+	if (resend)
+		check_irq_resend(desc, desc->irq_data.irq);
+	return ret;
 }
 
 void irq_shutdown(struct irq_desc *desc)
@@ -330,6 +333,24 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(handle_simple_irq);
 
+/*
+ * Called unconditionally from handle_level_irq() and only for oneshot
+ * interrupts from handle_fasteoi_irq()
+ */
+static void cond_unmask_irq(struct irq_desc *desc)
+{
+	/*
+	 * We need to unmask in the following cases:
+	 * - Standard level irq (IRQF_ONESHOT is not set)
+	 * - Oneshot irq which did not wake the thread (caused by a
+	 *   spurious interrupt or a primary handler handling it
+	 *   completely).
+	 */
+	if (!irqd_irq_disabled(&desc->irq_data) &&
+	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
+		unmask_irq(desc);
+}
+
 /**
  * handle_level_irq - Level type irq handler
  * @irq:	the interrupt number
@@ -362,8 +383,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	handle_irq_event(desc);
 
-	if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
-		unmask_irq(desc);
+	cond_unmask_irq(desc);
+
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
@@ -417,6 +438,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	preflow_handler(desc);
 	handle_irq_event(desc);
 
+	if (desc->istate & IRQS_ONESHOT)
+		cond_unmask_irq(desc);
+
 out_eoi:
 	desc->irq_data.chip->irq_eoi(&desc->irq_data);
 out_unlock:
@@ -625,7 +649,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
-		irq_startup(desc);
+		irq_startup(desc, true);
 	}
 out:
 	irq_put_desc_busunlock(desc, flags);
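
cond_unmask_irq() folds the unmask decision into three tests: the line must not have been disabled by the handler, must actually be masked, and no oneshot thread may still own it. The same predicate as a standalone truth-table sketch (userspace, illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool should_unmask(bool disabled, bool masked,
			  unsigned long threads_oneshot)
{
	return !disabled && masked && !threads_oneshot;
}

int main(void)
{
	/* level irq, handler done, no threaded handler pending: unmask */
	printf("%d\n", should_unmask(false, true, 0));	/* 1 */
	/* oneshot irq whose thread is still running: stay masked */
	printf("%d\n", should_unmask(false, true, 0x1));	/* 0 */
	/* disable_irq_nosync() was called from the handler: stay masked */
	printf("%d\n", should_unmask(true, true, 0));	/* 0 */
	return 0;
}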
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b7952316016a..40378ff877e7 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
-extern int irq_startup(struct irq_desc *desc);
+extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a9a9dbe49fea..0f0d4704ddd8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -985,6 +985,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	/* add new interrupt at end of irq queue */
 	do {
+		/*
+		 * Or all existing action->thread_mask bits,
+		 * so we can find the next zero bit for this
+		 * new action.
+		 */
 		thread_mask |= old->thread_mask;
 		old_ptr = &old->next;
 		old = *old_ptr;
@@ -993,14 +998,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}
 
 	/*
-	 * Setup the thread mask for this irqaction. Unlikely to have
-	 * 32 resp 64 irqs sharing one line, but who knows.
+	 * Setup the thread mask for this irqaction for ONESHOT. For
+	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
+	 * conditional in irq_wake_thread().
 	 */
-	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
-		ret = -EBUSY;
-		goto out_mask;
+	if (new->flags & IRQF_ONESHOT) {
+		/*
+		 * Unlikely to have 32 resp 64 irqs sharing one line,
+		 * but who knows.
+		 */
+		if (thread_mask == ~0UL) {
+			ret = -EBUSY;
+			goto out_mask;
+		}
+		/*
+		 * The thread_mask for the action is or'ed to
+		 * desc->thread_active to indicate that the
+		 * IRQF_ONESHOT thread handler has been woken, but not
+		 * yet finished. The bit is cleared when a thread
+		 * completes. When all threads of a shared interrupt
+		 * line have completed desc->threads_active becomes
+		 * zero and the interrupt line is unmasked. See
+		 * handle.c:irq_wake_thread() for further information.
+		 *
+		 * If no thread is woken by primary (hard irq context)
+		 * interrupt handlers, then desc->threads_active is
+		 * also checked for zero to unmask the irq line in the
+		 * affected hard irq flow handlers
+		 * (handle_[fasteoi|level]_irq).
+		 *
+		 * The new action gets the first zero bit of
+		 * thread_mask assigned. See the loop above which or's
+		 * all existing action->thread_mask bits.
+		 */
+		new->thread_mask = 1 << ffz(thread_mask);
 	}
-	new->thread_mask = 1 << ffz(thread_mask);
 
 	if (!shared) {
 		init_waitqueue_head(&desc->wait_for_threads);
@@ -1027,7 +1059,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		desc->istate |= IRQS_ONESHOT;
 
 	if (irq_settings_can_autoenable(desc))
-		irq_startup(desc);
+		irq_startup(desc, true);
 	else
 		/* Undo nested disables: */
 		desc->depth = 1;
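
The new comment block documents the bit-slot scheme: each IRQF_ONESHOT action on a shared line owns one bit, allocated as the first zero bit of the accumulated thread_mask and or'ed into desc->threads_active while its thread runs. A standalone demo of that allocation (userspace; __builtin_ctzl(~x) stands in for the kernel's ffz(), and is undefined for x == ~0UL, which is exactly the case the -EBUSY check filters out first):

#include <stdio.h>

static unsigned long ffz_demo(unsigned long x)
{
	return __builtin_ctzl(~x);	/* index of the first zero bit of x */
}

int main(void)
{
	unsigned long thread_mask = 0;	/* or of all existing actions' masks */
	int action;

	for (action = 0; action < 4; action++) {
		unsigned long bit = 1UL << ffz_demo(thread_mask);

		printf("shared action %d gets thread_mask 0x%lx\n", action, bit);
		thread_mask |= bit;	/* what the do { } loop accumulates */
	}
	return 0;
}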
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9788c0ec6f43..c62b8546cc90 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr) ||
 	    ftrace_text_reserved(p->addr, p->addr) ||
-	    jump_label_text_reserved(p->addr, p->addr))
-		goto fail_with_jump_label;
+	    jump_label_text_reserved(p->addr, p->addr)) {
+		ret = -EINVAL;
+		goto cannot_probe;
+	}
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		 * its code to prohibit unexpected unloading.
 		 */
 		if (unlikely(!try_module_get(probed_mod)))
-			goto fail_with_jump_label;
+			goto cannot_probe;
 
 		/*
 		 * If the module freed .init.text, we couldn't insert
@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 		if (within_module_init((unsigned long)p->addr, probed_mod) &&
 		    probed_mod->state != MODULE_STATE_COMING) {
 			module_put(probed_mod);
-			goto fail_with_jump_label;
+			goto cannot_probe;
 		}
 		/* ret will be updated by following code */
 	}
@@ -1409,7 +1411,7 @@ out:
 
 	return ret;
 
-fail_with_jump_label:
+cannot_probe:
 	preempt_enable();
 	jump_label_unlock();
 	return ret;
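
Beyond the rename, the visible change is that the first failure path now sets ret = -EINVAL explicitly before jumping, rather than relying on whatever ret happened to hold. The generic shape of the idiom (a standalone sketch, not the kprobes code):

#include <stdio.h>

static void take_locks(void) { /* preempt_disable(); jump_label_lock(); */ }
static void drop_locks(void) { /* jump_label_unlock(); preempt_enable(); */ }

static int register_thing(int addr_is_bad)
{
	int ret = 0;

	take_locks();
	if (addr_is_bad) {
		ret = -22;		/* -EINVAL, set explicitly before the jump */
		goto cannot_probe;
	}
	/* ... the actual registration work ... */
	drop_locks();
	return ret;

cannot_probe:				/* named for the reason, not one caller */
	drop_locks();
	return ret;
}

int main(void)
{
	printf("%d %d\n", register_thing(0), register_thing(1));
	return 0;
}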
diff --git a/kernel/pid.c b/kernel/pid.c
index ce8e00deaccb..9f08dfabaf13 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -543,12 +543,12 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
  */
 void __init pidhash_init(void)
 {
-	int i, pidhash_size;
+	unsigned int i, pidhash_size;
 
 	pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
 					   HASH_EARLY | HASH_SMALL,
 					   &pidhash_shift, NULL, 4096);
-	pidhash_size = 1 << pidhash_shift;
+	pidhash_size = 1U << pidhash_shift;
 
 	for (i = 0; i < pidhash_size; i++)
 		INIT_HLIST_HEAD(&pid_hash[i]);
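
Type hygiene rather than a live bug: pidhash_shift is small in practice, but with signed int a shift of 31 would make 1 << pidhash_shift undefined behaviour, and i < pidhash_size would be a mixed-sign comparison. A quick illustration of the edge the U suffix removes:

#include <stdio.h>

int main(void)
{
	unsigned int shift = 31;		/* extreme value to show the edge */
	unsigned int ok = 1U << shift;		/* well-defined: 2147483648 */

	printf("%u\n", ok);
	/* int bad = 1 << shift;  -- undefined: 2^31 overflows signed int */
	return 0;
}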
diff --git a/kernel/printk.c b/kernel/printk.c
index 13c0a1143f49..32690a0b7a18 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -702,6 +702,9 @@ static bool printk_time = 0;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static bool always_kmsg_dump;
+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
+
 /* Check if we have any console registered that can be called early in boot. */
 static int have_callable_console(void)
 {
@@ -1732,6 +1735,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 	unsigned long l1, l2;
 	unsigned long flags;
 
+	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
+		return;
+
 	/* Theoretically, the log could move on after we do this, but
 	   there's not a lot we can do about that.  The new messages
 	   will overwrite the start of what we dump. */
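
The gate relies on the ordering of enum kmsg_dump_reason: panic and oops sort before the clean-shutdown reasons, so those later reasons are skipped unless the new printk.always_kmsg_dump parameter is set. A sketch of the predicate (the enum here is illustrative, not the kernel's exact list):

#include <stdbool.h>
#include <stdio.h>

enum kmsg_dump_reason { KMSG_DUMP_PANIC, KMSG_DUMP_OOPS, KMSG_DUMP_RESTART };

static bool should_dump(enum kmsg_dump_reason reason, bool always_kmsg_dump)
{
	return reason <= KMSG_DUMP_OOPS || always_kmsg_dump;
}

int main(void)
{
	printf("%d\n", should_dump(KMSG_DUMP_PANIC, false));	/* 1: always */
	printf("%d\n", should_dump(KMSG_DUMP_RESTART, false));	/* 0: skipped */
	printf("%d\n", should_dump(KMSG_DUMP_RESTART, true));	/* 1: opted in */
	return 0;
}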
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5255c9d2e053..b342f57879e6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1932,7 +1932,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
-	trace_sched_stat_sleeptime(current, rq->clock);
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c6414fc669d..aca16b843b7e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1003,6 +1003,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (unlikely(delta > se->statistics.sleep_max))
 			se->statistics.sleep_max = delta;
 
+		se->statistics.sleep_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
@@ -1019,6 +1020,7 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		if (unlikely(delta > se->statistics.block_max))
 			se->statistics.block_max = delta;
 
+		se->statistics.block_start = 0;
 		se->statistics.sum_sleep_runtime += delta;
 
 		if (tsk) {
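
These two added lines pair with the sched/core.c hunk above: clearing the sleep_start/block_start stamps had been tied to the removed trace_sched_stat_sleeptime() call, so enqueue_sleeper() must zero them itself again, or a stale stamp would let one sleep interval be charged twice. In miniature:

#include <stdio.h>

int main(void)
{
	unsigned long long sleep_start = 100, sum_sleep_runtime = 0, now = 250;

	/* first wakeup: account 150 units of sleep, then clear the stamp */
	sum_sleep_runtime += now - sleep_start;
	sleep_start = 0;			/* the added line */

	/* second wakeup at now = 300: a cleared stamp means "was not
	 * sleeping"; a stale stamp of 100 would wrongly add another 200 */
	now = 300;
	if (sleep_start)
		sum_sleep_runtime += now - sleep_start;

	printf("%llu\n", sum_sleep_runtime);	/* 150, not 350 */
	return 0;
}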