about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kexec_core.c	1
-rw-r--r--	kernel/panic.c	6
-rw-r--r--	kernel/printk/internal.h	2
-rw-r--r--	kernel/printk/nmi.c	39
-rw-r--r--	kernel/printk/printk.c	2
5 files changed, 47 insertions, 3 deletions
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 1c03dfb4abfd..d5d408252992 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -893,6 +893,7 @@ void crash_kexec(struct pt_regs *regs)
 	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
 	if (old_cpu == PANIC_CPU_INVALID) {
 		/* This is the 1st CPU which comes here, so go ahead. */
+		printk_nmi_flush_on_panic();
 		__crash_kexec(regs);
 
 		/*
diff --git a/kernel/panic.c b/kernel/panic.c
index 535c96510a44..8aa74497cc5a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -160,8 +160,10 @@ void panic(const char *fmt, ...)
 	 *
 	 * Bypass the panic_cpu check and call __crash_kexec directly.
 	 */
-	if (!crash_kexec_post_notifiers)
+	if (!crash_kexec_post_notifiers) {
+		printk_nmi_flush_on_panic();
 		__crash_kexec(NULL);
+	}
 
 	/*
 	 * Note smp_send_stop is the usual smp shutdown function, which
@@ -176,6 +178,8 @@ void panic(const char *fmt, ...)
 	 */
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
+	/* Call flush even twice. It tries harder with a single online CPU */
+	printk_nmi_flush_on_panic();
 	kmsg_dump(KMSG_DUMP_PANIC);
 
 	/*
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 341bedccc065..7fd2838fa417 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -22,6 +22,8 @@ int __printf(1, 0) vprintk_default(const char *fmt, va_list args);
 
 #ifdef CONFIG_PRINTK_NMI
 
+extern raw_spinlock_t logbuf_lock;
+
 /*
  * printk() could not take logbuf_lock in NMI context. Instead,
  * it temporary stores the strings into a per-CPU buffer.
diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
index bf08557d7e3d..b69eb8a2876f 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/nmi.c
@@ -17,6 +17,7 @@
 
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
+#include <linux/debug_locks.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
 #include <linux/irq_work.h>
@@ -106,7 +107,16 @@ static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
 {
 	const char *buf = s->buffer + start;
 
-	printk("%.*s", (end - start) + 1, buf);
+	/*
+	 * The buffers are flushed in NMI only on panic. The messages must
+	 * go only into the ring buffer at this stage. Consoles will get
+	 * explicitly called later when a crashdump is not generated.
+	 */
+	if (in_nmi())
+		printk_deferred("%.*s", (end - start) + 1, buf);
+	else
+		printk("%.*s", (end - start) + 1, buf);
+
 }
 
 /*
@@ -194,6 +204,33 @@ void printk_nmi_flush(void)
 		__printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work);
 }
 
+/**
+ * printk_nmi_flush_on_panic - flush all per-cpu nmi buffers when the system
+ *	goes down.
+ *
+ * Similar to printk_nmi_flush() but it can be called even in NMI context when
+ * the system goes down. It does the best effort to get NMI messages into
+ * the main ring buffer.
+ *
+ * Note that it could try harder when there is only one CPU online.
+ */
+void printk_nmi_flush_on_panic(void)
+{
+	/*
+	 * Make sure that we could access the main ring buffer.
+	 * Do not risk a double release when more CPUs are up.
+	 */
+	if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) {
+		if (num_online_cpus() > 1)
+			return;
+
+		debug_locks_off();
+		raw_spin_lock_init(&logbuf_lock);
+	}
+
+	printk_nmi_flush();
+}
+
 void __init printk_nmi_init(void)
 {
 	int cpu;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e38579d730f4..60cdf6386763 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -245,7 +245,7 @@ __packed __aligned(4)
  * within the scheduler's rq lock. It must be released before calling
  * console_unlock() or anything else that might wake up a process.
  */
-static DEFINE_RAW_SPINLOCK(logbuf_lock);
+DEFINE_RAW_SPINLOCK(logbuf_lock);
 
 #ifdef CONFIG_PRINTK
 DECLARE_WAIT_QUEUE_HEAD(log_wait);