-rw-r--r--  include/linux/printk.h                                          |  12
-rw-r--r--  init/Kconfig                                                    |  16
-rw-r--r--  init/main.c                                                     |   2
-rw-r--r--  kernel/kexec_core.c                                             |   2
-rw-r--r--  kernel/panic.c                                                  |   4
-rw-r--r--  kernel/printk/Makefile                                          |   2
-rw-r--r--  kernel/printk/printk_safe.c (renamed from kernel/printk/nmi.c)  |  64
-rw-r--r--  lib/nmi_backtrace.c                                             |   2
8 files changed, 53 insertions, 51 deletions
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 3472cc6b7a60..37e933eeffb2 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -147,17 +147,17 @@ void early_printk(const char *s, ...) { }
 #endif
 
 #ifdef CONFIG_PRINTK_NMI
-extern void printk_nmi_init(void);
+extern void printk_safe_init(void);
 extern void printk_nmi_enter(void);
 extern void printk_nmi_exit(void);
-extern void printk_nmi_flush(void);
-extern void printk_nmi_flush_on_panic(void);
+extern void printk_safe_flush(void);
+extern void printk_safe_flush_on_panic(void);
 #else
-static inline void printk_nmi_init(void) { }
+static inline void printk_safe_init(void) { }
 static inline void printk_nmi_enter(void) { }
 static inline void printk_nmi_exit(void) { }
-static inline void printk_nmi_flush(void) { }
-static inline void printk_nmi_flush_on_panic(void) { }
+static inline void printk_safe_flush(void) { }
+static inline void printk_safe_flush_on_panic(void) { }
 #endif /* PRINTK_NMI */
 
 #ifdef CONFIG_PRINTK
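Note that only the init and flush entry points are renamed here; printk_nmi_enter() and printk_nmi_exit() keep their names because they still mark the NMI section. As a rough usage sketch (not part of this patch; in this kernel era the enter/exit pair is driven from nmi_enter()/nmi_exit(), and the handler below is hypothetical):

/*
 * Hypothetical sketch only: how the helpers declared above bracket an
 * NMI-context section so that printk() writes into the per-CPU safe
 * buffer instead of taking logbuf_lock.
 */
static void example_nmi_section(void)
{
	printk_nmi_enter();			/* switch this CPU's printk to the safe buffer */
	printk("example: message from NMI\n");	/* buffered, flushed later via irq_work */
	printk_nmi_exit();			/* switch back to vprintk_default */
}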
diff --git a/init/Kconfig b/init/Kconfig
index 223b734abccd..760b7d0bc9d7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -875,17 +875,19 @@ config LOG_CPU_MAX_BUF_SHIFT
 		     13 => 8 KB for each CPU
 		     12 => 4 KB for each CPU
 
-config NMI_LOG_BUF_SHIFT
-	int "Temporary per-CPU NMI log buffer size (12 => 4KB, 13 => 8KB)"
+config PRINTK_SAFE_LOG_BUF_SHIFT
+	int "Temporary per-CPU printk log buffer size (12 => 4KB, 13 => 8KB)"
 	range 10 21
 	default 13
-	depends on PRINTK_NMI
+	depends on PRINTK
 	help
-	  Select the size of a per-CPU buffer where NMI messages are temporary
-	  stored. They are copied to the main log buffer in a safe context
-	  to avoid a deadlock. The value defines the size as a power of 2.
+	  Select the size of an alternate printk per-CPU buffer where messages
+	  printed from usafe contexts are temporary stored. One example would
+	  be NMI messages, another one - printk recursion. The messages are
+	  copied to the main log buffer in a safe context to avoid a deadlock.
+	  The value defines the size as a power of 2.
 
-	  NMI messages are rare and limited. The largest one is when
+	  Those messages are rare and limited. The largest one is when
 	  a backtrace is printed. It usually fits into 4KB. Select
 	  8KB if you want to be on the safe side.
 
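As a reminder of what the shift means, the configured value is simply a power-of-two byte count per CPU; the usable text space is slightly smaller because the length counter and irq_work share the same allocation, as the SAFE_LOG_BUF_LEN definition further down subtracts. A minimal illustration of the arithmetic (the EXAMPLE_* names are not from the patch):

/* Illustrative arithmetic only. */
#define EXAMPLE_SHIFT		13				/* Kconfig default */
#define EXAMPLE_BUF_BYTES	(1 << EXAMPLE_SHIFT)		/* 8192 bytes per CPU */
#define EXAMPLE_BUF_KB		(EXAMPLE_BUF_BYTES >> 10)	/* 8 KB, as the prompt says */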
diff --git a/init/main.c b/init/main.c
index b0c9d6facef9..b4ca17d9bdeb 100644
--- a/init/main.c
+++ b/init/main.c
@@ -580,7 +580,7 @@ asmlinkage __visible void __init start_kernel(void)
 	timekeeping_init();
 	time_init();
 	sched_clock_postinit();
-	printk_nmi_init();
+	printk_safe_init();
 	perf_event_init();
 	profile_init();
 	call_function_init();
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 5617cc412444..14bb9eb76665 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -916,7 +916,7 @@ void crash_kexec(struct pt_regs *regs)
 	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
 	if (old_cpu == PANIC_CPU_INVALID) {
 		/* This is the 1st CPU which comes here, so go ahead. */
-		printk_nmi_flush_on_panic();
+		printk_safe_flush_on_panic();
 		__crash_kexec(regs);
 
 		/*
diff --git a/kernel/panic.c b/kernel/panic.c
index c51edaa04fce..8c8efcd310e7 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -188,7 +188,7 @@ void panic(const char *fmt, ...)
 	 * Bypass the panic_cpu check and call __crash_kexec directly.
 	 */
 	if (!_crash_kexec_post_notifiers) {
-		printk_nmi_flush_on_panic();
+		printk_safe_flush_on_panic();
 		__crash_kexec(NULL);
 
 		/*
@@ -213,7 +213,7 @@ void panic(const char *fmt, ...)
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
 	/* Call flush even twice. It tries harder with a single online CPU */
-	printk_nmi_flush_on_panic();
+	printk_safe_flush_on_panic();
 	kmsg_dump(KMSG_DUMP_PANIC);
 
 	/*
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index abb0042a427b..607928119f26 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -1,3 +1,3 @@
 obj-y = printk.o
-obj-$(CONFIG_PRINTK_NMI) += nmi.o
+obj-$(CONFIG_PRINTK_NMI) += printk_safe.o
 obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
diff --git a/kernel/printk/nmi.c b/kernel/printk/printk_safe.c
index f011aaef583c..fc80359dcd78 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/printk_safe.c
@@ -1,5 +1,5 @@
 /*
- * nmi.c - Safe printk in NMI context
+ * printk_safe.c - Safe printk in NMI context
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -39,18 +39,18 @@
  * were handled or when IRQs are blocked.
  */
 DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
-static int printk_nmi_irq_ready;
+static int printk_safe_irq_ready;
 atomic_t nmi_message_lost;
 
-#define NMI_LOG_BUF_LEN ((1 << CONFIG_NMI_LOG_BUF_SHIFT) - \
+#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
 			sizeof(atomic_t) - sizeof(struct irq_work))
 
-struct nmi_seq_buf {
+struct printk_safe_seq_buf {
 	atomic_t	len;	/* length of written data */
 	struct irq_work	work;	/* IRQ work that flushes the buffer */
-	unsigned char	buffer[NMI_LOG_BUF_LEN];
+	unsigned char	buffer[SAFE_LOG_BUF_LEN];
 };
-static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
+static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
 
 /*
  * Safe printk() for NMI context. It uses a per-CPU buffer to
@@ -60,7 +60,7 @@ static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
  */
 static int vprintk_nmi(const char *fmt, va_list args)
 {
-	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
 	int add = 0;
 	size_t len;
 
@@ -91,7 +91,7 @@ again:
 		goto again;
 
 	/* Get flushed in a more safe context. */
-	if (add && printk_nmi_irq_ready) {
+	if (add && printk_safe_irq_ready) {
 		/* Make sure that IRQ work is really initialized. */
 		smp_rmb();
 		irq_work_queue(&s->work);
@@ -100,7 +100,7 @@ again:
 	return add;
 }
 
-static void printk_nmi_flush_line(const char *text, int len)
+static void printk_safe_flush_line(const char *text, int len)
 {
 	/*
 	 * The buffers are flushed in NMI only on panic. The messages must
@@ -111,11 +111,10 @@ static void printk_nmi_flush_line(const char *text, int len)
 		printk_deferred("%.*s", len, text);
 	else
 		printk("%.*s", len, text);
-
 }
 
 /* printk part of the temporary buffer line by line */
-static int printk_nmi_flush_buffer(const char *start, size_t len)
+static int printk_safe_flush_buffer(const char *start, size_t len)
 {
 	const char *c, *end;
 	bool header;
@@ -127,7 +126,7 @@ static int printk_nmi_flush_buffer(const char *start, size_t len)
 	/* Print line by line. */
 	while (c < end) {
 		if (*c == '\n') {
-			printk_nmi_flush_line(start, c - start + 1);
+			printk_safe_flush_line(start, c - start + 1);
 			start = ++c;
 			header = true;
 			continue;
@@ -140,7 +139,7 @@ static int printk_nmi_flush_buffer(const char *start, size_t len)
 			continue;
 		}
 
-		printk_nmi_flush_line(start, c - start);
+		printk_safe_flush_line(start, c - start);
 		start = c++;
 		header = true;
 		continue;
@@ -154,8 +153,8 @@ static int printk_nmi_flush_buffer(const char *start, size_t len)
 	if (start < end && !header) {
 		static const char newline[] = KERN_CONT "\n";
 
-		printk_nmi_flush_line(start, end - start);
-		printk_nmi_flush_line(newline, strlen(newline));
+		printk_safe_flush_line(start, end - start);
+		printk_safe_flush_line(newline, strlen(newline));
 	}
 
 	return len;
@@ -165,11 +164,12 @@ static int printk_nmi_flush_buffer(const char *start, size_t len)
  * Flush data from the associated per_CPU buffer. The function
  * can be called either via IRQ work or independently.
  */
-static void __printk_nmi_flush(struct irq_work *work)
+static void __printk_safe_flush(struct irq_work *work)
 {
 	static raw_spinlock_t read_lock =
 		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
-	struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work);
+	struct printk_safe_seq_buf *s =
+		container_of(work, struct printk_safe_seq_buf, work);
 	unsigned long flags;
 	size_t len;
 	int i;
@@ -194,9 +194,9 @@ more:
 	 * buffer size.
 	 */
 	if ((i && i >= len) || len > sizeof(s->buffer)) {
-		const char *msg = "printk_nmi_flush: internal error\n";
+		const char *msg = "printk_safe_flush: internal error\n";
 
-		printk_nmi_flush_line(msg, strlen(msg));
+		printk_safe_flush_line(msg, strlen(msg));
 		len = 0;
 	}
 
@@ -205,7 +205,7 @@ more:
 
 	/* Make sure that data has been written up to the @len */
 	smp_rmb();
-	i += printk_nmi_flush_buffer(s->buffer + i, len - i);
+	i += printk_safe_flush_buffer(s->buffer + i, len - i);
 
 	/*
 	 * Check that nothing has got added in the meantime and truncate
@@ -221,31 +221,31 @@ out:
 }
 
 /**
- * printk_nmi_flush - flush all per-cpu nmi buffers.
+ * printk_safe_flush - flush all per-cpu nmi buffers.
  *
  * The buffers are flushed automatically via IRQ work. This function
  * is useful only when someone wants to be sure that all buffers have
  * been flushed at some point.
  */
-void printk_nmi_flush(void)
+void printk_safe_flush(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		__printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work);
+		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
 }
 
 /**
- * printk_nmi_flush_on_panic - flush all per-cpu nmi buffers when the system
+ * printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
  *	goes down.
  *
- * Similar to printk_nmi_flush() but it can be called even in NMI context when
+ * Similar to printk_safe_flush() but it can be called even in NMI context when
  * the system goes down. It does the best effort to get NMI messages into
  * the main ring buffer.
  *
  * Note that it could try harder when there is only one CPU online.
  */
-void printk_nmi_flush_on_panic(void)
+void printk_safe_flush_on_panic(void)
 {
 	/*
 	 * Make sure that we could access the main ring buffer.
@@ -259,25 +259,25 @@ void printk_nmi_flush_on_panic(void)
 		raw_spin_lock_init(&logbuf_lock);
 	}
 
-	printk_nmi_flush();
+	printk_safe_flush();
 }
 
-void __init printk_nmi_init(void)
+void __init printk_safe_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		struct nmi_seq_buf *s = &per_cpu(nmi_print_seq, cpu);
+		struct printk_safe_seq_buf *s = &per_cpu(nmi_print_seq, cpu);
 
-		init_irq_work(&s->work, __printk_nmi_flush);
+		init_irq_work(&s->work, __printk_safe_flush);
 	}
 
 	/* Make sure that IRQ works are initialized before enabling. */
 	smp_wmb();
-	printk_nmi_irq_ready = 1;
+	printk_safe_irq_ready = 1;
 
 	/* Flush pending messages that did not have scheduled IRQ works. */
-	printk_nmi_flush();
+	printk_safe_flush();
 }
 
 void printk_nmi_enter(void)
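Taken together, the renamed code keeps the original scheme: a writer in an unsafe context appends text to a per-CPU seq buffer and queues an irq_work, and the irq_work (or an explicit flush on panic) later copies the text into the main log buffer line by line. The following self-contained model is only a simplification of that flow; it ignores per-CPU data, atomics and irq_work, and all names in it (side_buf, safe_log, safe_flush) are hypothetical stand-ins for the structures above.

/*
 * Simplified model of the mechanism above, illustrative only.
 * An "unsafe" context appends into a side buffer; a later "safe"
 * context flushes it into the main log.
 */
#include <stdio.h>
#include <string.h>

#define SIDE_BUF_LEN 8192

static char side_buf[SIDE_BUF_LEN];
static size_t side_len;

/* Called from the unsafe context: touches only the side buffer. */
static int safe_log(const char *msg)
{
	size_t len = strlen(msg);

	if (side_len + len > sizeof(side_buf))
		return 0;			/* dropped, like nmi_message_lost */
	memcpy(side_buf + side_len, msg, len);
	side_len += len;
	return 1;
}

/* Called later from a safe context: moves text into the real log. */
static void safe_flush(void)
{
	fwrite(side_buf, 1, side_len, stdout);	/* stands in for the main ring buffer */
	side_len = 0;
}

int main(void)
{
	safe_log("message written from an NMI-like context\n");
	safe_flush();				/* deferred, like the irq_work callback */
	return 0;
}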
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 75554754eadf..5f7999eacad5 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -77,7 +77,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 	 * Force flush any remote buffers that might be stuck in IRQ context
 	 * and therefore could not run their irq_work.
 	 */
-	printk_nmi_flush();
+	printk_safe_flush();
 
 	clear_bit_unlock(0, &backtrace_flag);
 	put_cpu();