 kernel/printk/internal.h    |  6 ++++--
 kernel/printk/printk.c      | 19 +++++++++++-------
 kernel/printk/printk_safe.c | 36 ++++++++++++++++++++++++-------
 lib/nmi_backtrace.c         |  3 +++
 4 files changed, 50 insertions(+), 14 deletions(-)
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 1db044f808b7..2a7d04049af4 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -18,12 +18,14 @@
 
 #ifdef CONFIG_PRINTK
 
-#define PRINTK_SAFE_CONTEXT_MASK	0x7fffffff
-#define PRINTK_NMI_CONTEXT_MASK	0x80000000
+#define PRINTK_SAFE_CONTEXT_MASK	 0x3fffffff
+#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000
+#define PRINTK_NMI_CONTEXT_MASK		 0x80000000
 
 extern raw_spinlock_t logbuf_lock;
 
 __printf(1, 0) int vprintk_default(const char *fmt, va_list args);
+__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
 void __printk_safe_enter(void);
 void __printk_safe_exit(void);
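[Annotation, not part of the patch] PRINTK_SAFE_CONTEXT_MASK shrinks from 0x7fffffff to 0x3fffffff so that bit 30 is freed for the new PRINTK_NMI_DEFERRED_CONTEXT_MASK: bits 0-29 stay the nesting counter for printk-safe sections, bit 30 marks "in NMI with logbuf_lock available", and bit 31 remains the original NMI flag. A minimal stand-alone sketch of that partition (plain userspace C, illustration only):

#include <assert.h>
#include <stdint.h>

#define PRINTK_SAFE_CONTEXT_MASK	 0x3fffffffu	/* bits 29..0: safe-context nesting count */
#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000u	/* bit 30: in NMI, logbuf_lock free */
#define PRINTK_NMI_CONTEXT_MASK		 0x80000000u	/* bit 31: in NMI, logbuf_lock busy */

int main(void)
{
	/* The three regions are disjoint and together cover the whole 32-bit word. */
	assert((PRINTK_SAFE_CONTEXT_MASK & PRINTK_NMI_DEFERRED_CONTEXT_MASK) == 0);
	assert((PRINTK_SAFE_CONTEXT_MASK & PRINTK_NMI_CONTEXT_MASK) == 0);
	assert((PRINTK_SAFE_CONTEXT_MASK | PRINTK_NMI_DEFERRED_CONTEXT_MASK |
		PRINTK_NMI_CONTEXT_MASK) == UINT32_MAX);
	return 0;
}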
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index bd53ea579dc8..fc47863f629c 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2720,16 +2720,13 @@ void wake_up_klogd(void)
 	preempt_enable();
 }
 
-int printk_deferred(const char *fmt, ...)
+int vprintk_deferred(const char *fmt, va_list args)
 {
-	va_list args;
 	int r;
 
-	preempt_disable();
-	va_start(args, fmt);
 	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
-	va_end(args);
 
+	preempt_disable();
 	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
 	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
 	preempt_enable();
@@ -2737,6 +2734,18 @@ int printk_deferred(const char *fmt, ...)
 	return r;
 }
 
+int printk_deferred(const char *fmt, ...)
+{
+	va_list args;
+	int r;
+
+	va_start(args, fmt);
+	r = vprintk_deferred(fmt, args);
+	va_end(args);
+
+	return r;
+}
+
 /*
  * printk rate limiting, lifted from the networking subsystem.
  *
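[Annotation, not part of the patch] The printk.c change is a plain refactoring: the body of printk_deferred() moves into the new vprintk_deferred(), which takes a caller-supplied va_list, logs at LOGLEVEL_SCHED and only queues irq_work instead of calling console drivers; printk_deferred() becomes a thin variadic wrapper, and the preempt_disable() section now covers only the per-CPU irq_work queueing. This lets vprintk_func() reuse the same deferred path from NMI context. A hypothetical caller (function name invented for illustration) is unaffected:

#include <linux/printk.h>

/* Still safe with scheduler/timekeeping locks held: the message only goes
 * into the log buffer; console output happens later from irq_work. */
static void report_from_atomic_context(int cpu)
{
	printk_deferred("deferred diagnostic from CPU %d\n", cpu);
}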
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index 033e50a7d706..3cdaeaef9ce1 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -80,8 +80,8 @@ static void queue_flush_work(struct printk_safe_seq_buf *s)
  * happen, printk_safe_log_store() will notice the buffer->len mismatch
  * and repeat the write.
  */
-static int printk_safe_log_store(struct printk_safe_seq_buf *s,
-				 const char *fmt, va_list args)
+static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
+						const char *fmt, va_list args)
 {
 	int add;
 	size_t len;
@@ -299,7 +299,7 @@ void printk_safe_flush_on_panic(void)
  * one writer running. But the buffer might get flushed from another
  * CPU, so we need to be careful.
  */
-static int vprintk_nmi(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 {
 	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
 
@@ -308,17 +308,29 @@ static int vprintk_nmi(const char *fmt, va_list args)
 
 void printk_nmi_enter(void)
 {
-	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+	/*
+	 * The size of the extra per-CPU buffer is limited. Use it only when
+	 * the main one is locked. If this CPU is not in the safe context,
+	 * the lock must be taken on another CPU and we could wait for it.
+	 */
+	if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
+	    raw_spin_is_locked(&logbuf_lock)) {
+		this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
+	} else {
+		this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
+	}
 }
 
 void printk_nmi_exit(void)
 {
-	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
+	this_cpu_and(printk_context,
+		     ~(PRINTK_NMI_CONTEXT_MASK |
+		       PRINTK_NMI_DEFERRED_CONTEXT_MASK));
 }
 
 #else
 
-static int vprintk_nmi(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 {
 	return 0;
 }
@@ -330,7 +342,7 @@ static int vprintk_nmi(const char *fmt, va_list args)
  * into itself. It uses a per-CPU buffer to store the message, just like
  * NMI.
  */
-static int vprintk_safe(const char *fmt, va_list args)
+static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
 {
 	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
 
@@ -351,12 +363,22 @@ void __printk_safe_exit(void)
 
 __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 {
+	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
 	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
 		return vprintk_nmi(fmt, args);
 
+	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
 	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
 		return vprintk_safe(fmt, args);
 
+	/*
+	 * Use the main logbuf when logbuf_lock is available in NMI.
+	 * But avoid calling console drivers that might have their own locks.
+	 */
+	if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
+		return vprintk_deferred(fmt, args);
+
+	/* No obstacles. */
 	return vprintk_default(fmt, args);
 }
 
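[Annotation, not part of the patch] The core of the change is printk_nmi_enter() plus the new branch in vprintk_func(): the small per-CPU NMI buffer is used only when this CPU is inside a printk-safe section *and* logbuf_lock is observed held, i.e. when this CPU itself might own the lock; in every other NMI the message goes straight into the main log buffer via vprintk_deferred(), with console output postponed to irq_work so no console-driver locks are taken in NMI. A stand-alone model of the two decisions (userspace C, illustration only; function names are invented, and the real code reads the per-CPU printk_context and raw_spin_is_locked(&logbuf_lock)):

#include <stdbool.h>
#include <stdio.h>

#define PRINTK_SAFE_CONTEXT_MASK	 0x3fffffffu
#define PRINTK_NMI_DEFERRED_CONTEXT_MASK 0x40000000u
#define PRINTK_NMI_CONTEXT_MASK		 0x80000000u

/* Mirrors printk_nmi_enter(): fall back to the limited per-CPU buffer only
 * when this CPU may hold logbuf_lock itself. */
static unsigned int model_nmi_enter(unsigned int ctx, bool logbuf_locked)
{
	if ((ctx & PRINTK_SAFE_CONTEXT_MASK) && logbuf_locked)
		return ctx | PRINTK_NMI_CONTEXT_MASK;
	return ctx | PRINTK_NMI_DEFERRED_CONTEXT_MASK;
}

/* Mirrors the dispatch order in vprintk_func(). */
static const char *model_pick_path(unsigned int ctx)
{
	if (ctx & PRINTK_NMI_CONTEXT_MASK)
		return "vprintk_nmi: per-CPU buffer";
	if (ctx & PRINTK_SAFE_CONTEXT_MASK)
		return "vprintk_safe: per-CPU buffer";
	if (ctx & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
		return "vprintk_deferred: main logbuf, consoles deferred";
	return "vprintk_default";
}

int main(void)
{
	/* NMI on an otherwise idle CPU: logbuf_lock free -> main log buffer. */
	printf("%s\n", model_pick_path(model_nmi_enter(0, false)));
	/* NMI interrupting a printk-safe section while logbuf_lock is held. */
	printf("%s\n", model_pick_path(model_nmi_enter(1, true)));
	return 0;
}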
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 4e8a30d1c22f..0bc0a3535a8a 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 
 bool nmi_cpu_backtrace(struct pt_regs *regs)
 {
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	int cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+		arch_spin_lock(&lock);
 		if (regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
 				cpu, instruction_pointer(regs));
@@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
+		arch_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
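[Annotation, not part of the patch] With the deferred path, backtraces triggered on several CPUs can now be written into the shared log buffer directly instead of being collected in per-CPU buffers and flushed one CPU at a time, so their lines could interleave. The new static arch_spinlock_t serializes nmi_cpu_backtrace() to keep each CPU's backtrace contiguous; a bare arch spinlock avoids the lockdep and debugging machinery of the normal spinlock API, which is what makes it usable from NMI here. The same pattern in isolation (hypothetical helper, illustration only):

#include <linux/spinlock.h>
#include <linux/printk.h>

static void nmi_print_report(int cpu, unsigned long pc)
{
	/* Only orders the output; correctness does not depend on the lock. */
	static arch_spinlock_t report_lock = __ARCH_SPIN_LOCK_UNLOCKED;

	arch_spin_lock(&report_lock);
	pr_warn("CPU %d: NMI report start\n", cpu);
	pr_warn("CPU %d: pc = %#lx\n", cpu, pc);
	arch_spin_unlock(&report_lock);
}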
