diff options
author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2018-10-17 06:34:32 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2018-10-29 14:04:31 -0400 |
commit | f77084d96355f5fba8e2c1fb3a51a393b1570de7 (patch) | |
tree | 34ccb8ee62b9656a1fe519dae572a947d4184997 | |
parent | 8af1909580595a303b03d5999e410d407b7a6db7 (diff) |
x86/mm/pat: Disable preemption around __flush_tlb_all()
The WARN_ON_ONCE(__read_cr3() != build_cr3()) in switch_mm_irqs_off()
triggers every once in a while during a snapshotted system upgrade.
The warning triggers since commit decab0888e6e ("x86/mm: Remove
preempt_disable/enable() from __native_flush_tlb()"). The callchain is:
get_page_from_freelist() -> post_alloc_hook() -> __kernel_map_pages()
with CONFIG_DEBUG_PAGEALLOC enabled.
Disable preemption during CR3 reset / __flush_tlb_all() and add a comment
why preemption has to be disabled so it won't be removed accidentally.
Add another preemptible() check in __flush_tlb_all() to catch callers with
enabled preemption when PGE is enabled, because PGE enabled does not
trigger the warning in __native_flush_tlb(). Suggested by Andy Lutomirski.
Fixes: decab0888e6e ("x86/mm: Remove preempt_disable/enable() from __native_flush_tlb()")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20181017103432.zgv46nlu3hc7k4rq@linutronix.de
-rw-r--r-- | arch/x86/include/asm/tlbflush.h | 6 | ||||
-rw-r--r-- | arch/x86/mm/pageattr.c | 6 |
2 files changed, 11 insertions, 1 deletions
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 323a313947e0..d760611cfc35 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -453,6 +453,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr) | |||
453 | */ | 453 | */ |
454 | static inline void __flush_tlb_all(void) | 454 | static inline void __flush_tlb_all(void) |
455 | { | 455 | { |
456 | /* | ||
457 | * This is to catch users with enabled preemption and the PGE feature | ||
458 | * and don't trigger the warning in __native_flush_tlb(). | ||
459 | */ | ||
460 | VM_WARN_ON_ONCE(preemptible()); | ||
461 | |||
456 | if (boot_cpu_has(X86_FEATURE_PGE)) { | 462 | if (boot_cpu_has(X86_FEATURE_PGE)) { |
457 | __flush_tlb_global(); | 463 | __flush_tlb_global(); |
458 | } else { | 464 | } else { |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 62bb30b4bd2a..a1004dec98ea 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -2309,9 +2309,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) | |||
2309 | 2309 | ||
2310 | /* | 2310 | /* |
2311 | * We should perform an IPI and flush all tlbs, | 2311 | * We should perform an IPI and flush all tlbs, |
2312 | * but that can deadlock->flush only current cpu: | 2312 | * but that can deadlock->flush only current cpu. |
2313 | * Preemption needs to be disabled around __flush_tlb_all() due to | ||
2314 | * CR3 reload in __native_flush_tlb(). | ||
2313 | */ | 2315 | */ |
2316 | preempt_disable(); | ||
2314 | __flush_tlb_all(); | 2317 | __flush_tlb_all(); |
2318 | preempt_enable(); | ||
2315 | 2319 | ||
2316 | arch_flush_lazy_mmu_mode(); | 2320 | arch_flush_lazy_mmu_mode(); |
2317 | } | 2321 | } |