author		Andy Lutomirski <luto@kernel.org>	2017-09-17 12:03:49 -0400
committer	Ingo Molnar <mingo@kernel.org>		2017-09-17 12:59:08 -0400
commit		52a2af400c1075219b3f0ce5c96fc961da44018a (patch)
tree		0d51c712a3dbb9a18dd2adee5b786704fca48718
parent		47061a24e2ee5bd8a40d473d47a5bd823fa0081f (diff)
x86/mm/64: Stop using CR3.PCID == 0 in ASID-aware code
Putting the logical ASID into CR3's PCID bits directly means that we
have two cases to consider separately: ASID == 0 and ASID != 0.
This means that bugs that only hit in one of these cases trigger
nondeterministically.
There were some bugs like this in the past, and I think there's
still one in current kernels. In particular, we have a number of
ASID-unaware code paths that save CR3, write some special value, and
then restore CR3. This includes suspend/resume, hibernate, kexec,
EFI, and maybe other things I've missed. This is currently
dangerous: if ASID != 0, then this code sequence will leave garbage
in the TLB tagged for ASID 0. We could potentially see corruption
when switching back to ASID 0. In principle, an
initialize_tlbstate_and_flush() call after these sequences would
solve the problem, but EFI, at least, does not call this. (And it
probably shouldn't -- initialize_tlbstate_and_flush() is rather
expensive.)
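
To make the failure mode concrete, here is a minimal sketch of that
pattern (hypothetical illustration only, not code lifted from EFI or
any of the paths above; pcid_unaware_cr3_dance() and tmp_pgd are
made-up names):

	/*
	 * Hypothetical example of a PCID-unaware CR3 save/restore.
	 * tmp_pgd stands in for whatever temporary page table such a
	 * path installs.
	 */
	static void pcid_unaware_cr3_dance(pgd_t *tmp_pgd)
	{
		unsigned long saved_cr3 = __read_cr3();	/* may carry a nonzero PCID */

		write_cr3(__pa(tmp_pgd));	/* PCID bits in CR3 are now 0 */
		/* ... memory accesses fill the TLB with entries tagged PCID 0 ... */
		write_cr3(saved_cr3);		/* stale PCID-0 entries are left behind */
	}

With the logical ASID used directly as the PCID, those leftover
PCID-0 entries alias ASID 0; with ASID+1 in the PCID bits, ASID-aware
code never runs with PCID 0, so this sequence can no longer corrupt a
live ASID.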
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/cdc14bbe5d3c3ef2a562be09a6368ffe9bd947a6.1505663533.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/include/asm/mmu_context.h	21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index a999ba6b721f..c120b5db178a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -286,14 +286,31 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
+/*
+ * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
+ * bits. This serves two purposes. It prevents a nasty situation in
+ * which PCID-unaware code saves CR3, loads some other value (with PCID
+ * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
+ * the saved ASID was nonzero. It also means that any bugs involving
+ * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
+ * deterministically.
+ */
+
 static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
 {
-	return __sme_pa(mm->pgd) | asid;
+	if (static_cpu_has(X86_FEATURE_PCID)) {
+		VM_WARN_ON_ONCE(asid > 4094);
+		return __sme_pa(mm->pgd) | (asid + 1);
+	} else {
+		VM_WARN_ON_ONCE(asid != 0);
+		return __sme_pa(mm->pgd);
+	}
 }
 
 static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
 {
-	return __sme_pa(mm->pgd) | asid | CR3_NOFLUSH;
+	VM_WARN_ON_ONCE(asid > 4094);
+	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
 }
 
 /*