author		Andy Lutomirski <luto@kernel.org>	2017-09-17 12:03:48 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-09-17 12:59:08 -0400
commit		47061a24e2ee5bd8a40d473d47a5bd823fa0081f
tree		d147d513c17c141d9a3028d00bdc575a168cc659
parent		0666f560b71b899cd11a7caf39fd45129e9030fd
x86/mm: Factor out CR3-building code
Currently, the code that assembles a value to load into CR3 is
open-coded everywhere. Factor it out into helpers build_cr3() and
build_cr3_noflush().
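
(Illustration only, not part of the patch: a minimal user-space sketch of the
bit layout the helpers assemble. CR3_NOFLUSH is bit 63; the SME C-bit position
and the sample pgd address are made-up placeholders, and sme_pa() here only
models what the kernel's __sme_pa() does.)

  #include <stdint.h>
  #include <stdio.h>

  #define CR3_NOFLUSH  (1ULL << 63)   /* bit 63: do not flush the TLB on load */
  #define SME_C_BIT    (1ULL << 47)   /* placeholder for the SME encryption bit */

  /* Models __sme_pa(): physical address of the pgd with the SME mask applied. */
  static uint64_t sme_pa(uint64_t pgd_phys)
  {
          return pgd_phys | SME_C_BIT;
  }

  static uint64_t build_cr3(uint64_t pgd_phys, uint16_t asid)
  {
          return sme_pa(pgd_phys) | asid;
  }

  static uint64_t build_cr3_noflush(uint64_t pgd_phys, uint16_t asid)
  {
          return sme_pa(pgd_phys) | asid | CR3_NOFLUSH;
  }

  int main(void)
  {
          uint64_t pgd_phys = 0x1234000;  /* made-up page-aligned physical address */

          printf("cr3         = %#llx\n",
                 (unsigned long long)build_cr3(pgd_phys, 1));
          printf("cr3 noflush = %#llx\n",
                 (unsigned long long)build_cr3_noflush(pgd_phys, 1));
          return 0;
  }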
This makes one semantic change: __get_current_cr3_fast() was wrong
on SME systems. No one noticed because the only caller is in the
VMX code, and there are no CPUs with both SME and VMX.
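
In terms of the sketch above (again illustrative, not the kernel source), the
old fast path built the value without the SME mask:

  /* Old: __pa()-based value, missing the SME C-bit that the loaded CR3 carries
   * (and the ASID was only OR'd in when X86_FEATURE_PCID is set). */
  uint64_t old_fast = pgd_phys | asid;
  /* New: same helper as the CR3 writers, SME mask included. */
  uint64_t new_fast = build_cr3(pgd_phys, asid);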
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom Lendacky <Thomas.Lendacky@amd.com>
Link: http://lkml.kernel.org/r/ce350cf11e93e2842d14d0b95b0199c7d881f527.1505663533.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/include/asm/mmu_context.h	15
-rw-r--r--	arch/x86/mm/tlb.c	11
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 7ae318c340d9..a999ba6b721f 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -286,6 +286,15 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
+static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
+{
+	return __sme_pa(mm->pgd) | asid;
+}
+
+static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
+{
+	return __sme_pa(mm->pgd) | asid | CR3_NOFLUSH;
+}
 
 /*
  * This can be used from process context to figure out what the value of
@@ -296,10 +305,8 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-	unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
-
-	if (static_cpu_has(X86_FEATURE_PCID))
-		cr3 |= this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+		this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
 	/* For now, be very restrictive about when this can be called. */
 	VM_WARN_ON(in_nmi() || preemptible());
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 1ab3821f9e26..93fe97cce581 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -126,8 +126,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * isn't free.
 	 */
 #ifdef CONFIG_DEBUG_VM
-	if (WARN_ON_ONCE(__read_cr3() !=
-			 (__sme_pa(real_prev->pgd) | prev_asid))) {
+	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
 		/*
 		 * If we were to BUG here, we'd be very likely to kill
 		 * the system so hard that we don't see the call trace.
@@ -172,7 +171,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		 */
 		this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen,
 			       next_tlb_gen);
-		write_cr3(__sme_pa(next->pgd) | prev_asid);
+		write_cr3(build_cr3(next, prev_asid));
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
 				TLB_FLUSH_ALL);
 	}
@@ -216,12 +215,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	if (need_flush) {
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-		write_cr3(__sme_pa(next->pgd) | new_asid);
+		write_cr3(build_cr3(next, new_asid));
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
 				TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
-		write_cr3(__sme_pa(next->pgd) | new_asid | CR3_NOFLUSH);
+		write_cr3(build_cr3_noflush(next, new_asid));
 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
@@ -265,7 +264,7 @@ void initialize_tlbstate_and_flush(void)
 		!(cr4_read_shadow() & X86_CR4_PCIDE));
 
 	/* Force ASID 0 and force a TLB flush. */
-	write_cr3(cr3 & ~CR3_PCID_MASK);
+	write_cr3(build_cr3(mm, 0));
 
 	/* Reinitialize tlbstate. */
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);