-rw-r--r--  arch/x86/include/asm/paravirt.h        |  4
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |  2
-rw-r--r--  arch/x86/include/asm/pgtable_32.h      |  2
-rw-r--r--  arch/x86/include/asm/tlbflush.h        | 27
-rw-r--r--  arch/x86/kernel/paravirt.c             |  6
-rw-r--r--  arch/x86/mm/init_64.c                  |  2
-rw-r--r--  arch/x86/mm/ioremap.c                  |  2
-rw-r--r--  arch/x86/mm/kmmio.c                    |  2
-rw-r--r--  arch/x86/mm/pgtable_32.c               |  2
-rw-r--r--  arch/x86/mm/tlb.c                      |  6
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c          |  2
-rw-r--r--  arch/x86/xen/mmu_pv.c                  |  6
-rw-r--r--  include/trace/events/xen.h             |  2
13 files changed, 39 insertions, 26 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 892df375b615..554841fab717 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -297,9 +297,9 @@ static inline void __flush_tlb_global(void)
 {
 	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
 }
-static inline void __flush_tlb_single(unsigned long addr)
+static inline void __flush_tlb_one_user(unsigned long addr)
 {
-	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
+	PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
 }
 
 static inline void flush_tlb_others(const struct cpumask *cpumask,
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 6ec54d01972d..f624f1f10316 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -217,7 +217,7 @@ struct pv_mmu_ops {
 	/* TLB operations */
 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
-	void (*flush_tlb_single)(unsigned long addr);
+	void (*flush_tlb_one_user)(unsigned long addr);
 	void (*flush_tlb_others)(const struct cpumask *cpus,
 				 const struct flush_tlb_info *info);
 
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index e67c0620aec2..e55466760ff8 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -61,7 +61,7 @@ void paging_init(void);
 #define kpte_clear_flush(ptep, vaddr)		\
 do {						\
 	pte_clear(&init_mm, (vaddr), (ptep));	\
-	__flush_tlb_one((vaddr));		\
+	__flush_tlb_one_kernel((vaddr));	\
 } while (0)
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 2b8f18ca5874..84137c22fdfa 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -140,7 +140,7 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 #else
 #define __flush_tlb() __native_flush_tlb()
 #define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#define __flush_tlb_one_user(addr) __native_flush_tlb_one_user(addr)
 #endif
 
 static inline bool tlb_defer_switch_to_init_mm(void)
@@ -400,7 +400,7 @@ static inline void __native_flush_tlb_global(void)
 /*
  * flush one page in the user mapping
  */
-static inline void __native_flush_tlb_single(unsigned long addr)
+static inline void __native_flush_tlb_one_user(unsigned long addr)
 {
 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 
@@ -437,18 +437,31 @@ static inline void __flush_tlb_all(void)
 /*
  * flush one page in the kernel mapping
  */
-static inline void __flush_tlb_one(unsigned long addr)
+static inline void __flush_tlb_one_kernel(unsigned long addr)
 {
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
-	__flush_tlb_single(addr);
+
+	/*
+	 * If PTI is off, then __flush_tlb_one_user() is just INVLPG or its
+	 * paravirt equivalent. Even with PCID, this is sufficient: we only
+	 * use PCID if we also use global PTEs for the kernel mapping, and
+	 * INVLPG flushes global translations across all address spaces.
+	 *
+	 * If PTI is on, then the kernel is mapped with non-global PTEs, and
+	 * __flush_tlb_one_user() will flush the given address for the current
+	 * kernel address space and for its usermode counterpart, but it does
+	 * not flush it for other address spaces.
+	 */
+	__flush_tlb_one_user(addr);
 
 	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
 	/*
-	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
-	 * but since kernel space is replicated across all, we must also
-	 * invalidate all others.
+	 * See above. We need to propagate the flush to all other address
+	 * spaces. In principle, we only need to propagate it to kernelmode
+	 * address spaces, but the extra bookkeeping we would need is not
+	 * worth it.
 	 */
 	invalidate_other_asid();
 }
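[Illustrative sketch, not part of the commit: how a caller uses the renamed helpers after this change. Kernel-mapping updates go through __flush_tlb_one_kernel(), which also invalidates other ASIDs when PTI is enabled, while __flush_tlb_one_user() only covers the current mm's kernel/user address-space pair. The function name set_kernel_pte_example is hypothetical; the set_pte() + flush pattern mirrors the __early_set_fixmap() and set_pte_vaddr() hunks below.]

/* Hypothetical caller; assumes <asm/pgtable.h> and <asm/tlbflush.h>. */
static void set_kernel_pte_example(pte_t *ptep, unsigned long vaddr, pte_t pteval)
{
	set_pte(ptep, pteval);		/* update a kernel-space mapping */
	__flush_tlb_one_kernel(vaddr);	/* flush it in all address spaces, incl. other ASIDs under PTI */
}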
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 041096bdef86..99dc79e76bdc 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -200,9 +200,9 @@ static void native_flush_tlb_global(void)
 	__native_flush_tlb_global();
 }
 
-static void native_flush_tlb_single(unsigned long addr)
+static void native_flush_tlb_one_user(unsigned long addr)
 {
-	__native_flush_tlb_single(addr);
+	__native_flush_tlb_one_user(addr);
 }
 
 struct static_key paravirt_steal_enabled;
@@ -401,7 +401,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
 
 	.flush_tlb_user = native_flush_tlb,
 	.flush_tlb_kernel = native_flush_tlb_global,
-	.flush_tlb_single = native_flush_tlb_single,
+	.flush_tlb_one_user = native_flush_tlb_one_user,
 	.flush_tlb_others = native_flush_tlb_others,
 
 	.pgd_alloc = __paravirt_pgd_alloc,
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4a837289f2ad..60ae1fe3609f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -256,7 +256,7 @@ static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	__flush_tlb_one_kernel(vaddr);
 }
 
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c45b6ec5357b..e2db83bebc3b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -820,5 +820,5 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
 	else
 		pte_clear(&init_mm, addr, pte);
-	__flush_tlb_one(addr);
+	__flush_tlb_one_kernel(addr);
 }
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 58477ec3d66d..7c8686709636 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -168,7 +168,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
 		return -1;
 	}
 
-	__flush_tlb_one(f->addr);
+	__flush_tlb_one_kernel(f->addr);
 	return 0;
 }
 
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index c3c5274410a9..9bb7f0ab9fe6 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -63,7 +63,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	__flush_tlb_one_kernel(vaddr);
 }
 
 unsigned long __FIXADDR_TOP = 0xfffff000;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 012d02624848..0c936435ea93 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -492,7 +492,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	 * flush that changes context.tlb_gen from 2 to 3. If they get
 	 * processed on this CPU in reverse order, we'll see
 	 * local_tlb_gen == 1, mm_tlb_gen == 3, and end != TLB_FLUSH_ALL.
-	 * If we were to use __flush_tlb_single() and set local_tlb_gen to
+	 * If we were to use __flush_tlb_one_user() and set local_tlb_gen to
 	 * 3, we'd be break the invariant: we'd update local_tlb_gen above
 	 * 1 without the full flush that's needed for tlb_gen 2.
 	 *
@@ -513,7 +513,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 
 		addr = f->start;
 		while (addr < f->end) {
-			__flush_tlb_single(addr);
+			__flush_tlb_one_user(addr);
 			addr += PAGE_SIZE;
 		}
 		if (local)
@@ -660,7 +660,7 @@ static void do_kernel_range_flush(void *info)
 
 	/* flush range by one by one 'invlpg' */
 	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-		__flush_tlb_one(addr);
+		__flush_tlb_one_kernel(addr);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 8538a6723171..7d5d53f36a7a 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
 			local_flush_tlb();
 			stat->d_alltlb++;
 		} else {
-			__flush_tlb_single(msg->address);
+			__flush_tlb_one_user(msg->address);
 			stat->d_onetlb++;
 		}
 		stat->d_requestee++;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index d85076223a69..aae88fec9941 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1300,12 +1300,12 @@ static void xen_flush_tlb(void)
 	preempt_enable();
 }
 
-static void xen_flush_tlb_single(unsigned long addr)
+static void xen_flush_tlb_one_user(unsigned long addr)
 {
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
-	trace_xen_mmu_flush_tlb_single(addr);
+	trace_xen_mmu_flush_tlb_one_user(addr);
 
 	preempt_disable();
 
@@ -2370,7 +2370,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
 	.flush_tlb_user = xen_flush_tlb,
 	.flush_tlb_kernel = xen_flush_tlb,
-	.flush_tlb_single = xen_flush_tlb_single,
+	.flush_tlb_one_user = xen_flush_tlb_one_user,
 	.flush_tlb_others = xen_flush_tlb_others,
 
 	.pgd_alloc = xen_pgd_alloc,
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index b8adf05c534e..7dd8f34c37df 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -368,7 +368,7 @@ TRACE_EVENT(xen_mmu_flush_tlb,
 	    TP_printk("%s", "")
 	);
 
-TRACE_EVENT(xen_mmu_flush_tlb_single,
+TRACE_EVENT(xen_mmu_flush_tlb_one_user,
 	    TP_PROTO(unsigned long addr),
 	    TP_ARGS(addr),
 	    TP_STRUCT__entry(