about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAndy Lutomirski <luto@kernel.org>2017-04-22 03:01:20 -0400
committerIngo Molnar <mingo@kernel.org>2017-04-26 04:02:06 -0400
commit29961b59a51f8c6838a26a45e871a7ed6771809b (patch)
tree31204a77f627f52b403050fce545015a24614baf
parent9ccee2373f0658f234727700e619df097ba57023 (diff)
x86/mm: Remove flush_tlb() and flush_tlb_current_task()
I was trying to figure out how flush_tlb_current_task() would possibly work correctly if current->mm != current->active_mm, but I realized I could spare myself the effort: it has no callers except the unused flush_tlb() macro. Signed-off-by: Andy Lutomirski <luto@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Nadav Amit <namit@vmware.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/e52d64c11690f85e9f1d69d7b48cc2269cd2e94b.1492844372.git.luto@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/include/asm/tlbflush.h9
-rw-r--r--arch/x86/mm/tlb.c17
2 files changed, 0 insertions, 26 deletions
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 75d002bdb3f3..6ed9ea469b48 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -215,7 +215,6 @@ static inline void __flush_tlb_one(unsigned long addr)
215/* 215/*
216 * TLB flushing: 216 * TLB flushing:
217 * 217 *
218 * - flush_tlb() flushes the current mm struct TLBs
219 * - flush_tlb_all() flushes all processes TLBs 218 * - flush_tlb_all() flushes all processes TLBs
220 * - flush_tlb_mm(mm) flushes the specified mm context TLB's 219 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
221 * - flush_tlb_page(vma, vmaddr) flushes one page 220 * - flush_tlb_page(vma, vmaddr) flushes one page
@@ -247,11 +246,6 @@ static inline void flush_tlb_all(void)
247 __flush_tlb_all(); 246 __flush_tlb_all();
248} 247}
249 248
250static inline void flush_tlb(void)
251{
252 __flush_tlb_up();
253}
254
255static inline void local_flush_tlb(void) 249static inline void local_flush_tlb(void)
256{ 250{
257 __flush_tlb_up(); 251 __flush_tlb_up();
@@ -313,14 +307,11 @@ static inline void flush_tlb_kernel_range(unsigned long start,
313 flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags) 307 flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
314 308
315extern void flush_tlb_all(void); 309extern void flush_tlb_all(void);
316extern void flush_tlb_current_task(void);
317extern void flush_tlb_page(struct vm_area_struct *, unsigned long); 310extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
318extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, 311extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
319 unsigned long end, unsigned long vmflag); 312 unsigned long end, unsigned long vmflag);
320extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 313extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
321 314
322#define flush_tlb() flush_tlb_current_task()
323
324void native_flush_tlb_others(const struct cpumask *cpumask, 315void native_flush_tlb_others(const struct cpumask *cpumask,
325 struct mm_struct *mm, 316 struct mm_struct *mm,
326 unsigned long start, unsigned long end); 317 unsigned long start, unsigned long end);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a7655f6caf7d..92ec37f517ab 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -289,23 +289,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
289 smp_call_function_many(cpumask, flush_tlb_func, &info, 1); 289 smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
290} 290}
291 291
292void flush_tlb_current_task(void)
293{
294 struct mm_struct *mm = current->mm;
295
296 preempt_disable();
297
298 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
299
300 /* This is an implicit full barrier that synchronizes with switch_mm. */
301 local_flush_tlb();
302
303 trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
304 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
305 flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
306 preempt_enable();
307}
308
309/* 292/*
310 * See Documentation/x86/tlb.txt for details. We choose 33 293 * See Documentation/x86/tlb.txt for details. We choose 33
311 * because it is large enough to cover the vast majority (at 294 * because it is large enough to cover the vast majority (at