 arch/x86/include/asm/tlbflush.h |  9 ---------
 arch/x86/mm/tlb.c               | 17 -----------------
 2 files changed, 0 insertions, 26 deletions
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 75d002bdb3f3..6ed9ea469b48 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -215,7 +215,6 @@ static inline void __flush_tlb_one(unsigned long addr)
 /*
  * TLB flushing:
  *
- *  - flush_tlb() flushes the current mm struct TLBs
  *  - flush_tlb_all() flushes all processes TLBs
  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
  *  - flush_tlb_page(vma, vmaddr) flushes one page
@@ -247,11 +246,6 @@ static inline void flush_tlb_all(void)
 	__flush_tlb_all();
 }
 
-static inline void flush_tlb(void)
-{
-	__flush_tlb_up();
-}
-
 static inline void local_flush_tlb(void)
 {
 	__flush_tlb_up();
@@ -313,14 +307,11 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
 
 extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
-#define flush_tlb() flush_tlb_current_task()
-
 void native_flush_tlb_others(const struct cpumask *cpumask,
 			     struct mm_struct *mm,
 			     unsigned long start, unsigned long end);
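On the UP build, the removed flush_tlb() was identical to the surviving local_flush_tlb(): both expand to __flush_tlb_up(), which in the native (non-paravirt) case boils down to a CR3 reload. A hypothetical, simplified sketch of that mechanism, condensed from the kernel's native_read_cr3()/native_write_cr3() helpers (the function name here is invented; this snippet is not part of the patch):

	/*
	 * Hypothetical sketch, not part of this patch: the full
	 * non-global TLB flush behind __flush_tlb_up() in the native
	 * case is a CR3 reload -- writing CR3 back to itself
	 * invalidates all non-global TLB entries on x86.
	 */
	static inline void cr3_reload_flush(void)
	{
		unsigned long cr3;

		asm volatile("mov %%cr3, %0" : "=r" (cr3));
		asm volatile("mov %0, %%cr3" : : "r" (cr3) : "memory");
	}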
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a7655f6caf7d..92ec37f517ab 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -289,23 +289,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
 }
 
-void flush_tlb_current_task(void)
-{
-	struct mm_struct *mm = current->mm;
-
-	preempt_disable();
-
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-
-	/* This is an implicit full barrier that synchronizes with switch_mm. */
-	local_flush_tlb();
-
-	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
-	preempt_enable();
-}
-
 /*
  * See Documentation/x86/tlb.txt for details.  We choose 33
  * because it is large enough to cover the vast majority (at
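With both definitions gone, a call site that previously used flush_tlb() has to go through the surviving interfaces. A minimal migration sketch, assuming the caller wants to flush the entire address space of its current mm (the wrapper name flush_current_mm() is invented for illustration; VM_NONE is the kernel's zero vm_flags value):

	#include <linux/mm.h>		/* VM_NONE */
	#include <linux/sched.h>	/* current */
	#include <asm/tlbflush.h>	/* flush_tlb_mm_range(), TLB_FLUSH_ALL */

	/*
	 * Hypothetical replacement for a former flush_tlb() call site:
	 * flush every user address in the current mm via the surviving
	 * flush_tlb_mm_range() interface.
	 */
	static void flush_current_mm(void)
	{
		flush_tlb_mm_range(current->mm, 0UL, TLB_FLUSH_ALL, VM_NONE);
	}

On SMP this also shoots down remote CPUs that have the mm loaded, which is what the removed flush_tlb_current_task() did by hand with flush_tlb_others().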
