Diffstat (limited to 'include/asm-x86/tlbflush_64.h')
-rw-r--r-- | include/asm-x86/tlbflush_64.h | 100
1 file changed, 0 insertions, 100 deletions
diff --git a/include/asm-x86/tlbflush_64.h b/include/asm-x86/tlbflush_64.h
deleted file mode 100644
index 7731fd23d572..000000000000
--- a/include/asm-x86/tlbflush_64.h
+++ /dev/null
@@ -1,100 +0,0 @@
-#ifndef _X8664_TLBFLUSH_H
-#define _X8664_TLBFLUSH_H
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-
-static inline void __flush_tlb(void)
-{
-	write_cr3(read_cr3());
-}
-
-static inline void __flush_tlb_all(void)
-{
-	unsigned long cr4 = read_cr4();
-	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
-	write_cr4(cr4);			/* write old PGE again and flush TLBs */
-}
-
-#define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
-
-
-/*
- * TLB flushing:
- *
- *  - flush_tlb() flushes the current mm struct TLBs
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm == current->active_mm)
-		__flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb();
-}
-
-#else
-
-#include <asm/smp.h>
-
-#define local_flush_tlb() \
-	__flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
-{
-	flush_tlb_mm(vma->vm_mm);
-}
-
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
-/* Roughly an IPI every 20MB with 4k pages for freeing page table
-   ranges. Cost is about 42k of memory for each CPU. */
-#define ARCH_FREE_PTE_NR 5350
-
-#endif
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
-{
-	flush_tlb_all();
-}
-
-#endif /* _X8664_TLBFLUSH_H */
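
The comment block in the removed header documents the generic interface (flush_tlb(), flush_tlb_mm(), flush_tlb_page(), flush_tlb_range(), flush_tlb_kernel_range()). As a minimal sketch of how a caller consumes that interface, not part of this commit, here is a hypothetical remap_user_page() helper that rewrites one user PTE and then invalidates the stale translation for that address:

/*
 * Illustrative sketch only; remap_user_page() is a hypothetical caller,
 * and the PTE update is simplified to a bare set_pte().  The point is
 * the ordering: install the new mapping first, then flush the one page.
 */
static void remap_user_page(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep, pte_t newpte)
{
	set_pte(ptep, newpte);		/* install the new translation */
	flush_tlb_page(vma, addr);	/* drop any stale TLB entry for addr */
}

On !CONFIG_SMP kernels flush_tlb_page() reduces to a single INVLPG via __flush_tlb_one() when the mm is active; on SMP the extern flush_tlb_page() declared above performs the cross-CPU shootdown.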