Diffstat (limited to 'include/asm-x86/tlbflush.h')
-rw-r--r--  include/asm-x86/tlbflush.h | 168
1 file changed, 0 insertions, 168 deletions
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
deleted file mode 100644
index 35c76ceb9f40..000000000000
--- a/include/asm-x86/tlbflush.h
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifndef _ASM_X86_TLBFLUSH_H
-#define _ASM_X86_TLBFLUSH_H
-
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-#include <asm/processor.h>
-#include <asm/system.h>
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define __flush_tlb() __native_flush_tlb()
-#define __flush_tlb_global() __native_flush_tlb_global()
-#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
-#endif
-
-static inline void __native_flush_tlb(void)
-{
-        write_cr3(read_cr3());
-}
-
-static inline void __native_flush_tlb_global(void)
-{
-        unsigned long flags;
-        unsigned long cr4;
-
-        /*
-         * Read-modify-write to CR4 - protect it from preemption and
-         * from interrupts. (Use the raw variant because this code can
-         * be called from deep inside debugging code.)
-         */
-        raw_local_irq_save(flags);
-
-        cr4 = read_cr4();
-        /* clear PGE */
-        write_cr4(cr4 & ~X86_CR4_PGE);
-        /* write old PGE again and flush TLBs */
-        write_cr4(cr4);
-
-        raw_local_irq_restore(flags);
-}
-
-static inline void __native_flush_tlb_single(unsigned long addr)
-{
-        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
-}
-
-static inline void __flush_tlb_all(void)
-{
-        if (cpu_has_pge)
-                __flush_tlb_global();
-        else
-                __flush_tlb();
-}
-
-static inline void __flush_tlb_one(unsigned long addr)
-{
-        if (cpu_has_invlpg)
-                __flush_tlb_single(addr);
-        else
-                __flush_tlb();
-}
-
-#ifdef CONFIG_X86_32
-# define TLB_FLUSH_ALL 0xffffffff
-#else
-# define TLB_FLUSH_ALL -1ULL
-#endif
-
-/*
- * TLB flushing:
- *
- *  - flush_tlb() flushes the current mm struct TLBs
- *  - flush_tlb_all() flushes all processes TLBs
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus
- *
- * ..but the i386 has somewhat limited tlb flushing capabilities,
- * and page-granular flushes are available only on i486 and up.
- *
- * x86-64 can only flush individual pages or full VMs. For a range flush
- * we always do the full VM. Might be worth trying if for a small
- * range a few INVLPGs in a row are a win.
- */
-
-#ifndef CONFIG_SMP
-
-#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb_all()
-#define local_flush_tlb() __flush_tlb()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-        if (mm == current->active_mm)
-                __flush_tlb();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-                                  unsigned long addr)
-{
-        if (vma->vm_mm == current->active_mm)
-                __flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-                                   unsigned long start, unsigned long end)
-{
-        if (vma->vm_mm == current->active_mm)
-                __flush_tlb();
-}
-
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
-                                           struct mm_struct *mm,
-                                           unsigned long va)
-{
-}
-
-#else  /* SMP */
-
-#include <asm/smp.h>
-
-#define local_flush_tlb() __flush_tlb()
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-
-#define flush_tlb() flush_tlb_current_task()
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-                                   unsigned long start, unsigned long end)
-{
-        flush_tlb_mm(vma->vm_mm);
-}
-
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-                             unsigned long va);
-
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
-#ifdef CONFIG_X86_32
-struct tlb_state {
-        struct mm_struct *active_mm;
-        int state;
-        char __cacheline_padding[L1_CACHE_BYTES-8];
-};
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
-#endif
-
-#endif /* SMP */
-
-#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
-#endif
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-                                          unsigned long end)
-{
-        flush_tlb_all();
-}
-
-#endif /* _ASM_X86_TLBFLUSH_H */
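
For orientation only, and not part of the removed header or of this diff: a minimal sketch of how a caller typically uses the API above. The function name flush_tlb_kernel_range() comes from the header; the wrapper function, its arguments, and the kernel-code context are assumptions made purely for illustration.

/*
 * Hypothetical caller, for illustration only (not part of this diff).
 * After changing kernel page-table entries for a range of kernel
 * virtual addresses, the stale TLB entries must be invalidated.
 */
#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_update_kernel_mapping(unsigned long start,
                                          unsigned long end)
{
        /* ... modify the kernel page tables for [start, end) here ... */

        /*
         * Make the new translations visible.  Per the header above,
         * flush_tlb_kernel_range() simply calls flush_tlb_all(), which
         * either toggles CR4.PGE (__flush_tlb_global) or reloads CR3
         * (__flush_tlb), depending on CPU features.
         */
        flush_tlb_kernel_range(start, end);
}

On UP kernels this ends up in the inline __flush_tlb_all() shown above; on SMP kernels it goes through the extern flush_tlb_all(), which also flushes the other CPUs.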