-rw-r--r--  arch/x86/kernel/smp_64.c             |  12
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  |   7
-rw-r--r--  arch/x86/mm/boot_ioremap_32.c        |   2
-rw-r--r--  include/asm-x86/cpufeature.h         |   9
-rw-r--r--  include/asm-x86/tlbflush_32.h        | 120
-rw-r--r--  include/asm-x86/tlbflush_64.h        |  78
6 files changed, 128 insertions(+), 100 deletions(-)
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 62b0f2a1b1e8..7142447b5666 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -55,7 +55,6 @@ union smp_flush_state {
 		cpumask_t flush_cpumask;
 		struct mm_struct *flush_mm;
 		unsigned long flush_va;
-#define FLUSH_ALL	-1ULL
 		spinlock_t tlbstate_lock;
 	};
 	char pad[SMP_CACHE_BYTES];
@@ -153,7 +152,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 
 	if (f->flush_mm == read_pda(active_mm)) {
 		if (read_pda(mmu_state) == TLBSTATE_OK) {
-			if (f->flush_va == FLUSH_ALL)
+			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(f->flush_va);
@@ -166,11 +165,12 @@ out:
 	add_pda(irq_tlb_count, 1);
 }
 
-static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 			     unsigned long va)
 {
 	int sender;
 	union smp_flush_state *f;
+	cpumask_t cpumask = *cpumaskp;
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -223,7 +223,7 @@ void flush_tlb_current_task(void)
 
 	local_flush_tlb();
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
@@ -242,7 +242,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 		leave_mm(smp_processor_id());
 	}
 	if (!cpus_empty(cpu_mask))
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
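A note on the signature change above: native_flush_tlb_others() now takes the cpumask by pointer rather than by value, presumably so one prototype can serve both the native path and a paravirt hook without copying a potentially large cpumask_t through the call ABI, and it then takes a private snapshot it is free to modify. A minimal user-space sketch of that pattern, with illustrative types standing in for the kernel's:

#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's cpumask_t; with a large NR_CPUS the real
 * bitmap is hundreds of bytes, which is why a pointer is preferable. */
typedef struct { unsigned long bits[64]; } cpumask_t;

static void flush_tlb_others(const cpumask_t *cpumaskp)
{
        cpumask_t cpumask = *cpumaskp;  /* private snapshot, as in the patch */

        /* ... a real implementation would queue the flush request and
         * IPI the CPUs set in cpumask, clearing bits as they answer ... */
        printf("first word of mask: %#lx\n", cpumask.bits[0]);
}

int main(void)
{
        cpumask_t mask;

        memset(&mask, 0, sizeof(mask));
        mask.bits[0] = 0x6;     /* CPUs 1 and 2 */
        flush_tlb_others(&mask);
        return 0;
}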
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 981def2b4e9b..b472a2df0b7f 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -800,7 +800,6 @@ static void smp_reschedule_interrupt(void)
 static struct mm_struct *flush_mm;
 static unsigned long flush_va;
 static DEFINE_SPINLOCK(tlbstate_lock);
-#define FLUSH_ALL	0xffffffff
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
@@ -834,7 +833,7 @@ static void smp_invalidate_interrupt(void)
 
 	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
 		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-			if (flush_va == FLUSH_ALL)
+			if (flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
 				__flush_tlb_one(flush_va);
@@ -903,7 +902,7 @@ void flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -923,7 +922,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
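Both files previously carried a private FLUSH_ALL sentinel (-1ULL on 64-bit, 0xffffffff on Voyager's 32-bit), which this patch replaces with one TLB_FLUSH_ALL per tlbflush header. The idea is an all-ones "virtual address" that can never be a legitimate flush target, so the IPI handler can distinguish a single-page flush from a full flush. A runnable sketch of that dispatch, with print stubs in place of the real flush primitives:

#include <stdio.h>

/* All-ones in the native word size, matching the per-header definitions
 * (0xffffffff on 32-bit, -1ULL on 64-bit). */
#define TLB_FLUSH_ALL   (~0UL)

static void local_flush_tlb(void)           { puts("reload CR3: drop whole TLB"); }
static void flush_tlb_one(unsigned long va) { printf("invlpg %#lx\n", va); }

/* Mirrors the branch in smp_invalidate_interrupt() above. */
static void handle_flush_request(unsigned long flush_va)
{
        if (flush_va == TLB_FLUSH_ALL)
                local_flush_tlb();
        else
                flush_tlb_one(flush_va);
}

int main(void)
{
        handle_flush_request(0x1000);
        handle_flush_request(TLB_FLUSH_ALL);
        return 0;
}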
diff --git a/arch/x86/mm/boot_ioremap_32.c b/arch/x86/mm/boot_ioremap_32.c
index f14da2a53ece..b20f74a2770f 100644
--- a/arch/x86/mm/boot_ioremap_32.c
+++ b/arch/x86/mm/boot_ioremap_32.c
@@ -57,7 +57,7 @@ static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages,
 	pte = boot_vaddr_to_pte(virtual_source);
 	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
 		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
-		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
+		__flush_tlb_one((unsigned long) &vaddr[i*PAGE_SIZE]);
 	}
 }
 
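This one-liner is fallout from the header change further down: the old __flush_tlb_one() macro swallowed any pointer-shaped argument, but the new static inline takes an unsigned long, so the pointer now needs an explicit conversion. A tiny sketch of the difference (illustrative names only):

#include <stdio.h>

static inline void flush_one(unsigned long addr)
{
        printf("invlpg %#lx\n", addr);
}

int main(void)
{
        char buf[4096];

        /* flush_one(buf); would now draw a pointer-vs-integer diagnostic */
        flush_one((unsigned long) &buf[0]);
        return 0;
}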
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 4c7875554d01..acbf6681740d 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -163,6 +163,12 @@
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLSH)
 #define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
 
+#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
+# define cpu_has_invlpg		1
+#else
+# define cpu_has_invlpg		(boot_cpu_data.x86 > 3)
+#endif
+
 #ifdef CONFIG_X86_64
 
 #undef  cpu_has_vme
@@ -183,6 +189,9 @@
 #undef  cpu_has_centaur_mcr
 #define cpu_has_centaur_mcr	0
 
+#undef  cpu_has_pge
+#define cpu_has_pge		1
+
 #endif /* CONFIG_X86_64 */
 
 #endif /* _ASM_X86_CPUFEATURE_H */
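cpu_has_invlpg moves into cpufeature.h so both tlbflush headers can share it, and it becomes the literal constant 1 whenever the build targets CPUs guaranteed to have INVLPG (any 64-bit CPU, or a 32-bit config with CONFIG_X86_INVLPG); only a generic 32-bit build keeps the runtime family check (family > 3, i.e. i486 or later). Because the test can be a compile-time 1, the fallback branch in __flush_tlb_one() is dead code the optimizer deletes. A hedged stand-alone illustration of that folding:

#include <stdio.h>

/* Compile-time constant on builds that guarantee the instruction;
 * runtime check otherwise. Names and values are illustrative. */
#if defined(__x86_64__)
# define cpu_has_invlpg 1
#else
static int cpu_family = 6;              /* pretend runtime-detected family */
# define cpu_has_invlpg (cpu_family > 3)
#endif

static void flush_tlb_one(unsigned long addr)
{
        if (cpu_has_invlpg)
                printf("invlpg %#lx\n", addr); /* only branch left on 64-bit */
        else
                puts("no invlpg: full TLB reload");
}

int main(void)
{
        flush_tlb_one(0x1000);
        return 0;
}

The cpu_has_pge override works the same way in the opposite direction: on 64-bit it is forced to 1, so __flush_tlb_all() always takes the global-flush path.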
diff --git a/include/asm-x86/tlbflush_32.h b/include/asm-x86/tlbflush_32.h
index 2bd5b95e2048..9e07cc8f2d94 100644
--- a/include/asm-x86/tlbflush_32.h
+++ b/include/asm-x86/tlbflush_32.h
@@ -1,8 +1,11 @@
-#ifndef _I386_TLBFLUSH_H
-#define _I386_TLBFLUSH_H
+#ifndef _X86_TLBFLUSH_H
+#define _X86_TLBFLUSH_H
 
 #include <linux/mm.h>
+#include <linux/sched.h>
+
 #include <asm/processor.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -12,62 +15,41 @@
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
-#define __native_flush_tlb() \
-	do { \
-		unsigned int tmpreg; \
-		\
-		__asm__ __volatile__( \
-			"movl %%cr3, %0; \n" \
-			"movl %0, %%cr3; # flush TLB \n" \
-			: "=r" (tmpreg) \
-			:: "memory"); \
-	} while (0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __native_flush_tlb_global() \
-	do { \
-		unsigned int tmpreg, cr4, cr4_orig; \
-		\
-		__asm__ __volatile__( \
-			"movl %%cr4, %2; # turn off PGE \n" \
-			"movl %2, %1; \n" \
-			"andl %3, %1; \n" \
-			"movl %1, %%cr4; \n" \
-			"movl %%cr3, %0; \n" \
-			"movl %0, %%cr3; # flush TLB \n" \
-			"movl %2, %%cr4; # turn PGE back on \n" \
-			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
-			: "i" (~X86_CR4_PGE) \
-			: "memory"); \
-	} while (0)
-
-#define __native_flush_tlb_single(addr) \
-	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
-
-# define __flush_tlb_all() \
-	do { \
-		if (cpu_has_pge) \
-			__flush_tlb_global(); \
-		else \
-			__flush_tlb(); \
-	} while (0)
-
-#define cpu_has_invlpg	(boot_cpu_data.x86 > 3)
-
-#ifdef CONFIG_X86_INVLPG
-# define __flush_tlb_one(addr) __flush_tlb_single(addr)
-#else
-# define __flush_tlb_one(addr) \
-	do { \
-		if (cpu_has_invlpg) \
-			__flush_tlb_single(addr); \
-		else \
-			__flush_tlb(); \
-	} while (0)
-#endif
+static inline void __native_flush_tlb(void)
+{
+	write_cr3(read_cr3());
+}
+
+static inline void __native_flush_tlb_global(void)
+{
+	unsigned long cr4 = read_cr4();
+
+	/* clear PGE */
+	write_cr4(cr4 & ~X86_CR4_PGE);
+	/* write old PGE again and flush TLBs */
+	write_cr4(cr4);
+}
+
+static inline void __native_flush_tlb_single(unsigned long addr)
+{
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	if (cpu_has_pge)
+		__flush_tlb_global();
+	else
+		__flush_tlb();
+}
+
+static inline void __flush_tlb_one(unsigned long addr)
+{
+	if (cpu_has_invlpg)
+		__flush_tlb_single(addr);
+	else
+		__flush_tlb();
+}
 
 /*
  * TLB flushing:
@@ -86,11 +68,8 @@
 
 #define TLB_FLUSH_ALL	0xffffffff
 
-
 #ifndef CONFIG_SMP
 
-#include <linux/sched.h>
-
 #define flush_tlb() __flush_tlb()
 #define flush_tlb_all() __flush_tlb_all()
 #define local_flush_tlb() __flush_tlb()
@@ -102,21 +81,22 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
+				  unsigned long addr)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb_one(addr);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
+				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb();
 }
 
 static inline void native_flush_tlb_others(const cpumask_t *cpumask,
-	struct mm_struct *mm, unsigned long va)
+					   struct mm_struct *mm,
+					   unsigned long va)
 {
 }
 
@@ -124,8 +104,7 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,
 
 #include <asm/smp.h>
 
-#define local_flush_tlb() \
-	__flush_tlb()
+#define local_flush_tlb() __flush_tlb()
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
@@ -134,7 +113,8 @@ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 
 #define flush_tlb() flush_tlb_current_task()
 
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	flush_tlb_mm(vma->vm_mm);
 }
@@ -152,17 +132,17 @@ struct tlb_state
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
 #endif	/* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) \
-	native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
+					  unsigned long end)
 {
 	flush_tlb_all();
 }
 
-#endif /* _I386_TLBFLUSH_H */
+#endif /* _X86_TLBFLUSH_H */
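The bulk of this hunk converts do { } while (0) macros into static inline functions, replacing the open-coded CR3/CR4 asm with the read_cr3()/write_cr4() accessors. Behavior is unchanged, but inline functions type-check their arguments and evaluate them exactly once, which the old macros could not guarantee. A small sketch of the single-evaluation difference (illustrative names, not kernel code):

#include <stdio.h>

static unsigned long next_va(void)
{
        puts("side effect: computed next va");
        return 0x2000;
}

/* A macro that mentions its argument twice evaluates it twice... */
#define FLUSH_PAIR_MACRO(va) \
        do { printf("%#lx\n", (va)); printf("%#lx\n", (va) + 4096); } while (0)

/* ...while an inline function evaluates it exactly once. */
static inline void flush_pair_inline(unsigned long va)
{
        printf("%#lx\n", va);
        printf("%#lx\n", va + 4096);
}

int main(void)
{
        FLUSH_PAIR_MACRO(next_va());    /* "side effect" prints twice */
        flush_pair_inline(next_va());   /* prints once */
        return 0;
}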
diff --git a/include/asm-x86/tlbflush_64.h b/include/asm-x86/tlbflush_64.h
index 7731fd23d572..0bed440ba9fe 100644
--- a/include/asm-x86/tlbflush_64.h
+++ b/include/asm-x86/tlbflush_64.h
@@ -1,26 +1,55 @@
-#ifndef _X8664_TLBFLUSH_H
-#define _X8664_TLBFLUSH_H
+#ifndef _X86_TLBFLUSH_H
+#define _X86_TLBFLUSH_H
 
 #include <linux/mm.h>
 #include <linux/sched.h>
+
 #include <asm/processor.h>
 #include <asm/system.h>
 
-static inline void __flush_tlb(void)
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+static inline void __native_flush_tlb(void)
 {
 	write_cr3(read_cr3());
 }
 
-static inline void __flush_tlb_all(void)
+static inline void __native_flush_tlb_global(void)
 {
 	unsigned long cr4 = read_cr4();
-	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
-	write_cr4(cr4);			/* write old PGE again and flush TLBs */
+
+	/* clear PGE */
+	write_cr4(cr4 & ~X86_CR4_PGE);
+	/* write old PGE again and flush TLBs */
+	write_cr4(cr4);
 }
 
-#define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
+static inline void __native_flush_tlb_single(unsigned long addr)
+{
+	__asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory");
+}
+
+static inline void __flush_tlb_all(void)
+{
+	if (cpu_has_pge)
+		__flush_tlb_global();
+	else
+		__flush_tlb();
+}
 
+static inline void __flush_tlb_one(unsigned long addr)
+{
+	if (cpu_has_invlpg)
+		__flush_tlb_single(addr);
+	else
+		__flush_tlb();
+}
 
 /*
  * TLB flushing:
@@ -37,6 +66,8 @@ static inline void __flush_tlb_all(void)
  * range a few INVLPGs in a row are a win.
  */
 
+#define TLB_FLUSH_ALL	-1ULL
+
 #ifndef CONFIG_SMP
 
 #define flush_tlb() __flush_tlb()
@@ -50,25 +81,30 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
-	unsigned long addr)
+				  unsigned long addr)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb_one(addr);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
+				   unsigned long start, unsigned long end)
 {
 	if (vma->vm_mm == current->active_mm)
 		__flush_tlb();
 }
 
-#else
+static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+					   struct mm_struct *mm,
+					   unsigned long va)
+{
+}
+
+#else	/* SMP */
 
 #include <asm/smp.h>
 
-#define local_flush_tlb() \
-	__flush_tlb()
+#define local_flush_tlb() __flush_tlb()
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_current_task(void);
@@ -77,24 +113,28 @@ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
 
 #define flush_tlb() flush_tlb_current_task()
 
-static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
 {
 	flush_tlb_mm(vma->vm_mm);
 }
 
+void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
+			     unsigned long va);
+
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
-/* Roughly an IPI every 20MB with 4k pages for freeing page table
-   ranges. Cost is about 42k of memory for each CPU. */
-#define ARCH_FREE_PTE_NR 5350
+#endif	/* SMP */
 
+#ifndef CONFIG_PARAVIRT
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
+					  unsigned long end)
 {
 	flush_tlb_all();
 }
 
-#endif /* _X8664_TLBFLUSH_H */
+#endif /* _X86_TLBFLUSH_H */
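One last gloss on the helpers both headers now share: reloading CR3 does not evict TLB entries for global (PGE) pages, so __native_flush_tlb_global() clears CR4.PGE and writes the old value back, which the architecture defines as flushing the entire TLB, global entries included. A user-space model of that sequence, with stubs standing in for the privileged register accessors:

#include <stdio.h>

#define X86_CR4_PGE     0x80UL          /* CR4 bit 7: global pages enabled */

static unsigned long fake_cr4 = X86_CR4_PGE;    /* stand-in for the register */

static unsigned long read_cr4(void) { return fake_cr4; }

static void write_cr4(unsigned long val)
{
        fake_cr4 = val;
        printf("cr4 = %#lx%s\n", val,
               (val & X86_CR4_PGE) ? ""
                                   : "  <- PGE cleared: all entries flushed");
}

/* Mirrors __native_flush_tlb_global() from the headers above. */
static void flush_tlb_global(void)
{
        unsigned long cr4 = read_cr4();

        write_cr4(cr4 & ~X86_CR4_PGE);  /* clear PGE */
        write_cr4(cr4);                 /* write old PGE again and flush TLBs */
}

int main(void)
{
        flush_tlb_global();
        return 0;
}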