Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/mmu_context_64.h | 16 +++++++---------
-rw-r--r--  arch/x86/include/asm/pda.h            |  2 --
-rw-r--r--  arch/x86/include/asm/tlbflush.h       |  7 ++-----
-rw-r--r--  arch/x86/kernel/cpu/common.c          |  2 --
-rw-r--r--  arch/x86/kernel/tlb_32.c              | 12 ++----------
-rw-r--r--  arch/x86/kernel/tlb_64.c              | 13 ++++++++-----
-rw-r--r--  arch/x86/xen/mmu.c                    |  6 +-----
7 files changed, 20 insertions(+), 38 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
index 677d36e9540a..c4572505ab3e 100644
--- a/arch/x86/include/asm/mmu_context_64.h
+++ b/arch/x86/include/asm/mmu_context_64.h
@@ -1,13 +1,11 @@
 #ifndef _ASM_X86_MMU_CONTEXT_64_H
 #define _ASM_X86_MMU_CONTEXT_64_H
 
-#include <asm/pda.h>
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	if (read_pda(mmu_state) == TLBSTATE_OK)
-		write_pda(mmu_state, TLBSTATE_LAZY);
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
 
@@ -19,8 +17,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		/* stop flush ipis for the previous mm */
 		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
-		write_pda(mmu_state, TLBSTATE_OK);
-		write_pda(active_mm, next);
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
 		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);
@@ -30,9 +28,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	}
 #ifdef CONFIG_SMP
 	else {
-		write_pda(mmu_state, TLBSTATE_OK);
-		if (read_pda(active_mm) != next)
-			BUG();
+		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
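Note: percpu_read()/percpu_write() act on the executing CPU's copy of cpu_tlbstate with a single segment-register-relative access (%gs on 64-bit), so swapping them in for read_pda()/write_pda() does not add a preempt_disable()/enable() pair on these paths. A minimal sketch of what the accessor amounts to, spelled with the generic per_cpu() interface; the helper name is hypothetical and not part of this patch:

/*
 * Illustration only: roughly what percpu_read(cpu_tlbstate.state)
 * returns, written with the generic per-cpu API. The callers above
 * (switch_mm(), the flush IPI handler) already run with preemption
 * disabled, so reading "this CPU's" slot is stable.
 */
static inline int tlbstate_sketch(void)
{
	return per_cpu(cpu_tlbstate, raw_smp_processor_id()).state;
}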
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index 69a40757e217..8ee835ed10e1 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -25,9 +25,7 @@ struct x8664_pda {
 	char *irqstackptr;
 	short nodenumber;		/* number of current node (32k max) */
 	short in_bootmem;		/* pda lives in bootmem */
-	short mmu_state;
 	short isidle;
-	struct mm_struct *active_mm;
 } ____cacheline_aligned_in_smp;
 
 DECLARE_PER_CPU(struct x8664_pda, __pda);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 17feaa9c7e76..d3539f998f88 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -148,20 +148,17 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
 
-#ifdef CONFIG_X86_32
 struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
-	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 
-void reset_lazy_tlbstate(void);
-#else
 static inline void reset_lazy_tlbstate(void)
 {
+	percpu_write(cpu_tlbstate.state, 0);
+	percpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
-#endif
 
 #endif	/* SMP */
 
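With the CONFIG_X86_32 conditional gone, 32-bit and 64-bit now share one tlb_state definition and one inline reset_lazy_tlbstate(); the explicit __cacheline_padding member is no longer needed because the DEFINE_PER_CPU_SHARED_ALIGNED definitions in the tlb_32.c and tlb_64.c hunks below place the variable in the cacheline-aligned per-cpu section. For readability, the post-patch form of this block (taken from the '+' side of the hunk above, not new code) reads:

#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);

/* Reset this CPU's lazy-TLB bookkeeping back to init_mm. */
static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}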
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c49498d40830..3d0cc6f17116 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -897,8 +897,6 @@ void __cpuinit pda_init(int cpu)
 	pda->irqcount = -1;
 	pda->kernelstack = (unsigned long)stack_thread_info() -
				 PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
 
 	if (cpu == 0) {
 		/* others are initialized in smpboot.c */
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index e65449d0f7d9..abf0808d6fc4 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -4,8 +4,8 @@
 
 #include <asm/tlbflush.h>
 
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
-			____cacheline_aligned = { &init_mm, 0, };
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+			= { &init_mm, 0, };
 
 /* must come after the send_IPI functions above for inlining */
 #include <mach_ipi.h>
@@ -231,14 +231,6 @@ void flush_tlb_all(void)
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
 
-void reset_lazy_tlbstate(void)
-{
-	int cpu = raw_smp_processor_id();
-
-	per_cpu(cpu_tlbstate, cpu).state = 0;
-	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
-
 static int init_flush_cpumask(void)
 {
 	alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index 7f4141d3b661..e64a32c48825 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -18,6 +18,9 @@
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_bau.h>
 
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+			= { &init_mm, 0, };
+
 #include <mach_ipi.h>
 /*
  *	Smarter SMP flushing macros.
@@ -62,9 +65,9 @@ static DEFINE_PER_CPU(union smp_flush_state, flush_state);
  */
 void leave_mm(int cpu)
 {
-	if (read_pda(mmu_state) == TLBSTATE_OK)
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
+	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -142,8 +145,8 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
 	 *	BUG();
 	 */
 
-	if (f->flush_mm == read_pda(active_mm)) {
-		if (read_pda(mmu_state) == TLBSTATE_OK) {
+	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -281,7 +284,7 @@ static void do_flush_tlb_all(void *info)
 	unsigned long cpu = smp_processor_id();
 
 	__flush_tlb_all();
-	if (read_pda(mmu_state) == TLBSTATE_LAZY)
+	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(cpu);
 }
 
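The state machine these hunks consult: a CPU whose cpu_tlbstate.state is TLBSTATE_OK must honour a flush request for its active mm immediately, while a TLBSTATE_LAZY CPU may instead call leave_mm(), which loads swapper_pg_dir and clears the CPU from the mm's cpu_vm_mask so it receives no further flush IPIs for that mm. A condensed sketch of the decision made in the flush IPI handler with the new per-cpu fields; the leave_mm() fallback is the handler's existing else branch (not visible in the hunk), and the function name here is hypothetical:

/*
 * Sketch of smp_invalidate_interrupt()'s per-cpu decision using
 * cpu_tlbstate; not a complete handler.
 */
static void flush_decision_sketch(struct mm_struct *flush_mm,
				  unsigned long flush_va)
{
	if (flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			/* This CPU is actively using the mm: flush now. */
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else {
			/* Lazy: detach so no more IPIs arrive for this mm. */
			leave_mm(smp_processor_id());
		}
	}
}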
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 7bc7852cc5c4..98cb9869eb24 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1063,11 +1063,7 @@ static void drop_other_mm_ref(void *info)
 	struct mm_struct *mm = info;
 	struct mm_struct *active_mm;
 
-#ifdef CONFIG_X86_64
-	active_mm = read_pda(active_mm);
-#else
-	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
-#endif
+	active_mm = percpu_read(cpu_tlbstate.active_mm);
 
 	if (active_mm == mm)
 		leave_mm(smp_processor_id());