| field | value |
|---|---|
| author | Jan Beulich <jbeulich@novell.com>, 2008-12-16 06:46:58 -0500 |
| committer | Ingo Molnar <mingo@elte.hu>, 2008-12-16 12:47:17 -0500 |
| commit | cfc319833b5b359bf3bce99564dbac00af7925ac (patch) |
| tree | 61351c5b876de2931a002210b880bbe5ee1ec478 /arch/x86 |
| parent | 1796316a8b028a148be48ba5d4e7be493a39d173 (diff) |
x86, 32-bit: improve lazy TLB handling code
Impact: micro-optimize the 32-bit TLB flush code
Use the faster x86_{read,write}_percpu() accessors here.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
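Why the new accessors are faster: `per_cpu(var, cpu)` must first discover the current CPU number and then index a per-CPU offset table before it can touch the data, whereas the x86-32 `x86_{read,write}_percpu()` accessors compile down to a single segment-relative memory operation, since %fs already holds the per-CPU base on 32-bit SMP. Below is a minimal sketch, simplified from the 2.6.28-era percpu headers; the `_sketch` macro names are ours, not the kernel's, and the real macros handle more operand sizes:

```c
/* Set up at boot: byte offset of each CPU's per-CPU area. */
extern unsigned long __per_cpu_offset[];

/*
 * Generic accessor (sketch): needs the CPU number, then an extra
 * table load plus pointer arithmetic before the actual access.
 */
#define per_cpu_sketch(var, cpu) \
	(*(typeof(&(var)))((char *)&(var) + __per_cpu_offset[(cpu)]))

/*
 * x86-32 fast path (sketch): the per-CPU base is already in %fs,
 * so a 32-bit read is one segment-relative mov -- no
 * smp_processor_id() call, no offset-table lookup.
 */
#define x86_read_percpu_sketch(var)				\
({								\
	typeof(var) ret__;					\
	asm("movl %%fs:%1, %0" : "=r" (ret__) : "m" (var));	\
	ret__;							\
})
```

Dropping the `smp_processor_id()` call also lets `enter_lazy_tlb()` lose its local `cpu` variable entirely, which accounts for part of the net two-line shrink in the header file below.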
Diffstat (limited to 'arch/x86')

| mode | file | changed lines |
|---|---|---|
| -rw-r--r-- | arch/x86/include/asm/mmu_context_32.h | 13 |
| -rw-r--r-- | arch/x86/kernel/tlb_32.c | 11 |

2 files changed, 11 insertions(+), 13 deletions(-)
```diff
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
index 8e10015781fb..7e98ce1d2c0e 100644
--- a/arch/x86/include/asm/mmu_context_32.h
+++ b/arch/x86/include/asm/mmu_context_32.h
@@ -4,9 +4,8 @@
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	unsigned cpu = smp_processor_id();
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
+		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
 
@@ -20,8 +19,8 @@ static inline void switch_mm(struct mm_struct *prev,
 		/* stop flush ipis for the previous mm */
 		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		per_cpu(cpu_tlbstate, cpu).active_mm = next;
+		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+		x86_write_percpu(cpu_tlbstate.active_mm, next);
 #endif
 		cpu_set(cpu, next->cpu_vm_mask);
 
@@ -36,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else {
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
 
 		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 			/* We were in lazy tlb mode and leave_mm disabled
```
```diff
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index f4049f3513b6..4290d918b58a 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -34,9 +34,8 @@ static DEFINE_SPINLOCK(tlbstate_lock);
  */
 void leave_mm(int cpu)
 {
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-		BUG();
-	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
+	BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
+	cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -104,8 +103,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	 * BUG();
 	 */
 
-	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
+	if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
+		if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
 			if (flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -238,7 +237,7 @@ static void do_flush_tlb_all(void *info)
 	unsigned long cpu = smp_processor_id();
 
 	__flush_tlb_all();
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
+	if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(cpu);
 }
 
```
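One subtlety in the tlb_32.c hunks: `leave_mm()` still takes a `cpu` argument, yet the segment-relative accessors can only reach the *running* CPU's data. The conversion is therefore only equivalent because every caller invokes `leave_mm()` on the CPU being left. A hypothetical, more defensive variant that spells the assumption out (`leave_mm_sketch` is our illustration, not code from the patch):

```c
/*
 * Hypothetical variant of leave_mm() making the implicit
 * "cpu == current CPU" requirement explicit.  The patched kernel
 * relies on all callers guaranteeing this instead.
 */
void leave_mm_sketch(int cpu)
{
	BUG_ON(cpu != smp_processor_id());	/* implicit in the patch */
	BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
	cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
```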
