diff options
Diffstat (limited to 'arch/x86/include/asm/mmu_context.h')
| -rw-r--r-- | arch/x86/include/asm/mmu_context.h | 63 |
1 file changed, 59 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 8aeeb3fd73db..f923203dc39a 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
| @@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev, | |||
| 21 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm); | 21 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm); |
| 22 | void destroy_context(struct mm_struct *mm); | 22 | void destroy_context(struct mm_struct *mm); |
| 23 | 23 | ||
| 24 | #ifdef CONFIG_X86_32 | 24 | |
| 25 | # include "mmu_context_32.h" | 25 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
| 26 | #else | 26 | { |
| 27 | # include "mmu_context_64.h" | 27 | #ifdef CONFIG_SMP |
| 28 | if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) | ||
| 29 | percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); | ||
| 30 | #endif | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
| 34 | struct task_struct *tsk) | ||
| 35 | { | ||
| 36 | unsigned cpu = smp_processor_id(); | ||
| 37 | |||
| 38 | if (likely(prev != next)) { | ||
| 39 | /* stop flush ipis for the previous mm */ | ||
| 40 | cpu_clear(cpu, prev->cpu_vm_mask); | ||
| 41 | #ifdef CONFIG_SMP | ||
| 42 | percpu_write(cpu_tlbstate.state, TLBSTATE_OK); | ||
| 43 | percpu_write(cpu_tlbstate.active_mm, next); | ||
| 28 | #endif | 44 | #endif |
| 45 | cpu_set(cpu, next->cpu_vm_mask); | ||
| 46 | |||
| 47 | /* Re-load page tables */ | ||
| 48 | load_cr3(next->pgd); | ||
| 49 | |||
| 50 | /* | ||
| 51 | * load the LDT, if the LDT is different: | ||
| 52 | */ | ||
| 53 | if (unlikely(prev->context.ldt != next->context.ldt)) | ||
| 54 | load_LDT_nolock(&next->context); | ||
| 55 | } | ||
| 56 | #ifdef CONFIG_SMP | ||
| 57 | else { | ||
| 58 | percpu_write(cpu_tlbstate.state, TLBSTATE_OK); | ||
| 59 | BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); | ||
| 60 | |||
| 61 | if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { | ||
| 62 | /* We were in lazy tlb mode and leave_mm disabled | ||
| 63 | * tlb flush IPI delivery. We must reload CR3 | ||
| 64 | * to make sure to use no freed page tables. | ||
| 65 | */ | ||
| 66 | load_cr3(next->pgd); | ||
| 67 | load_LDT_nolock(&next->context); | ||
| 68 | } | ||
| 69 | } | ||
| 70 | #endif | ||
| 71 | } | ||
| 29 | 72 | ||
| 30 | #define activate_mm(prev, next) \ | 73 | #define activate_mm(prev, next) \ |
| 31 | do { \ | 74 | do { \ |
| @@ -33,5 +76,17 @@ do { \ | |||
| 33 | switch_mm((prev), (next), NULL); \ | 76 | switch_mm((prev), (next), NULL); \ |
| 34 | } while (0); | 77 | } while (0); |
| 35 | 78 | ||
| 79 | #ifdef CONFIG_X86_32 | ||
| 80 | #define deactivate_mm(tsk, mm) \ | ||
| 81 | do { \ | ||
| 82 | lazy_load_gs(0); \ | ||
| 83 | } while (0) | ||
| 84 | #else | ||
| 85 | #define deactivate_mm(tsk, mm) \ | ||
| 86 | do { \ | ||
| 87 | load_gs_index(0); \ | ||
| 88 | loadsegment(fs, 0); \ | ||
| 89 | } while (0) | ||
| 90 | #endif | ||
| 36 | 91 | ||
| 37 | #endif /* _ASM_X86_MMU_CONTEXT_H */ | 92 | #endif /* _ASM_X86_MMU_CONTEXT_H */ |
