Diffstat (limited to 'arch/x86/include/asm/mmu_context_64.h')
 arch/x86/include/asm/mmu_context_64.h | 54 ++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
new file mode 100644
index 000000000000..26758673c828
--- /dev/null
+++ b/arch/x86/include/asm/mmu_context_64.h
@@ -0,0 +1,54 @@
+#ifndef ASM_X86__MMU_CONTEXT_64_H
+#define ASM_X86__MMU_CONTEXT_64_H
+
+#include <asm/pda.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+	if (read_pda(mmu_state) == TLBSTATE_OK)
+		write_pda(mmu_state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned cpu = smp_processor_id();
+	if (likely(prev != next)) {
+		/* stop flush ipis for the previous mm */
+		cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+		write_pda(mmu_state, TLBSTATE_OK);
+		write_pda(active_mm, next);
+#endif
+		cpu_set(cpu, next->cpu_vm_mask);
+		load_cr3(next->pgd);
+
+		if (unlikely(next->context.ldt != prev->context.ldt))
+			load_LDT_nolock(&next->context);
+	}
+#ifdef CONFIG_SMP
+	else {
+		write_pda(mmu_state, TLBSTATE_OK);
+		if (read_pda(active_mm) != next)
+			BUG();
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+			/* We were in lazy tlb mode and leave_mm disabled
+			 * tlb flush IPI delivery. We must reload CR3
+			 * to make sure to use no freed page tables.
+			 */
+			load_cr3(next->pgd);
+			load_LDT_nolock(&next->context);
+		}
+	}
+#endif
+}
+
+#define deactivate_mm(tsk, mm)			\
+do {						\
+	load_gs_index(0);			\
+	asm volatile("movl %0,%%fs"::"r"(0));	\
+} while (0)
+
+#endif /* ASM_X86__MMU_CONTEXT_64_H */
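For reference, here is a minimal user-space model of the lazy-TLB state machine this header implements, illustrating the race described by the comment in switch_mm(). Everything suffixed _model, the two-element pda[] array, and the cr3_reloaded flag are illustrative inventions for this sketch, not kernel APIs; it only mimics what read_pda()/write_pda(), cpu_clear()/cpu_test_and_set(), leave_mm() and load_cr3() do in the real code. Build and run with: cc -std=c99 -o lazy_tlb lazy_tlb.c && ./lazy_tlb

	/* Hypothetical model of the lazy-TLB handoff; not kernel code. */
	#include <stdio.h>
	#include <stdbool.h>

	enum { TLBSTATE_OK = 1, TLBSTATE_LAZY = 2 };

	struct mm_model {
		const char *name;
		unsigned long cpu_vm_mask;	/* stands in for mm->cpu_vm_mask */
	};

	/* stands in for the per-CPU PDA fields read_pda()/write_pda() touch */
	struct pda_model {
		int mmu_state;
		struct mm_model *active_mm;
		bool cr3_reloaded;		/* records whether load_cr3() ran */
	};

	static struct pda_model pda[2];		/* two model CPUs */

	/* mirrors enter_lazy_tlb(): mark this CPU lazy, leave CR3 alone */
	static void enter_lazy_tlb_model(int cpu)
	{
		if (pda[cpu].mmu_state == TLBSTATE_OK)
			pda[cpu].mmu_state = TLBSTATE_LAZY;
	}

	/* mirrors leave_mm(): a lazy CPU opts out of further flush IPIs by
	 * clearing its bit (the real one also switches CR3 to the kernel
	 * page tables; omitted here) */
	static void leave_mm_model(int cpu)
	{
		pda[cpu].active_mm->cpu_vm_mask &= ~(1UL << cpu);
	}

	/* a flush IPI arriving at a lazy CPU makes it leave the mm */
	static void flush_ipi_model(int cpu, struct mm_model *mm)
	{
		if (pda[cpu].active_mm == mm && pda[cpu].mmu_state != TLBSTATE_OK)
			leave_mm_model(cpu);
	}

	/* mirrors the prev == next branch of switch_mm() above */
	static void switch_mm_same_model(int cpu, struct mm_model *mm)
	{
		pda[cpu].mmu_state = TLBSTATE_OK;
		unsigned long bit = 1UL << cpu;
		if (!(mm->cpu_vm_mask & bit)) {
			/* our bit was cleared by leave_mm: page tables may
			 * have been freed meanwhile, so CR3 must be reloaded */
			mm->cpu_vm_mask |= bit;
			pda[cpu].cr3_reloaded = true;
		}
	}

	int main(void)
	{
		struct mm_model mm = { "mm", 1UL << 0 };

		pda[0] = (struct pda_model){ TLBSTATE_OK, &mm, false };
		enter_lazy_tlb_model(0);	/* CPU 0 runs a kernel thread */
		flush_ipi_model(0, &mm);	/* another CPU unmaps pages */
		switch_mm_same_model(0, &mm);	/* CPU 0 returns to the same mm */
		printf("CR3 reloaded: %s\n", pda[0].cr3_reloaded ? "yes" : "no");
		return 0;
	}

Running the model prints "CR3 reloaded: yes": because the flush IPI arrived while CPU 0 was lazy, leave_mm cleared its bit in cpu_vm_mask, and the clear bit observed by cpu_test_and_set() on the way back into the same mm is exactly the signal switch_mm() uses to know a stale CR3 might point at freed page tables.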