Diffstat (limited to 'include/asm-i386/mmu_context.h')
-rw-r--r--	include/asm-i386/mmu_context.h	84
1 file changed, 0 insertions, 84 deletions
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
deleted file mode 100644
index 8198d1cca1f3..000000000000
--- a/include/asm-i386/mmu_context.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef __I386_SCHED_H
-#define __I386_SCHED_H
-
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/paravirt.h>
-#ifndef CONFIG_PARAVIRT
-#include <asm-generic/mm_hooks.h>
-
-static inline void paravirt_activate_mm(struct mm_struct *prev,
-					struct mm_struct *next)
-{
-}
-#endif	/* !CONFIG_PARAVIRT */
-
-
-/*
- * Used for LDT copy/destruction.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-	unsigned cpu = smp_processor_id();
-	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-			     struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-	int cpu = smp_processor_id();
-
-	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		per_cpu(cpu_tlbstate, cpu).active_mm = next;
-#endif
-		cpu_set(cpu, next->cpu_vm_mask);
-
-		/* Re-load page tables */
-		load_cr3(next->pgd);
-
-		/*
-		 * load the LDT, if the LDT is different:
-		 */
-		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
-	}
-#ifdef CONFIG_SMP
-	else {
-		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
-		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
-
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-			/* We were in lazy tlb mode and leave_mm disabled
-			 * tlb flush IPI delivery. We must reload %cr3.
-			 */
-			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context);
-		}
-	}
-#endif
-}
-
-#define deactivate_mm(tsk, mm)			\
-	asm("movl %0,%%gs": :"r" (0));
-
-#define activate_mm(prev, next)			\
-	do {					\
-		paravirt_activate_mm(prev, next);	\
-		switch_mm((prev),(next),NULL);		\
-	} while(0);
-
-#endif