diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 05:20:03 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2007-10-11 05:20:03 -0400 |
commit | 96a388de5dc53a8b234b3fd41f3ae2cedc9ffd42 (patch) | |
tree | d947a467aa2da3140279617bc4b9b101640d7bf4 /include/asm-x86/mmu_context_32.h | |
parent | 27bd0c955648646abf2a353a8371d28c37bcd982 (diff) |
i386/x86_64: move headers to include/asm-x86
Move the headers to include/asm-x86 and fixup the
header install make rules
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/mmu_context_32.h')
-rw-r--r-- | include/asm-x86/mmu_context_32.h | 86 |
1 file changed, 86 insertions, 0 deletions
diff --git a/include/asm-x86/mmu_context_32.h b/include/asm-x86/mmu_context_32.h new file mode 100644 index 00000000000..7eb0b0b1fb3 --- /dev/null +++ b/include/asm-x86/mmu_context_32.h | |||
@@ -0,0 +1,86 @@ | |||
1 | #ifndef __I386_SCHED_H | ||
2 | #define __I386_SCHED_H | ||
3 | |||
4 | #include <asm/desc.h> | ||
5 | #include <asm/atomic.h> | ||
6 | #include <asm/pgalloc.h> | ||
7 | #include <asm/tlbflush.h> | ||
8 | #include <asm/paravirt.h> | ||
9 | #ifndef CONFIG_PARAVIRT | ||
10 | #include <asm-generic/mm_hooks.h> | ||
11 | |||
/*
 * No-op stand-in for the paravirt activate_mm hook: this definition is
 * only compiled when CONFIG_PARAVIRT is disabled (see the enclosing
 * #ifndef), so there is no hypervisor interface to notify and nothing
 * to do when a new mm is activated.
 */
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
16 | #endif /* !CONFIG_PARAVIRT */ | ||
17 | |||
18 | |||
19 | /* | ||
20 | * Used for LDT copy/destruction. | ||
21 | */ | ||
22 | int init_new_context(struct task_struct *tsk, struct mm_struct *mm); | ||
23 | void destroy_context(struct mm_struct *mm); | ||
24 | |||
25 | |||
/*
 * Mark this CPU as running lazily on its current mm's page tables.
 *
 * On SMP, downgrade this CPU's per-cpu TLB state from TLBSTATE_OK to
 * TLBSTATE_LAZY (only if it was OK).  switch_mm() below relies on this
 * lazy state: a lazy CPU may later be removed from the mm's cpu_vm_mask
 * by leave_mm(), stopping TLB-flush IPI delivery to it.  On UP builds
 * this is a no-op.  Neither @mm nor @tsk is otherwise used here.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}
34 | |||
35 | void leave_mm(unsigned long cpu); | ||
36 | |||
/*
 * Switch this CPU's address space from @prev to @next.
 *
 * Real switch (prev != next): clear this CPU from prev's cpu_vm_mask so
 * it stops receiving TLB-flush IPIs for prev, publish @next as the
 * active mm (SMP), set the bit in next's cpu_vm_mask, load next's page
 * tables into %cr3, and reload the LDT only when it actually differs.
 *
 * Same-mm path (SMP only, prev == next): this CPU may have been running
 * in lazy TLB mode.  If leave_mm() had cleared our bit from
 * next->cpu_vm_mask, flush IPIs were not delivered and the TLB may be
 * stale, so atomically re-set the bit and — if it was previously
 * clear — reload %cr3 and the LDT to resynchronize.
 *
 * @tsk is unused here; callers (e.g. activate_mm) may pass NULL.
 */
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		/* publish the new active mm before switching page tables */
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
76 | |||
/*
 * deactivate_mm() - load a null selector (0) into %gs for the task
 * whose mm is being deactivated.  Both arguments are intentionally
 * unused by the i386 implementation.
 *
 * Fix: the original expansion was a bare statement ending in ';',
 * which expands to a stray empty statement at every call site and
 * mis-parses inside an unbraced if/else.  Wrap in the standard
 * do { } while (0) so the macro behaves as exactly one statement and
 * the caller supplies the semicolon.
 */
#define deactivate_mm(tsk, mm)			\
do {						\
	asm("movl %0,%%gs": :"r" (0));		\
} while (0)
79 | |||
/*
 * activate_mm() - install @next as the current address space.  Gives
 * the paravirt layer a chance to hook the activation first, then
 * performs the real switch via switch_mm() with tsk == NULL (no
 * specific task is being scheduled).
 *
 * Fix: the original macro ended in "} while(0);" — the trailing
 * semicolon inside the expansion breaks "if (x) activate_mm(p, n);
 * else ..." by terminating the if before the else.  The semicolon now
 * comes from the call site, per the do/while(0) idiom.  Macro
 * arguments are parenthesized against operator-precedence surprises.
 */
#define activate_mm(prev, next)				\
	do {						\
		paravirt_activate_mm((prev), (next));	\
		switch_mm((prev), (next), NULL);	\
	} while (0)
85 | |||
86 | #endif | ||