Diffstat (limited to 'arch/x86/include/asm/mmu_context_32.h')
-rw-r--r--  arch/x86/include/asm/mmu_context_32.h | 56
1 file changed, 56 insertions, 0 deletions
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
new file mode 100644
index 00000000000..cce6f6e4afd
--- /dev/null
+++ b/arch/x86/include/asm/mmu_context_32.h
@@ -0,0 +1,56 @@
+#ifndef ASM_X86__MMU_CONTEXT_32_H
+#define ASM_X86__MMU_CONTEXT_32_H
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+	unsigned cpu = smp_processor_id();
+	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+			     struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	int cpu = smp_processor_id();
+
+	if (likely(prev != next)) {
+		/* stop flush ipis for the previous mm */
+		cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+		per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
+		cpu_set(cpu, next->cpu_vm_mask);
+
+		/* Re-load page tables */
+		load_cr3(next->pgd);
+
+		/*
+		 * load the LDT, if the LDT is different:
+		 */
+		if (unlikely(prev->context.ldt != next->context.ldt))
+			load_LDT_nolock(&next->context);
+	}
+#ifdef CONFIG_SMP
+	else {
+		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
+		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
+
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+			/* We were in lazy tlb mode and leave_mm disabled
+			 * tlb flush IPI delivery. We must reload %cr3.
+			 */
+			load_cr3(next->pgd);
+			load_LDT_nolock(&next->context);
+		}
+	}
+#endif
+}
+
+#define deactivate_mm(tsk, mm)			\
+	asm("movl %0,%%gs": :"r" (0));
+
+#endif /* ASM_X86__MMU_CONTEXT_32_H */
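
For readers browsing this outside the kernel tree, below is a minimal user-space sketch of the lazy-TLB bookkeeping that enter_lazy_tlb() and switch_mm() implement: a CPU running a kernel thread goes lazy instead of servicing TLB flushes, a remote flush then drops it from the mm's cpu_vm_mask, and the next switch back to the same mm notices the cleared bit and reloads %cr3. All names in the sketch (model_mm, model_switch_mm, fake_load_cr3, model_remote_flush) are invented for illustration and are not kernel interfaces; the real code also reloads the LDT, which the model omits.

/*
 * Minimal user-space model of the lazy-TLB bookkeeping implemented by
 * enter_lazy_tlb()/switch_mm() above.  Every identifier here
 * (model_mm, model_switch_mm, fake_load_cr3, ...) is invented for
 * illustration; none of this is kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

enum { MODEL_TLBSTATE_OK, MODEL_TLBSTATE_LAZY };

struct model_mm {
	bool cpu_in_vm_mask;	/* this CPU's bit in mm->cpu_vm_mask      */
	int  generation;	/* stands in for the page-table contents  */
};

/* Per-CPU state; the model has a single CPU. */
static struct {
	int state;
	struct model_mm *active_mm;
	int loaded_generation;	/* what "%cr3" currently points at        */
} tlbstate;

static void fake_load_cr3(struct model_mm *mm)
{
	tlbstate.loaded_generation = mm->generation;
	printf("reload cr3 -> generation %d\n", mm->generation);
}

/* A kernel thread starts running: mark the CPU lazy instead of flushing. */
static void model_enter_lazy_tlb(void)
{
	if (tlbstate.state == MODEL_TLBSTATE_OK)
		tlbstate.state = MODEL_TLBSTATE_LAZY;
}

/*
 * Another CPU changed mm's page tables.  leave_mm() on a lazy CPU clears
 * that CPU from cpu_vm_mask so no further flush IPIs are sent to it.
 */
static void model_remote_flush(struct model_mm *mm)
{
	mm->generation++;
	if (tlbstate.state == MODEL_TLBSTATE_LAZY && tlbstate.active_mm == mm)
		mm->cpu_in_vm_mask = false;
}

static void model_switch_mm(struct model_mm *prev, struct model_mm *next)
{
	if (prev != next) {
		/* real switch: stop flush IPIs for prev, always reload */
		prev->cpu_in_vm_mask = false;
		tlbstate.state = MODEL_TLBSTATE_OK;
		tlbstate.active_mm = next;
		next->cpu_in_vm_mask = true;
		fake_load_cr3(next);
	} else {
		tlbstate.state = MODEL_TLBSTATE_OK;
		if (!next->cpu_in_vm_mask) {
			/* we were lazy and a flush passed us by: reload */
			next->cpu_in_vm_mask = true;
			fake_load_cr3(next);
		}
	}
}

int main(void)
{
	struct model_mm idle = { .cpu_in_vm_mask = true,  .generation = 0 };
	struct model_mm user = { .cpu_in_vm_mask = false, .generation = 0 };

	tlbstate.state = MODEL_TLBSTATE_OK;
	tlbstate.active_mm = &idle;

	model_switch_mm(&idle, &user);	/* prev != next: reloads cr3           */
	model_enter_lazy_tlb();		/* kernel thread runs, CPU goes lazy   */
	model_remote_flush(&user);	/* remote unmap; this CPU is skipped   */
	model_switch_mm(&user, &user);	/* back to user: else-branch reloads   */
	return 0;
}

The cpu_test_and_set() in the real else-branch does the test and the cpu_vm_mask update atomically; the model splits it into a check and an assignment because it only has one CPU.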