Diffstat (limited to 'arch/mn10300/include/asm/mmu_context.h')
-rw-r--r--	arch/mn10300/include/asm/mmu_context.h	138
1 file changed, 138 insertions, 0 deletions
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
new file mode 100644
index 000000000000..a9e2e34f69b0
--- /dev/null
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -0,0 +1,138 @@
/* MN10300 MMU context management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 * - Derived from include/asm-m32r/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * This implements an algorithm that hands out TLB PIDs to give processes
 * selective access to the TLB, thus reducing the number of TLB flushes
 * required.
 *
 * Note, however, that the M32R algorithm is technically broken as it does
 * not handle version wrap-around: in theory, a very long-lived process
 * could sleep for long enough that the version number wraps all the way
 * around, making its stale TLB mappings appear valid once again.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

#define MMU_CONTEXT_TLBPID_MASK		0x000000ffUL
#define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
#define MMU_CONTEXT_FIRST_VERSION	0x00000100UL
#define MMU_NO_CONTEXT			0x00000000UL

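/* Editorial illustration (not part of the original patch): a context word
 * packs an 8-bit TLB PID in its low byte and a 24-bit version in the upper
 * bytes.  For a hypothetical context value ctx == 0x00000305:
 *
 *	ctx & MMU_CONTEXT_TLBPID_MASK	== 0x00000005	(TLB PID 5)
 *	ctx & MMU_CONTEXT_VERSION_MASK	== 0x00000300	(version 3)
 */
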
extern unsigned long mmu_context_cache[NR_CPUS];
#define mm_context(mm) ((mm)->context.tlbpid[smp_processor_id()])

#define enter_lazy_tlb(mm, tsk) do {} while (0)

#ifdef CONFIG_SMP
#define cpu_ran_vm(cpu, task) \
	cpu_set((cpu), (task)->cpu_vm_mask)
#define cpu_maybe_ran_vm(cpu, task) \
	cpu_test_and_set((cpu), (task)->cpu_vm_mask)
#else
#define cpu_ran_vm(cpu, task) do {} while (0)
#define cpu_maybe_ran_vm(cpu, task) true
#endif /* CONFIG_SMP */

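/* Editorial note: cpu_test_and_set() returns the bit's previous state, so
 * cpu_maybe_ran_vm() asks "had this CPU already run this address space?"
 * while simultaneously marking it as having done so.  On UP builds there is
 * only one CPU, which is assumed to have run the mm, hence the constant
 * true.
 */
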
/*
 * allocate an MMU context
 */
static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
{
	unsigned long *pmc = &mmu_context_cache[smp_processor_id()];
	unsigned long mc = ++(*pmc);

	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
		/* we exhausted the TLB PIDs of this version on this CPU, so we
		 * flush this CPU's TLB in its entirety and start a new cycle */
		flush_tlb_all();

		/* fix the TLB version if needed (we avoid version #0 so as to
		 * distinguish MMU_NO_CONTEXT) */
		if (!mc)
			*pmc = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm_context(mm) = mc;
	return mc;
}

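/* Editorial walkthrough with hypothetical values: if this CPU's context
 * cache held 0x000003ff (version 3, PID 255), the pre-increment yields
 * 0x00000400; its PID byte is zero, so the whole TLB is flushed and the mm
 * is stamped version 4, PID 0.  If the entire word ever wraps to zero, it
 * is bumped to MMU_CONTEXT_FIRST_VERSION (0x00000100) so that a live
 * context can never equal MMU_NO_CONTEXT.
 */
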
/*
 * get an MMU context if one is needed
 */
static inline unsigned long get_mmu_context(struct mm_struct *mm)
{
	unsigned long mc = MMU_NO_CONTEXT, cache;

	if (mm) {
		cache = mmu_context_cache[smp_processor_id()];
		mc = mm_context(mm);

		/* if we have an old version of the context, replace it */
		if ((mc ^ cache) & MMU_CONTEXT_VERSION_MASK)
			mc = allocate_mmu_context(mm);
	}
	return mc;
}

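/* Editorial note: XORing the two words and masking off the PID byte is a
 * compact test for "do the version fields differ?".  With hypothetical
 * values, (0x00000205 ^ 0x00000347) & MMU_CONTEXT_VERSION_MASK is
 * 0x00000100, i.e. non-zero: a context stamped with version 2 is stale
 * once this CPU's cache has reached version 3, so it is reallocated.
 */
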
/*
 * initialise the context related info for a new mm_struct instance
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	int num_cpus = NR_CPUS, i;

	for (i = 0; i < num_cpus; i++)
		mm->context.tlbpid[i] = MMU_NO_CONTEXT;
	return 0;
}

/*
 * destroy context related info for an mm_struct that is about to be put to
 * rest
 */
#define destroy_context(mm) do { } while (0)

/*
 * after we have set current->mm to a new value, this activates the context
 * for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm, int cpu)
{
	PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
}

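/* Editorial note: PIDR is the CPU's TLB PID register.  Only the PID byte
 * of the context is loaded into it; TLB entries tagged with other PIDs
 * simply cease to match, so no flush is needed on an ordinary switch.
 */
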
/*
 * change between virtual memory sets
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (prev != next) {
		cpu_ran_vm(cpu, next);
		activate_context(next, cpu);
		PTBR = (unsigned long) next->pgd;
	} else if (!cpu_maybe_ran_vm(cpu, next)) {
		activate_context(next, cpu);
	}
}

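/* Editorial note: when the address space really changes, the new context is
 * loaded into PIDR and the page-table base register PTBR is repointed at
 * next->pgd.  When prev == next, the context only needs re-activating if
 * this CPU had not previously run the mm, which cpu_maybe_ran_vm() detects
 * while recording the CPU in the mm's mask.
 */
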
#define deactivate_mm(tsk, mm) do {} while (0)
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)

#endif /* _ASM_MMU_CONTEXT_H */