Diffstat (limited to 'include/asm-sh/mmu_context.h')
-rw-r--r-- | include/asm-sh/mmu_context.h | 61 ++++++++++++++++++++++++++++++++++++-------------------------
1 file changed, 36 insertions(+), 25 deletions(-)
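In short: the single global mmu_context_cache becomes a per-CPU ASID cache (asid_cache(cpu), held in cpu_data), mm->context.id becomes a per-CPU array reached through the new cpu_context()/cpu_asid() macros, and get_mmu_context(), activate_context() and switch_mm() gain a cpu argument so each processor tracks its own ASID generation. switch_mm() also starts maintaining cpu_vm_mask, so a CPU re-entering an mm it already has cached can re-activate the context. A standalone sketch of the underlying versioned-ASID scheme follows the diff.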
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h
index 46f04e23bd45..342024425b7d 100644
--- a/include/asm-sh/mmu_context.h
+++ b/include/asm-sh/mmu_context.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
  *
  * ASID handling idea taken from MIPS implementation.
  */
@@ -19,11 +19,6 @@
  * (b) ASID (Address Space IDentifier)
  */
 
-/*
- * Cache of MMU context last used.
- */
-extern unsigned long mmu_context_cache;
-
 #define MMU_CONTEXT_ASID_MASK		0x000000ff
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100
@@ -32,6 +27,11 @@ extern unsigned long mmu_context_cache
 /* ASID is 8-bit value, so it can't be 0x100 */
 #define MMU_NO_ASID			0x100
 
+#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
+#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & \
+				 MMU_CONTEXT_ASID_MASK)
+#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
+
 /*
  * Virtual Page Number mask
  */
@@ -41,18 +41,17 @@ extern unsigned long mmu_context_cache
 /*
  * Get MMU context if needed.
  */
-static inline void get_mmu_context(struct mm_struct *mm)
+static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
 {
-	unsigned long mc = mmu_context_cache;
+	unsigned long asid = asid_cache(cpu);
 
 	/* Check if we have old version of context. */
-	if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0)
+	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
 		/* It's up to date, do nothing */
 		return;
 
 	/* It's old, we need to get new context with new version. */
-	mc = ++mmu_context_cache;
-	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
+	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
 		/*
 		 * We exhaust ASID of this version.
 		 * Flush all TLB and start new cycle.
@@ -63,10 +62,11 @@ static inline void get_mmu_context(struct mm_struct *mm)
 		 * Fix version; Note that we avoid version #0
 		 * to distingush NO_CONTEXT.
 		 */
-		if (!mc)
-			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
+		if (!asid)
+			asid = MMU_CONTEXT_FIRST_VERSION;
 	}
-	mm->context.id = mc;
+
+	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
 /*
@@ -74,9 +74,13 @@ static inline void get_mmu_context(struct mm_struct *mm)
  * instance.
  */
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	mm->context.id = NO_CONTEXT;
+	int i;
+
+	for (i = 0; i < num_online_cpus(); i++)
+		cpu_context(i, mm) = NO_CONTEXT;
+
 	return 0;
 }
 
@@ -117,10 +121,10 @@ static inline unsigned long get_asid(void)
  * After we have set current->mm to a new value, this activates
  * the context for the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm)
+static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
 {
-	get_mmu_context(mm);
-	set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK);
+	get_mmu_context(mm, cpu);
+	set_asid(cpu_asid(cpu, mm));
 }
 
 /* MMU_TTB is used for optimizing the fault handling. */
@@ -138,10 +142,15 @@ static inline void switch_mm(struct mm_struct *prev,
 			     struct mm_struct *next,
 			     struct task_struct *tsk)
 {
+	unsigned int cpu = smp_processor_id();
+
 	if (likely(prev != next)) {
+		cpu_set(cpu, next->cpu_vm_mask);
 		set_TTB(next->pgd);
-		activate_context(next);
-	}
+		activate_context(next, cpu);
+	} else
+		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+			activate_context(next, cpu);
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -159,7 +168,7 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 #define destroy_context(mm)		do { } while (0)
 #define set_asid(asid)			do { } while (0)
 #define get_asid()			(0)
-#define activate_context(mm)		do { } while (0)
+#define activate_context(mm,cpu)	do { } while (0)
 #define switch_mm(prev,next,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)		do { } while (0)
 #define activate_mm(prev,next)		do { } while (0)
@@ -174,14 +183,16 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
  */
 static inline void enable_mmu(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Enable MMU */
 	ctrl_outl(MMU_CONTROL_INIT, MMUCR);
 	ctrl_barrier();
 
-	if (mmu_context_cache == NO_CONTEXT)
-		mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
+	if (asid_cache(cpu) == NO_CONTEXT)
+		asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;
 
-	set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
+	set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
 }
 
 static inline void disable_mmu(void)
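
The versioned-ASID trick this patch refactors is easier to see outside the kernel. Below is a minimal user-space sketch of the same idea, assuming the patch's layout (8-bit ASID in the low byte, 24-bit generation above it). get_context() and flush_tlb_all_mock() are hypothetical stand-ins for get_mmu_context() and the kernel's TLB flush, not real APIs; the single asid_cache here is the counter the patch makes per-CPU. When the 8-bit ASID space is exhausted, the generation is bumped, the TLB is flushed once, and every context tagged with an older generation lazily re-allocates an ASID on its next use.

#include <stdio.h>

/* Same layout as the patch: low 8 bits = ASID, high 24 bits = generation. */
#define ASID_MASK	0x000000ffUL
#define VERSION_MASK	0xffffff00UL
#define FIRST_VERSION	0x00000100UL	/* generation 0 is reserved */
#define NO_CONTEXT	0UL

static unsigned long asid_cache = FIRST_VERSION;	/* per-CPU in the patch */

/* Stand-in for the TLB flush the real code does on ASID exhaustion. */
static void flush_tlb_all_mock(void)
{
	puts("  ASIDs exhausted -> flush TLB, start new generation");
}

/* Mirrors get_mmu_context(): ensure *ctx holds a live ASID. */
static void get_context(unsigned long *ctx)
{
	unsigned long asid = asid_cache;

	/* Same generation as the allocator: the cached ASID is still valid. */
	if (((*ctx ^ asid) & VERSION_MASK) == 0)
		return;

	/* Stale (or NO_CONTEXT): hand out the next ASID in sequence. */
	if (!(++asid & ASID_MASK)) {
		/* 8-bit ASID wrapped: old TLB entries would be ambiguous. */
		flush_tlb_all_mock();
		if (!asid)	/* skip generation 0 so NO_CONTEXT stays unique */
			asid = FIRST_VERSION;
	}
	*ctx = asid_cache = asid;
}

int main(void)
{
	unsigned long mm_a = NO_CONTEXT;
	int i;

	get_context(&mm_a);
	printf("A: gen=%#lx asid=%#lx\n", mm_a & VERSION_MASK, mm_a & ASID_MASK);

	/* Burn the remaining ASIDs of this generation to force a rollover. */
	for (i = 0; i < 256; i++) {
		unsigned long tmp = NO_CONTEXT;
		get_context(&tmp);
	}

	/* A's generation is now stale, so it transparently gets a fresh ASID. */
	get_context(&mm_a);
	printf("A: gen=%#lx asid=%#lx\n", mm_a & VERSION_MASK, mm_a & ASID_MASK);
	return 0;
}

The point of the per-CPU conversion above is that this counter, and the flush it occasionally forces, become local to each processor instead of being shared global state.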