path: root/arch/powerpc/mm/mmu_context_nohash.c
Diffstat (limited to 'arch/powerpc/mm/mmu_context_nohash.c')
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  96
1 file changed, 64 insertions, 32 deletions
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index b1a727def15b..c2f93dc470e6 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -25,10 +25,20 @@
  * also clear mm->cpu_vm_mask bits when processes are migrated
  */
 
-#undef DEBUG
-#define DEBUG_STEAL_ONLY
-#undef DEBUG_MAP_CONSISTENCY
-/*#define DEBUG_CLAMP_LAST_CONTEXT	15 */
+#define DEBUG_MAP_CONSISTENCY
+#define DEBUG_CLAMP_LAST_CONTEXT	31
+//#define DEBUG_HARDER
+
+/* We don't use DEBUG because it tends to be compiled in always nowadays
+ * and this would generate way too much output
+ */
+#ifdef DEBUG_HARDER
+#define pr_hard(args...)	printk(KERN_DEBUG args)
+#define pr_hardcont(args...)	printk(KERN_CONT args)
+#else
+#define pr_hard(args...)	do { } while(0)
+#define pr_hardcont(args...)	do { } while(0)
+#endif
 
 #include <linux/kernel.h>
 #include <linux/mm.h>
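
A note on the new macros above: pr_hard() opens a debug line and pr_hardcont() appends to it with KERN_CONT, so one context switch produces a single log line assembled from several calls, and both collapse to empty statements unless DEBUG_HARDER is defined. A minimal userspace sketch of that pattern, using printf in place of printk (only the DEBUG_HARDER / pr_hard / pr_hardcont names come from the hunk above; everything else is illustrative):

#include <stdio.h>

#define DEBUG_HARDER

#ifdef DEBUG_HARDER
#define pr_hard(args...)	printf(args)
#define pr_hardcont(args...)	printf(args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

int main(void)
{
	/* one logical event becomes one output line, built piece by piece */
	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		0, (void *)0x1000, 1, 5);
	pr_hardcont(" | stale flush %d [%d..%d]", 5, 0, 3);
	pr_hardcont(" -> %d\n", 5);
	return 0;
}

Leaving DEBUG_HARDER undefined makes every call expand to an empty do/while, so the instrumentation costs nothing in normal builds.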
@@ -71,7 +81,7 @@ static DEFINE_SPINLOCK(context_lock);
 static unsigned int steal_context_smp(unsigned int id)
 {
 	struct mm_struct *mm;
-	unsigned int cpu, max;
+	unsigned int cpu, max, i;
 
 	max = last_context - first_context;
 
@@ -89,15 +99,22 @@ static unsigned int steal_context_smp(unsigned int id)
 			id = first_context;
 			continue;
 		}
-		pr_devel("[%d] steal context %d from mm @%p\n",
-			 smp_processor_id(), id, mm);
+		pr_hardcont(" | steal %d from 0x%p", id, mm);
 
 		/* Mark this mm has having no context anymore */
 		mm->context.id = MMU_NO_CONTEXT;
 
-		/* Mark it stale on all CPUs that used this mm */
-		for_each_cpu(cpu, mm_cpumask(mm))
-			__set_bit(id, stale_map[cpu]);
+		/* Mark it stale on all CPUs that used this mm. For threaded
+		 * implementations, we set it on all threads on each core
+		 * represented in the mask. A future implementation will use
+		 * a core map instead but this will do for now.
+		 */
+		for_each_cpu(cpu, mm_cpumask(mm)) {
+			for (i = cpu_first_thread_in_core(cpu);
+			     i <= cpu_last_thread_in_core(cpu); i++)
+				__set_bit(id, stale_map[i]);
+			cpu = i - 1;
+		}
 		return id;
 	}
 
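
The new loop in steal_context_smp() marks the stolen id stale on every hardware thread of each core found in the mm's CPU mask, and the trailing cpu = i - 1 lets for_each_cpu() resume at the next core instead of revisiting sibling threads. A standalone sketch of the same iteration, with a plain bitmask standing in for the cpumask and hypothetical first/last-thread helpers (none of these names are kernel API):

#include <stdio.h>

#define NR_CPUS			12
#define THREADS_PER_CORE	4

static int first_thread_in_core(int cpu) { return cpu - (cpu % THREADS_PER_CORE); }
static int last_thread_in_core(int cpu)  { return first_thread_in_core(cpu) + THREADS_PER_CORE - 1; }

int main(void)
{
	unsigned int mm_cpumask = (1u << 1) | (1u << 9);	/* mm ran on CPU 1 and CPU 9 */
	int stale[NR_CPUS] = { 0 };
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!(mm_cpumask & (1u << cpu)))
			continue;
		/* mark every sibling thread of this core ... */
		for (i = first_thread_in_core(cpu); i <= last_thread_in_core(cpu); i++)
			stale[i] = 1;
		/* ... then step past the siblings, as "cpu = i - 1" does in the patch */
		cpu = i - 1;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %2d stale=%d\n", cpu, stale[cpu]);
	return 0;
}

With that input, cores 0-3 and 8-11 end up marked while core 4-7 is untouched, which is the behaviour the comment in the hunk describes.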
@@ -126,7 +143,7 @@ static unsigned int steal_context_up(unsigned int id)
 	/* Pick up the victim mm */
 	mm = context_mm[id];
 
-	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+	pr_hardcont(" | steal %d from 0x%p", id, mm);
 
 	/* Flush the TLB for that context */
 	local_flush_tlb_mm(mm);
@@ -173,25 +190,20 @@ static void context_check_map(void) { }
 
 void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 {
-	unsigned int id, cpu = smp_processor_id();
+	unsigned int i, id, cpu = smp_processor_id();
 	unsigned long *map;
 
 	/* No lockless fast path .. yet */
 	spin_lock(&context_lock);
 
-#ifndef DEBUG_STEAL_ONLY
-	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
-		 cpu, next, next->context.active, next->context.id);
-#endif
+	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
+		cpu, next, next->context.active, next->context.id);
 
 #ifdef CONFIG_SMP
 	/* Mark us active and the previous one not anymore */
 	next->context.active++;
 	if (prev) {
-#ifndef DEBUG_STEAL_ONLY
-		pr_devel(" old context %p active was: %d\n",
-			 prev, prev->context.active);
-#endif
+		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
 		WARN_ON(prev->context.active < 1);
 		prev->context.active--;
 	}
@@ -201,8 +213,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 
 	/* If we already have a valid assigned context, skip all that */
 	id = next->context.id;
-	if (likely(id != MMU_NO_CONTEXT))
+	if (likely(id != MMU_NO_CONTEXT)) {
+#ifdef DEBUG_MAP_CONSISTENCY
+		if (context_mm[id] != next)
+			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
+			       next, id, id, context_mm[id]);
+#endif
 		goto ctxt_ok;
+	}
 
 	/* We really don't have a context, let's try to acquire one */
 	id = next_context;
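
The DEBUG_MAP_CONSISTENCY check added above guards the invariant that context_mm[] is the reverse map of mm->context.id: if an mm claims an id, the table entry for that id must point back at the same mm. A tiny assertion-style sketch of that invariant (illustrative types and values, not the kernel's):

#include <assert.h>
#include <stddef.h>

#define LAST_CONTEXT	16
#define MMU_NO_CONTEXT	((unsigned int)-1)	/* illustrative value */

struct mm { unsigned int id; };

static struct mm *context_mm[LAST_CONTEXT + 1];	/* id -> owning mm */

static void check_map(struct mm *mm)
{
	/* forward map (mm->id) and reverse map (context_mm[]) must agree */
	if (mm->id != MMU_NO_CONTEXT)
		assert(context_mm[mm->id] == mm);
}

int main(void)
{
	struct mm m = { .id = 3 };

	context_mm[3] = &m;
	check_map(&m);		/* passes: both maps point at each other */
	return 0;
}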
@@ -235,11 +253,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next_context = id + 1;
 	context_mm[id] = next;
 	next->context.id = id;
-
-#ifndef DEBUG_STEAL_ONLY
-	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
-		 cpu, id, nr_free_contexts);
-#endif
+	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
 
 	context_check_map();
  ctxt_ok:
@@ -248,15 +262,21 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 * local TLB for it and unmark it before we use it
 	 */
 	if (test_bit(id, stale_map[cpu])) {
-		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
-			 cpu, id, next);
+		pr_hardcont(" | stale flush %d [%d..%d]",
+			    id, cpu_first_thread_in_core(cpu),
+			    cpu_last_thread_in_core(cpu));
+
 		local_flush_tlb_mm(next);
 
 		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-		__clear_bit(id, stale_map[cpu]);
+		for (i = cpu_first_thread_in_core(cpu);
+		     i <= cpu_last_thread_in_core(cpu); i++) {
+			__clear_bit(id, stale_map[i]);
+		}
 	}
 
 	/* Flick the MMU and release lock */
+	pr_hardcont(" -> %d\n", id);
 	set_context(id, next->pgd);
 	spin_unlock(&context_lock);
 }
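
For context, stale_map[] holds one bitmap per hardware thread with a bit per context id: stealing a context sets the bit on every thread that may still cache TLB entries for it, and a thread flushes its local TLB and clears its own bit the next time it activates that context (the hunk above extends the clear to every sibling thread of the core). A compact userspace model of that handshake (all names illustrative):

#include <stdio.h>

#define NR_CPUS		4

static unsigned int stale_map[NR_CPUS];	/* bit N set => context N is stale on that thread */

static void mark_context_stale(int id)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		stale_map[cpu] |= 1u << id;
}

static void switch_to_context(int cpu, int id)
{
	if (stale_map[cpu] & (1u << id)) {
		printf("cpu %d: flushing stale context %d before use\n", cpu, id);
		/* local_flush_tlb_mm() would run here in the real code */
		stale_map[cpu] &= ~(1u << id);
	}
	printf("cpu %d: running context %d\n", cpu, id);
}

int main(void)
{
	mark_context_stale(5);		/* context 5 was stolen somewhere else */
	switch_to_context(0, 5);	/* first activation flushes */
	switch_to_context(0, 5);	/* second activation is already clean */
	return 0;
}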
@@ -266,6 +286,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
  */
 int init_new_context(struct task_struct *t, struct mm_struct *mm)
 {
+	pr_hard("initing context for mm @%p\n", mm);
+
 	mm->context.id = MMU_NO_CONTEXT;
 	mm->context.active = 0;
 
@@ -305,7 +327,9 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 					    unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
-
+#ifdef CONFIG_HOTPLUG_CPU
+	struct task_struct *p;
+#endif
 	/* We don't touch CPU 0 map, it's allocated at aboot and kept
 	 * around forever
 	 */
@@ -324,8 +348,16 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
 		kfree(stale_map[cpu]);
 		stale_map[cpu] = NULL;
-		break;
-#endif
+
+		/* We also clear the cpu_vm_mask bits of CPUs going away */
+		read_lock(&tasklist_lock);
+		for_each_process(p) {
+			if (p->mm)
+				cpu_mask_clear_cpu(cpu, mm_cpumask(p->mm));
+		}
+		read_unlock(&tasklist_lock);
+		break;
+#endif /* CONFIG_HOTPLUG_CPU */
 	}
 	return NOTIFY_OK;
 }
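
The hotplug branch now also drops the departing CPU from cpu_vm_mask of every mm, so later stale-marking loops never walk a thread that has gone away; note the helper is spelled cpu_mask_clear_cpu() in the hunk, whereas the generic cpumask API this appears intended to use is usually spelled cpumask_clear_cpu(). A standalone model of that cleanup step (plain C, with a small array standing in for the process list):

#include <stdio.h>

#define NR_PROCS	3

struct mm { unsigned int cpu_vm_mask; };	/* bit N set => mm has run on CPU N */

int main(void)
{
	struct mm mms[NR_PROCS] = { { 0x3 }, { 0xf }, { 0x8 } };
	int dying_cpu = 3;
	int p;

	/* on CPU_DEAD: remove the gone CPU from every mm's mask */
	for (p = 0; p < NR_PROCS; p++)
		mms[p].cpu_vm_mask &= ~(1u << dying_cpu);

	for (p = 0; p < NR_PROCS; p++)
		printf("mm %d cpu_vm_mask=0x%x\n", p, mms[p].cpu_vm_mask);
	return 0;
}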