author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-07-23 19:15:10 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-08-19 20:12:37 -0400
commit     fcce810986b3f32a8322faf240f8cc5560a4c463 (patch)
tree       4ee99143e18a008c52a274d74c1cc11055983bc6 /arch/powerpc/mm/mmu_context_nohash.c
parent     6c1719942e19936044c4673b18afa26e45a02320 (diff)
powerpc/mm: Add HW threads support to no_hash TLB management

The current "no hash" MMU context management code is written with the
assumption that one CPU == one TLB. This is not the case on
implementations that support HW multithreading, where several Linux
CPUs can share the same TLB. This adds some basic support for this to
our context management and our TLB flushing code. It also cleans up
the optional debugging output a bit.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
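To illustrate the idea behind the change, here is a rough, self-contained userspace model (not the kernel code itself): because all hardware threads of a core share one TLB, a context stolen from an mm must be marked stale on every sibling thread of each core present in the mm's CPU mask, not only on the individual CPUs recorded there. The thread geometry, NR_CPUS/THREADS_PER_CORE values, helper names and bitmap layout below are simplified stand-ins chosen for the example.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS          8   /* assumed: 2 cores x 4 HW threads for the example */
#define THREADS_PER_CORE 4
#define LAST_CONTEXT     31  /* mirrors DEBUG_CLAMP_LAST_CONTEXT 31 in the patch */

/* One stale bitmap per logical CPU: stale_map[cpu][id] is set when context
 * 'id' must be flushed before CPU 'cpu' may run it again. */
static bool stale_map[NR_CPUS][LAST_CONTEXT + 1];

/* Simplified stand-ins for the cpu_first/last_thread_in_core() helpers
 * used by the patch. */
static int first_thread_in_core(int cpu) { return cpu - (cpu % THREADS_PER_CORE); }
static int last_thread_in_core(int cpu)  { return first_thread_in_core(cpu) + THREADS_PER_CORE - 1; }

/* Mark context 'id' stale on every HW thread of each core that has a CPU
 * set in 'cpu_vm_mask' -- the essence of what steal_context_smp() now does,
 * since all threads of a core share one TLB. */
static void mark_context_stale(int id, const bool *cpu_vm_mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_vm_mask[cpu])
			continue;
		for (int t = first_thread_in_core(cpu); t <= last_thread_in_core(cpu); t++)
			stale_map[t][id] = true;
		cpu = last_thread_in_core(cpu);   /* skip the rest of this core */
	}
}

int main(void)
{
	bool cpu_vm_mask[NR_CPUS] = { [1] = true };  /* the mm last ran on CPU 1 only */

	mark_context_stale(5, cpu_vm_mask);

	/* CPUs 0-3 (all threads of core 0) now see context 5 as stale. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: context 5 %s\n", cpu, stale_map[cpu][5] ? "stale" : "clean");
	return 0;
}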
Diffstat (limited to 'arch/powerpc/mm/mmu_context_nohash.c')
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  93
1 file changed, 62 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index b1a727def15..834436d6d6b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -25,10 +25,20 @@
  * also clear mm->cpu_vm_mask bits when processes are migrated
  */
 
-#undef DEBUG
-#define DEBUG_STEAL_ONLY
-#undef DEBUG_MAP_CONSISTENCY
-/*#define DEBUG_CLAMP_LAST_CONTEXT 15 */
+#define DEBUG_MAP_CONSISTENCY
+#define DEBUG_CLAMP_LAST_CONTEXT 31
+//#define DEBUG_HARDER
+
+/* We don't use DEBUG because it tends to be compiled in always nowadays
+ * and this would generate way too much output
+ */
+#ifdef DEBUG_HARDER
+#define pr_hard(args...)	printk(KERN_DEBUG args)
+#define pr_hardcont(args...)	printk(KERN_CONT args)
+#else
+#define pr_hard(args...)	do { } while(0)
+#define pr_hardcont(args...)	do { } while(0)
+#endif
 
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -71,7 +81,7 @@ static DEFINE_SPINLOCK(context_lock);
 static unsigned int steal_context_smp(unsigned int id)
 {
 	struct mm_struct *mm;
-	unsigned int cpu, max;
+	unsigned int cpu, max, i;
 
 	max = last_context - first_context;
 
@@ -89,15 +99,22 @@ static unsigned int steal_context_smp(unsigned int id)
 			id = first_context;
 			continue;
 		}
-		pr_devel("[%d] steal context %d from mm @%p\n",
-			 smp_processor_id(), id, mm);
+		pr_hardcont(" | steal %d from 0x%p", id, mm);
 
 		/* Mark this mm has having no context anymore */
 		mm->context.id = MMU_NO_CONTEXT;
 
-		/* Mark it stale on all CPUs that used this mm */
-		for_each_cpu(cpu, mm_cpumask(mm))
-			__set_bit(id, stale_map[cpu]);
+		/* Mark it stale on all CPUs that used this mm. For threaded
+		 * implementations, we set it on all threads on each core
+		 * represented in the mask. A future implementation will use
+		 * a core map instead but this will do for now.
+		 */
+		for_each_cpu(cpu, mm_cpumask(mm)) {
+			for (i = cpu_first_thread_in_core(cpu);
+			     i <= cpu_last_thread_in_core(cpu); i++)
+				__set_bit(id, stale_map[i]);
+			cpu = i - 1;
+		}
 		return id;
 	}
 
@@ -126,7 +143,7 @@ static unsigned int steal_context_up(unsigned int id)
 	/* Pick up the victim mm */
 	mm = context_mm[id];
 
-	pr_devel("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+	pr_hardcont(" | steal %d from 0x%p", id, mm);
 
 	/* Flush the TLB for that context */
 	local_flush_tlb_mm(mm);
@@ -179,19 +196,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	/* No lockless fast path .. yet */
 	spin_lock(&context_lock);
 
-#ifndef DEBUG_STEAL_ONLY
-	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
-		 cpu, next, next->context.active, next->context.id);
-#endif
+	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
+		cpu, next, next->context.active, next->context.id);
 
 #ifdef CONFIG_SMP
 	/* Mark us active and the previous one not anymore */
 	next->context.active++;
 	if (prev) {
-#ifndef DEBUG_STEAL_ONLY
-		pr_devel(" old context %p active was: %d\n",
-			 prev, prev->context.active);
-#endif
+		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
 		WARN_ON(prev->context.active < 1);
 		prev->context.active--;
 	}
@@ -201,8 +213,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 
 	/* If we already have a valid assigned context, skip all that */
 	id = next->context.id;
-	if (likely(id != MMU_NO_CONTEXT))
+	if (likely(id != MMU_NO_CONTEXT)) {
+#ifdef DEBUG_MAP_CONSISTENCY
+		if (context_mm[id] != next)
+			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
+			       next, id, id, context_mm[id]);
+#endif
 		goto ctxt_ok;
+	}
 
 	/* We really don't have a context, let's try to acquire one */
 	id = next_context;
@@ -235,11 +253,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	next_context = id + 1;
 	context_mm[id] = next;
 	next->context.id = id;
-
-#ifndef DEBUG_STEAL_ONLY
-	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
-		 cpu, id, nr_free_contexts);
-#endif
+	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
 
 	context_check_map();
  ctxt_ok:
@@ -248,15 +262,20 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	 * local TLB for it and unmark it before we use it
 	 */
 	if (test_bit(id, stale_map[cpu])) {
-		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
-			 cpu, id, next);
+		pr_hardcont(" | stale flush %d [%d..%d]",
+			    id, cpu_first_thread_in_core(cpu),
+			    cpu_last_thread_in_core(cpu));
+
 		local_flush_tlb_mm(next);
 
 		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-		__clear_bit(id, stale_map[cpu]);
+		for (cpu = cpu_first_thread_in_core(cpu);
+		     cpu <= cpu_last_thread_in_core(cpu); cpu++)
+			__clear_bit(id, stale_map[cpu]);
 	}
 
 	/* Flick the MMU and release lock */
+	pr_hardcont(" -> %d\n", id);
 	set_context(id, next->pgd);
 	spin_unlock(&context_lock);
 }
@@ -266,6 +285,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
  */
 int init_new_context(struct task_struct *t, struct mm_struct *mm)
 {
+	pr_hard("initing context for mm @%p\n", mm);
+
 	mm->context.id = MMU_NO_CONTEXT;
 	mm->context.active = 0;
 
@@ -305,7 +326,9 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
 					    unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
-
+#ifdef CONFIG_HOTPLUG_CPU
+	struct task_struct *p;
+#endif
 	/* We don't touch CPU 0 map, it's allocated at aboot and kept
 	 * around forever
 	 */
@@ -324,8 +347,16 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
324 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); 347 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
325 kfree(stale_map[cpu]); 348 kfree(stale_map[cpu]);
326 stale_map[cpu] = NULL; 349 stale_map[cpu] = NULL;
327 break; 350
328#endif 351 /* We also clear the cpu_vm_mask bits of CPUs going away */
352 read_lock(&tasklist_lock);
353 for_each_process(p) {
354 if (p->mm)
355 cpu_mask_clear_cpu(cpu, mm_cpumask(p->mm));
356 }
357 read_unlock(&tasklist_lock);
358 break;
359#endif /* CONFIG_HOTPLUG_CPU */
329 } 360 }
330 return NOTIFY_OK; 361 return NOTIFY_OK;
331} 362}
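A note on the logging side of the patch: the pr_devel() calls guarded by DEBUG_STEAL_ONLY are replaced by pr_hard()/pr_hardcont(), which assemble one trace line per context switch out of KERN_DEBUG/KERN_CONT printk pieces and compile away entirely unless DEBUG_HARDER is defined. A minimal userspace sketch of that pattern (illustrative macros only, not the kernel definitions) could look like:

#include <stdio.h>

/* Userspace stand-ins: pr_hard() starts a trace line, pr_hardcont()
 * appends to it without a newline, mirroring KERN_DEBUG/KERN_CONT. */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printf(args)
#define pr_hardcont(args...)	printf(args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

int main(void)
{
	int cpu = 1, id = 5;

	/* One logical event becomes one line, assembled in pieces. */
	pr_hard("[%d] activating context, id=%d", cpu, id);
	pr_hardcont(" | stale flush %d [%d..%d]", id, 0, 3);
	pr_hardcont(" -> %d\n", id);
	return 0;
}

Build with -DDEBUG_HARDER to see the trace; without it the macros expand to empty statements, so the calls cost nothing in normal builds.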