Diffstat (limited to 'arch/powerpc/mm/mmu_context_nohash.c')

 arch/powerpc/mm/mmu_context_nohash.c | 43 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)
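In short: the patch adds a no_selective_tlbil flag and a steal_all_contexts() helper. On MMUs whose TLB entries cannot be invalidated for a single context (judging by the flag name; only the 8xx sets the flag), running out of context IDs now reclaims every ID in one pass behind a single _tlbil_all() flush, instead of stealing one ID at a time via steal_context_up().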
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 9cba6cba2e50..986afbc22c76 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -52,12 +52,15 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+#include "mmu_decl.h"
+
 static unsigned int first_context, last_context;
 static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
 static DEFINE_RAW_SPINLOCK(context_lock);
+static bool no_selective_tlbil;
 
 #define CTX_MAP_SIZE \
         (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
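For scale, the CTX_MAP_SIZE macro above sizes the context bitmap in whole unsigned longs. A standalone sketch of that arithmetic (plain user-space C, not kernel code; the byte counts assume a 32-bit powerpc where sizeof(unsigned long) is 4 and BITS_PER_LONG is 32):

#include <stdio.h>

#define BITS_PER_LONG 32
#define CTX_MAP_SIZE(last_context) \
        (sizeof(unsigned long) * ((last_context) / BITS_PER_LONG + 1))

int main(void)
{
        /* 8xx:   ids 0..15 fit in one long -> 1 long  * 4 = 4 bytes    */
        printf("8xx:   %zu bytes\n", CTX_MAP_SIZE(15));
        /* 47x:   ids up to 65535 -> 2048 longs * 4        = 8192 bytes */
        printf("47x:   %zu bytes\n", CTX_MAP_SIZE(65535));
        /* other: ids up to 255   -> 8 longs * 4           = 32 bytes   */
        printf("other: %zu bytes\n", CTX_MAP_SIZE(255));
        return 0;
}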
@@ -133,6 +136,38 @@ static unsigned int steal_context_smp(unsigned int id)
 }
 #endif  /* CONFIG_SMP */
 
+static unsigned int steal_all_contexts(void)
+{
+        struct mm_struct *mm;
+        int cpu = smp_processor_id();
+        unsigned int id;
+
+        for (id = first_context; id <= last_context; id++) {
+                /* Pick up the victim mm */
+                mm = context_mm[id];
+
+                pr_hardcont(" | steal %d from 0x%p", id, mm);
+
+                /* Mark this mm as having no context anymore */
+                mm->context.id = MMU_NO_CONTEXT;
+                if (id != first_context) {
+                        context_mm[id] = NULL;
+                        __clear_bit(id, context_map);
+#ifdef DEBUG_MAP_CONSISTENCY
+                        mm->context.active = 0;
+#endif
+                }
+                __clear_bit(id, stale_map[cpu]);
+        }
+
+        /* Flush the TLB for all contexts (not to be used on SMP) */
+        _tlbil_all();
+
+        nr_free_contexts = last_context - first_context;
+
+        return first_context;
+}
+
 /* Note that this will also be called on SMP if all other CPUs are
  * offlined, which means that it may be called for cpu != 0. For
  * this to work, we somewhat assume that CPUs that are onlined
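Note the bookkeeping in the loop above: every ID except first_context is freed, and first_context itself is handed straight back to the caller, so nr_free_contexts ends at last_context - first_context rather than the full ID count. A minimal user-space model of that invariant (an illustrative sketch, not kernel code; the FIRST_CONTEXT/LAST_CONTEXT values mirror the 8xx setup in mmu_context_init() below):

#include <stdio.h>
#include <string.h>

#define FIRST_CONTEXT 0
#define LAST_CONTEXT  15

static unsigned char context_map[LAST_CONTEXT + 1]; /* 1 = id in use */
static int nr_free_contexts;

static unsigned int steal_all_contexts_model(void)
{
        unsigned int id;

        for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
                /* free every id except the one we are about to reuse */
                if (id != FIRST_CONTEXT)
                        context_map[id] = 0;
        }
        /* a single whole-TLB flush (_tlbil_all) would happen here */
        nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;
        return FIRST_CONTEXT; /* reused immediately by the caller */
}

int main(void)
{
        unsigned int id;

        memset(context_map, 1, sizeof(context_map)); /* all 16 ids taken */
        id = steal_all_contexts_model();
        printf("reused id %u, %d ids now free\n", id, nr_free_contexts);
        return 0;
}

Running it prints "reused id 0, 15 ids now free": one flush buys fifteen further allocations before another steal is needed.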
@@ -241,7 +276,10 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
                         goto stolen;
                 }
 #endif /* CONFIG_SMP */
-                id = steal_context_up(id);
+                if (no_selective_tlbil)
+                        id = steal_all_contexts();
+                else
+                        id = steal_context_up(id);
                 goto stolen;
         }
         nr_free_contexts--;
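The trade-off encoded by this branch: when the hardware cannot invalidate TLB entries for a single context, evicting one victim with steal_context_up() would still cost a whole-TLB flush per steal, whereas steal_all_contexts() pays one _tlbil_all() for last_context - first_context future allocations. Presumably that is also why the single-victim path is kept for MMUs that do support selective invalidation.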
@@ -407,12 +445,15 @@ void __init mmu_context_init(void)
         if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
                 first_context = 0;
                 last_context = 15;
+                no_selective_tlbil = true;
         } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
                 first_context = 1;
                 last_context = 65535;
+                no_selective_tlbil = false;
         } else {
                 first_context = 1;
                 last_context = 255;
+                no_selective_tlbil = false;
         }
 
 #ifdef DEBUG_CLAMP_LAST_CONTEXT
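Only the 8xx sets no_selective_tlbil = true, and it is also the configuration with by far the smallest ID space: 16 contexts (0..15), versus 65535 (1..65535) on the 47x and 255 (1..255) otherwise. Context exhaustion, the trigger for stealing, is therefore presumably most frequent exactly where the new all-at-once path applies.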