author	Thomas Gleixner <tglx@linutronix.de>	2010-02-17 21:22:39 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-02-18 22:52:30 -0500
commit	be833f3371bd9580d9f5a507390d72452577f394 (patch)
tree	329b435c37f9957f9c65a8a7cba11868b1537e57 /arch/powerpc/mm
parent	87d31345c0a90ccdf185feed9923ed14764f45dc (diff)
powerpc: Convert context_lock to raw_spinlock
context_lock needs to be a real spinlock in RT. Convert it to raw_spinlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
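For background, a minimal sketch of the raw spinlock API this patch switches to; the lock name and function below are hypothetical illustrations, not part of the patch. Under PREEMPT_RT, spinlock_t is substituted by a sleeping lock, while raw_spinlock_t remains a true busy-waiting lock, which is why a lock taken on a path that must not sleep (such as the MMU context switch here) has to be raw:

#include <linux/spinlock.h>

/* Hypothetical lock, for illustration only -- not the context_lock in this patch. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_critical_section(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() never sleeps, even with PREEMPT_RT enabled */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... short, non-sleeping critical section ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The raw_spin_* calls mirror the regular spin_* API, so the conversion in the diff below is a mechanical rename of the lock definition and its lock/unlock sites.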
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/mmu_context_nohash.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1044a634b6d0..dbc692145ecb 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -56,7 +56,7 @@ static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_RAW_SPINLOCK(context_lock);
 
 #define CTX_MAP_SIZE \
 	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -121,9 +121,9 @@ static unsigned int steal_context_smp(unsigned int id)
 		/* This will happen if you have more CPUs than available contexts,
 		 * all we can do here is wait a bit and try again
 		 */
-		spin_unlock(&context_lock);
+		raw_spin_unlock(&context_lock);
 		cpu_relax();
-		spin_lock(&context_lock);
+		raw_spin_lock(&context_lock);
 
 		/* This will cause the caller to try again */
 		return MMU_NO_CONTEXT;
@@ -194,7 +194,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long *map;
 
 	/* No lockless fast path .. yet */
-	spin_lock(&context_lock);
+	raw_spin_lock(&context_lock);
 
 	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
 		cpu, next, next->context.active, next->context.id);
@@ -278,7 +278,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	/* Flick the MMU and release lock */
 	pr_hardcont(" -> %d\n", id);
 	set_context(id, next->pgd);
-	spin_unlock(&context_lock);
+	raw_spin_unlock(&context_lock);
 }
 
 /*
@@ -307,7 +307,7 @@ void destroy_context(struct mm_struct *mm)
 
 	WARN_ON(mm->context.active != 0);
 
-	spin_lock_irqsave(&context_lock, flags);
+	raw_spin_lock_irqsave(&context_lock, flags);
 	id = mm->context.id;
 	if (id != MMU_NO_CONTEXT) {
 		__clear_bit(id, context_map);
@@ -318,7 +318,7 @@ void destroy_context(struct mm_struct *mm)
 		context_mm[id] = NULL;
 		nr_free_contexts++;
 	}
-	spin_unlock_irqrestore(&context_lock, flags);
+	raw_spin_unlock_irqrestore(&context_lock, flags);
 }
 
 #ifdef CONFIG_SMP