Diffstat (limited to 'arch/powerpc/mm/mmu_context_nohash.c')
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1044a634b6d0..dbc692145ecb 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -56,7 +56,7 @@ static unsigned int next_context, nr_free_contexts;
 static unsigned long *context_map;
 static unsigned long *stale_map[NR_CPUS];
 static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_RAW_SPINLOCK(context_lock);
 
 #define CTX_MAP_SIZE	\
 	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -121,9 +121,9 @@ static unsigned int steal_context_smp(unsigned int id)
 	/* This will happen if you have more CPUs than available contexts,
 	 * all we can do here is wait a bit and try again
 	 */
-	spin_unlock(&context_lock);
+	raw_spin_unlock(&context_lock);
 	cpu_relax();
-	spin_lock(&context_lock);
+	raw_spin_lock(&context_lock);
 
 	/* This will cause the caller to try again */
 	return MMU_NO_CONTEXT;
@@ -194,7 +194,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	unsigned long *map;
 
 	/* No lockless fast path .. yet */
-	spin_lock(&context_lock);
+	raw_spin_lock(&context_lock);
 
 	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
 		cpu, next, next->context.active, next->context.id);
@@ -278,7 +278,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
 	/* Flick the MMU and release lock */
 	pr_hardcont(" -> %d\n", id);
 	set_context(id, next->pgd);
-	spin_unlock(&context_lock);
+	raw_spin_unlock(&context_lock);
 }
 
 /*
@@ -307,7 +307,7 @@ void destroy_context(struct mm_struct *mm)
 
 	WARN_ON(mm->context.active != 0);
 
-	spin_lock_irqsave(&context_lock, flags);
+	raw_spin_lock_irqsave(&context_lock, flags);
 	id = mm->context.id;
 	if (id != MMU_NO_CONTEXT) {
 		__clear_bit(id, context_map);
@@ -318,7 +318,7 @@ void destroy_context(struct mm_struct *mm)
 		context_mm[id] = NULL;
 		nr_free_contexts++;
 	}
-	spin_unlock_irqrestore(&context_lock, flags);
+	raw_spin_unlock_irqrestore(&context_lock, flags);
 }
 
 #ifdef CONFIG_SMP
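
The change itself is mechanical: context_lock goes from spinlock_t to raw_spinlock_t, and every lock/unlock call switches to the matching raw_spin_* variant. The usual motivation for such conversions is PREEMPT_RT, where a plain spinlock_t can become a sleeping lock; context_lock is taken in switch_mmu_context(), deep in the context-switch path where sleeping is forbidden, so it must remain a true busy-waiting lock. A minimal sketch of the pattern, with a hypothetical my_lock and critical section (neither is from this file):

#include <linux/spinlock.h>

/* Hypothetical lock, mirroring the conversion applied to context_lock. */
static DEFINE_RAW_SPINLOCK(my_lock);

static void my_critical_section(void)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() disables local interrupts and busy-waits
	 * even on PREEMPT_RT, where a plain spin_lock_irqsave() may sleep.
	 * That makes it safe in code that must never schedule, such as the
	 * context-switch path touched above.
	 */
	raw_spin_lock_irqsave(&my_lock, flags);
	/* ... touch state shared with other CPUs and interrupt context ... */
	raw_spin_unlock_irqrestore(&my_lock, flags);
}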
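
The steal_context_smp() hunk also preserves a common busy-wait-and-retry idiom across the conversion: when there are more CPUs than contexts and nothing can be stolen, the lock is dropped, cpu_relax() gives the CPU a pause hint, the lock is retaken, and MMU_NO_CONTEXT is returned so the caller retries. A hedged sketch of that idiom under a raw spinlock, with the retry inlined as a loop for brevity (pool_lock, try_take_one(), and NO_ID are made-up stand-ins, not kernel symbols):

#include <linux/spinlock.h>

#define NO_ID	((unsigned int)-1)	/* made-up sentinel, analogous to MMU_NO_CONTEXT */

static DEFINE_RAW_SPINLOCK(pool_lock);	/* hypothetical */

static unsigned int try_take_one(void);	/* hypothetical allocator, called under pool_lock */

static unsigned int take_one_blocking(void)
{
	unsigned int id;

	raw_spin_lock(&pool_lock);
	while ((id = try_take_one()) == NO_ID) {
		/* Drop the lock so other CPUs can free an entry meanwhile. */
		raw_spin_unlock(&pool_lock);
		cpu_relax();		/* architecture-specific spin hint */
		raw_spin_lock(&pool_lock);
	}
	raw_spin_unlock(&pool_lock);
	return id;
}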