about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2006-03-06 22:59:50 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:14:20 -0500
commita77754b4d0731321db266c6c60ffcd7c62757da5 (patch)
tree37cc4c6793e2b616791d42ee840e5a007a13eccb
parent9132983ae140a8ca81e95e081d5a4c0dd7a7f670 (diff)
[SPARC64]: Bulletproof MMU context locking.
1) Always spin_lock_init() in init_context(). The caller essentially clears it out, or copies the mm info from the parent. In both cases we need to explicitly initialize the spinlock.

2) Always do explicit IRQ disabling while taking mm->context.lock and ctx_alloc_lock.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/sparc64/mm/init.c5
-rw-r--r--arch/sparc64/mm/tsb.c1
-rw-r--r--include/asm-sparc64/mmu_context.h6
3 files changed, 7 insertions, 5 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 9bbd0bf64af0..a63939347b3d 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -639,9 +639,10 @@ void get_new_mmu_context(struct mm_struct *mm)
639{ 639{
640 unsigned long ctx, new_ctx; 640 unsigned long ctx, new_ctx;
641 unsigned long orig_pgsz_bits; 641 unsigned long orig_pgsz_bits;
642 unsigned long flags;
642 int new_version; 643 int new_version;
643 644
644 spin_lock(&ctx_alloc_lock); 645 spin_lock_irqsave(&ctx_alloc_lock, flags);
645 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 646 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
646 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 647 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
647 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 648 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
@@ -677,7 +678,7 @@ void get_new_mmu_context(struct mm_struct *mm)
677out: 678out:
678 tlb_context_cache = new_ctx; 679 tlb_context_cache = new_ctx;
679 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 680 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
680 spin_unlock(&ctx_alloc_lock); 681 spin_unlock_irqrestore(&ctx_alloc_lock, flags);
681 682
682 if (unlikely(new_version)) 683 if (unlikely(new_version))
683 smp_new_mmu_context_version(); 684 smp_new_mmu_context_version();
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 534ac2819892..f36799b7152c 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -354,6 +354,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
354 354
355int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 355int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
356{ 356{
357 spin_lock_init(&mm->context.lock);
357 358
358 mm->context.sparc64_ctx_val = 0UL; 359 mm->context.sparc64_ctx_val = 0UL;
359 360
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 4be40c58e3c1..ca36ea96f64b 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -67,14 +67,14 @@ extern void __flush_tlb_mm(unsigned long, unsigned long);
67/* Switch the current MM context. Interrupts are disabled. */ 67/* Switch the current MM context. Interrupts are disabled. */
68static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 68static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
69{ 69{
70 unsigned long ctx_valid; 70 unsigned long ctx_valid, flags;
71 int cpu; 71 int cpu;
72 72
73 spin_lock(&mm->context.lock); 73 spin_lock_irqsave(&mm->context.lock, flags);
74 ctx_valid = CTX_VALID(mm->context); 74 ctx_valid = CTX_VALID(mm->context);
75 if (!ctx_valid) 75 if (!ctx_valid)
76 get_new_mmu_context(mm); 76 get_new_mmu_context(mm);
77 spin_unlock(&mm->context.lock); 77 spin_unlock_irqrestore(&mm->context.lock, flags);
78 78
79 if (!ctx_valid || (old_mm != mm)) { 79 if (!ctx_valid || (old_mm != mm)) {
80 load_secondary_context(mm); 80 load_secondary_context(mm);