author    Arnd Bergmann <arnd@arndb.de>  2008-02-28 00:06:30 -0500
committer Jeremy Kerr <jk@ozlabs.org>    2008-02-28 23:19:52 -0500
commit    c92a1acb675058375cc508ad024c33358b42d766 (patch)
tree      2860b1e74c48d09b12ea8b103366e6db0658fdfb /arch/powerpc
parent    cc4b7c1814c9ad375e8167ea4a9ec4a0ec1ada04 (diff)
[POWERPC] spufs: serialize SLB invalidation against SLB loading
There is a potential race between flushes of the entire SLB in the MFC
and the point where new entries are being established. The problem is
that we might put an ESID entry into the MFC SLB when the VSID entry
has just been cleared by the global flush. This can be circumvented by
holding the register_lock throughout both the flushing and the
creation of SLB entries.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
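To make the race window concrete, here is a minimal userspace sketch of the pattern the patch enforces. A pthread mutex stands in for spu->register_lock and a plain array for the MFC SLB; the names slb_entry, slb_flush_all and slb_load are illustrative only, not the kernel API.

/*
 * Userspace model of the race (illustrative names, not kernel API):
 * register_lock plays the role of spu->register_lock, slb[] the MFC SLB.
 */
#include <pthread.h>
#include <string.h>

#define SLB_SIZE 8

struct slb_entry {
        unsigned long esid;     /* effective segment id */
        unsigned long vsid;     /* virtual segment id */
};

static struct slb_entry slb[SLB_SIZE];
static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;

/* Global flush, analogous to writing slb_invalidate_all_W. */
static void slb_flush_all(void)
{
        pthread_mutex_lock(&register_lock);
        memset(slb, 0, sizeof(slb));
        pthread_mutex_unlock(&register_lock);
}

/*
 * Entry setup. If the lock were dropped, a flush could sneak in
 * between the two stores, clearing the just-written VSID while the
 * ESID store below still lands, which creates exactly the
 * inconsistent entry the changelog warns about.
 */
static void slb_load(int slot, unsigned long esid, unsigned long vsid)
{
        pthread_mutex_lock(&register_lock);
        slb[slot].vsid = vsid;
        slb[slot].esid = esid;
        pthread_mutex_unlock(&register_lock);
}

Holding the lock across both operations makes the flush and the two-part entry setup atomic with respect to each other, which is the whole of the fix below.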
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/platforms/cell/spu_base.c   12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index cfc28e93c825..712001f6b7da 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
+        unsigned long flags;
 
+        spin_lock_irqsave(&spu->register_lock, flags);
         if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                 out_be64(&priv2->slb_invalidate_all_W, 0UL);
+        spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
 
@@ -294,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                 nr_slbs++;
         }
 
+        spin_lock_irq(&spu->register_lock);
         /* Add the set of SLBs */
         for (i = 0; i < nr_slbs; i++)
                 spu_load_slb(spu, i, &slbs[i]);
+        spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
 
@@ -341,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
         if (stat & CLASS1_STORAGE_FAULT_INTR)
                 spu_mfc_dsisr_set(spu, 0ul);
         spu_int_stat_clear(spu, 1, stat);
-        spin_unlock(&spu->register_lock);
-        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-                        dar, dsisr);
 
         if (stat & CLASS1_SEGMENT_FAULT_INTR)
                 __spu_trap_data_seg(spu, dar);
 
+        spin_unlock(&spu->register_lock);
+        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+                        dar, dsisr);
+
         if (stat & CLASS1_STORAGE_FAULT_INTR)
                 __spu_trap_data_map(spu, dar, dsisr);
 
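A note on the lock flavours above, as read from the diff rather than the changelog: spu_invalidate_slbs() takes spin_lock_irqsave(), presumably because it can be reached with interrupts either enabled or disabled; spu_setup_kernel_slbs() uses plain spin_lock_irq(), suggesting its callers always run with interrupts on; and in spu_irq_class_1(), which already holds register_lock, the unlock and pr_debug() are simply moved down so that __spu_trap_data_seg() runs inside the existing critical section.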