path: root/arch/powerpc/platforms/cell/spufs/sched.c
author     Arnd Bergmann <arnd.bergmann@de.ibm.com>    2007-04-23 15:08:10 -0400
committer  Arnd Bergmann <arnd@klappe.arndb.de>        2007-04-23 15:18:54 -0400
commit     390c53430498c9973e015432806edd53b2efe6c6 (patch)
tree       931970a9d56ecc7e1663d3a4ae64c68606f4b08a    /arch/powerpc/platforms/cell/spufs/sched.c
parent     e097b513285e616215b23af234d127298bb8d89a (diff)
[POWERPC] spufs: add memory barriers after set_bit
set_bit does not guarantee ordering on powerpc, so using it for
communication between threads requires explicit mb() calls.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
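As an illustration of the pattern the message describes (not part of the patch), here is a minimal kernel-style sketch; the flag word, bit number, and both functions are hypothetical. set_bit() is atomic but on powerpc is not a memory barrier, so the explicit mb() is what orders the bit update against the setter's subsequent accesses as seen by the other thread:

	/* Hypothetical sketch: one thread raises a stop flag, another polls it. */
	#include <linux/bitops.h>
	#include <asm/system.h>		/* mb() in kernels of this era */

	#define MY_STOP_BIT	0		/* hypothetical bit number */
	static unsigned long my_flags;		/* hypothetical flag word */

	static void signal_stop(void)
	{
		set_bit(MY_STOP_BIT, &my_flags);	/* atomic, but no ordering guarantee on powerpc */
		mb();					/* make the bit visible before anything that follows */
		/*
		 * ... actions the other thread must not observe before the bit,
		 * e.g. cancel_delayed_work() in spu_stop_tick() below ...
		 */
	}

	static int stop_requested(void)
	{
		return test_bit(MY_STOP_BIT, &my_flags);
	}

The patch applies the same reasoning in spu_start_tick(), spu_stop_tick(), and spu_add_to_rq(): each clear_bit()/set_bit() used for cross-thread signalling is followed by an mb().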
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--    arch/powerpc/platforms/cell/spufs/sched.c    3
1 files changed, 3 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 405a0555d75..1582d764523 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -76,6 +76,7 @@ void spu_start_tick(struct spu_context *ctx)
 		 * Make sure the exiting bit is cleared.
 		 */
 		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
 		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
 	}
 }
@@ -88,6 +89,7 @@ void spu_stop_tick(struct spu_context *ctx)
 		 * makes sure it does not rearm itself anymore.
 		 */
 		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
+		mb();
 		cancel_delayed_work(&ctx->sched_work);
 	}
 }
@@ -239,6 +241,7 @@ static void spu_add_to_rq(struct spu_context *ctx)
 	spin_lock(&spu_prio->runq_lock);
 	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 	set_bit(ctx->prio, spu_prio->bitmap);
+	mb();
 	spin_unlock(&spu_prio->runq_lock);
 }
 