author     Christoph Hellwig <hch@lst.de>        2007-06-28 20:58:06 -0400
committer  Paul Mackerras <paulus@samba.org>     2007-07-03 01:24:46 -0400
commit     27449971e6907ff38bde7bbc4647e55bd7309fc3 (patch)
tree       d1fb5cf5e5a93eea27dada901623b02bf9a01e02 /arch/powerpc
parent     c77239b8be74f775142d9dd01041e2ce864ba20d (diff)
[POWERPC] spusched: Fix runqueue corruption
spu_activate can be called from multiple threads at the same time on
behalf of the same spu context. We need to make sure to only add it
once to avoid runqueue corruption.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
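To see why an unguarded list_add_tail corrupts the runqueue, consider two
threads entering __spu_add_to_rq for the same context: the second add relinks
a node that is already on the list, leaving stale pointers behind. The
standalone userspace sketch below models that fix; it is not spufs code, and
the runq, fake_ctx, and node_on_list names are invented stand-ins for the
kernel's list_head machinery and the patch's list_empty(&ctx->rq) check.

/*
 * Standalone model of the race this patch fixes: two threads try to
 * enqueue the same context, and only the "already queued?" check keeps
 * the queue consistent.  Build with: cc -pthread race.c
 */
#include <pthread.h>
#include <stdio.h>

struct node { struct node *next, *prev; };

static struct node runq = { &runq, &runq };	/* empty circular list */
static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;

/* A self-linked node is off-list, mirroring list_empty(&ctx->rq). */
static int node_on_list(const struct node *n) { return n->next != n; }

static void add_tail(struct node *n)
{
	n->prev = runq.prev;
	n->next = &runq;
	runq.prev->next = n;
	runq.prev = n;
}

static struct node fake_ctx = { &fake_ctx, &fake_ctx };

static void *enqueue(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&runq_lock);
	/*
	 * Without this check a second caller would relink the same node
	 * twice, leaving runq.prev pointing at a stale entry.
	 */
	if (!node_on_list(&fake_ctx))
		add_tail(&fake_ctx);
	pthread_mutex_unlock(&runq_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	struct node *p;
	int count = 0;

	pthread_create(&a, NULL, enqueue, NULL);
	pthread_create(&b, NULL, enqueue, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	for (p = runq.next; p != &runq; p = p->next)
		count++;
	printf("entries on runq: %d\n", count);	/* prints 1, not 2 */
	return 0;
}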
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2fb0e63344cc..9fb3133268f6 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -292,12 +292,25 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
  */
 static void __spu_add_to_rq(struct spu_context *ctx)
 {
-	int prio = ctx->prio;
-
-	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
-	set_bit(prio, spu_prio->bitmap);
-	if (!spu_prio->nr_waiting++)
-		__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	/*
+	 * Unfortunately this code path can be called from multiple threads
+	 * on behalf of a single context due to the way the problem state
+	 * mmap support works.
+	 *
+	 * Fortunately we need to wake up all these threads at the same time
+	 * and can simply skip the runqueue addition for every but the first
+	 * thread getting into this codepath.
+	 *
+	 * It's still quite hacky, and long-term we should proxy all other
+	 * threads through the owner thread so that spu_run is in control
+	 * of all the scheduling activity for a given context.
+	 */
+	if (list_empty(&ctx->rq)) {
+		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+		set_bit(ctx->prio, spu_prio->bitmap);
+		if (!spu_prio->nr_waiting++)
+			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	}
 }
 
 static void __spu_del_from_rq(struct spu_context *ctx)
@@ -440,12 +453,18 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
 	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
 
-	if (ctx->spu)
-		return 0;
-
 	do {
 		struct spu *spu;
 
+		/*
+		 * If there are multiple threads waiting for a single context
+		 * only one actually binds the context while the others will
+		 * only be able to acquire the state_mutex once the context
+		 * already is in runnable state.
+		 */
+		if (ctx->spu)
+			return 0;
+
 		spu = spu_get_idle(ctx);
 		/*
 		 * If this is a realtime thread we try to get it running by
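The second hunk applies the same idea to spu_activate: instead of testing
ctx->spu once up front, every pass of the retry loop rechecks it, so a waiter
that slept on state_mutex notices that another thread already bound the
context and returns instead of activating it a second time. Below is a
hedged standalone sketch of that recheck-inside-the-loop pattern; the
fake_context struct and try_bind() are invented stand-ins, not spufs APIs.

/*
 * Model of rechecking "already bound?" inside the retry loop, after the
 * lock is (re)acquired, rather than once before it.  Both threads call
 * activate(); exactly one binds, the other sees ctx->bound and returns.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_context {
	pthread_mutex_t state_mutex;
	bool bound;		/* stands in for ctx->spu != NULL */
};

/* Pretend resource acquisition; succeeds on the first attempt here. */
static bool try_bind(struct fake_context *ctx)
{
	ctx->bound = true;
	return true;
}

static void *activate(void *arg)
{
	struct fake_context *ctx = arg;

	for (;;) {
		pthread_mutex_lock(&ctx->state_mutex);
		/*
		 * A check before the loop would miss bindings made by
		 * another thread while we slept on state_mutex; checking
		 * here, on every iteration, cannot.
		 */
		if (ctx->bound) {
			pthread_mutex_unlock(&ctx->state_mutex);
			printf("lost the race, context already bound\n");
			return NULL;
		}
		if (try_bind(ctx)) {
			pthread_mutex_unlock(&ctx->state_mutex);
			printf("won the race, bound the context\n");
			return NULL;
		}
		pthread_mutex_unlock(&ctx->state_mutex);
		/* real code would sleep on the runqueue here, then retry */
	}
}

int main(void)
{
	struct fake_context ctx = { PTHREAD_MUTEX_INITIALIZER, false };
	pthread_t a, b;

	pthread_create(&a, NULL, activate, &ctx);
	pthread_create(&b, NULL, activate, &ctx);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}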