 arch/powerpc/platforms/cell/spufs/sched.c | 37 ++++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2fb0e63344cc..9fb3133268f6 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -292,12 +292,25 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
  */
 static void __spu_add_to_rq(struct spu_context *ctx)
 {
-	int prio = ctx->prio;
-
-	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
-	set_bit(prio, spu_prio->bitmap);
-	if (!spu_prio->nr_waiting++)
-		__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	/*
+	 * Unfortunately this code path can be called from multiple threads
+	 * on behalf of a single context due to the way the problem state
+	 * mmap support works.
+	 *
+	 * Fortunately we need to wake up all these threads at the same time
+	 * and can simply skip the runqueue addition for every but the first
+	 * thread getting into this codepath.
+	 *
+	 * It's still quite hacky, and long-term we should proxy all other
+	 * threads through the owner thread so that spu_run is in control
+	 * of all the scheduling activity for a given context.
+	 */
+	if (list_empty(&ctx->rq)) {
+		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+		set_bit(ctx->prio, spu_prio->bitmap);
+		if (!spu_prio->nr_waiting++)
+			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+	}
 }
 
 static void __spu_del_from_rq(struct spu_context *ctx)
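The list_empty() guard is what makes the addition idempotent: an unqueued list_head points at itself, so only the first of the racing threads actually queues the context and the rest fall straight through. A minimal userspace sketch of the pattern, assuming simplified stand-ins for the kernel pieces (node, fake_ctx, runq, runq_lock and add_to_rq are all invented for illustration, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* toy circular doubly-linked list, mimicking the kernel's list_head */
struct node { struct node *prev, *next; };

static void list_init(struct node *n) { n->prev = n->next = n; }
static bool list_empty(const struct node *n) { return n->next == n; }

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

struct fake_ctx { struct node rq; };      /* stand-in for spu_context */

static struct node runq;                  /* stand-in for spu_prio->runq[prio] */
static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_to_rq(struct fake_ctx *ctx)
{
	pthread_mutex_lock(&runq_lock);
	if (list_empty(&ctx->rq))         /* skip all but the first caller */
		list_add_tail(&ctx->rq, &runq);
	pthread_mutex_unlock(&runq_lock);
}

int main(void)
{
	struct fake_ctx ctx;

	list_init(&runq);
	list_init(&ctx.rq);               /* unqueued == points at itself */

	add_to_rq(&ctx);                  /* first caller queues the context */
	add_to_rq(&ctx);                  /* duplicate caller is a no-op */

	printf("queued exactly once: %s\n",
	       runq.next == &ctx.rq && runq.prev == &ctx.rq ? "yes" : "no");
	return 0;
}

The pattern only holds if the dequeue side reinitializes ctx->rq (the way list_del_init() would) so that list_empty() reports true again once the context leaves the runqueue; without that, a context could never be requeued.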
@@ -440,12 +453,18 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
 	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
 
-	if (ctx->spu)
-		return 0;
-
 	do {
 		struct spu *spu;
 
+		/*
+		 * If there are multiple threads waiting for a single context
+		 * only one actually binds the context while the others will
+		 * only be able to acquire the state_mutex once the context
+		 * already is in runnable state.
+		 */
+		if (ctx->spu)
+			return 0;
+
 		spu = spu_get_idle(ctx);
 		/*
 		 * If this is a realtime thread we try to get it running by
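Relocating the ctx->spu test into the loop turns it into a check-under-lock: waiters serialize on the context's state_mutex, the first one binds an SPU, and every later waiter observes ctx->spu already set and returns early. A compilable pthreads sketch of that invariant, with all names (fake_ctx, activate, binds) invented for illustration and the spu_get_idle()/retry machinery deliberately omitted:

#include <pthread.h>
#include <stdio.h>

struct fake_ctx {
	pthread_mutex_t state_mutex;
	int spu;                /* 0 == unbound, nonzero == "bound" */
};

static int binds;               /* counts threads that actually bind */

static void *activate(void *arg)
{
	struct fake_ctx *ctx = arg;

	pthread_mutex_lock(&ctx->state_mutex);
	if (!ctx->spu) {        /* only the first waiter gets here */
		ctx->spu = 1;
		binds++;
	}
	pthread_mutex_unlock(&ctx->state_mutex);
	return NULL;
}

int main(void)
{
	struct fake_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 0 };
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, activate, &ctx);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("binds = %d (expect 1)\n", binds);
	return 0;
}

Built with cc sketch.c -lpthread, this always prints binds = 1. In the real scheduler the state_mutex is dropped and re-acquired while waiting for an idle SPU, so another thread may bind the context in between, which is why the patch repeats the check on every pass through the do/while loop rather than doing it once before entering.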