author     Jeremy Kerr <jk@ozlabs.org>  2008-02-18 18:05:35 -0500
committer  Jeremy Kerr <jk@ozlabs.org>  2008-02-18 18:12:02 -0500
commit     4ef110141b3e0758fe30d686417b5686b87eb25b (patch)
tree       388b52684d98bfc2e8ad443c391edb675eaf5360 /arch/powerpc/platforms/cell/spufs/sched.c
parent     101142c37be8e5af9b847860219217e6b958c739 (diff)
[POWERPC] spufs: fix scheduler starvation by idle contexts
2.6.25 has a regression where we can starve the scheduler by creating
(N_SPES+1) contexts, then running them one at a time.
The final context will never be run, as the other contexts are loaded on
the SPEs, none of which are reported as free (i.e., spu->alloc_state !=
SPU_FREE), so spu_get_idle() doesn't give us a spu to run on. Because
all of the contexts are stopped, none are descheduled by the scheduler
tick, as spusched_tick returns if spu_stopped(ctx).
This change replaces the spu_stopped() check with checking for SCHED_IDLE
in ctx->policy. We set a context's policy to SCHED_IDLE when we're not
in spu_run(). We also favour SCHED_IDLE contexts when looking for contexts
to unbind, but leave their timeslice intact for later resumption.
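
As a rough sketch of the policy flip described above (the run.c side of the
change is not visible in this sched.c-limited view; spufs_run_spu_sketch()
and do_run() below are placeholders, not code from this commit):

	/*
	 * Illustrative only: mark the context SCHED_IDLE whenever it is
	 * outside spu_run(), and restore the caller's policy while the
	 * context is actually being run.
	 */
	long spufs_run_spu_sketch(struct spu_context *ctx, u32 *npc, u32 *event)
	{
		long ret;

		ctx->policy = current->policy;	/* runnable: use the caller's policy */
		ret = do_run(ctx, npc, event);	/* placeholder for the real run loop */
		ctx->policy = SCHED_IDLE;	/* stopped: spusched_tick may unbind it lazily */

		return ret;
	}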
This patch fixes the following test in the spufs-testsuite:
tests/20-scheduler/02-yield-starvation
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 5915343e2599..3a5972117de7 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -856,21 +856,18 @@ static noinline void spusched_tick(struct spu_context *ctx)
 {
 	struct spu_context *new = NULL;
 	struct spu *spu = NULL;
-	u32 status;
 
 	if (spu_acquire(ctx))
 		BUG();	/* a kernel thread never has signals pending */
 
 	if (ctx->state != SPU_STATE_RUNNABLE)
 		goto out;
-	if (spu_stopped(ctx, &status))
-		goto out;
 	if (ctx->flags & SPU_CREATE_NOSCHED)
 		goto out;
 	if (ctx->policy == SCHED_FIFO)
 		goto out;
 
-	if (--ctx->time_slice)
+	if (--ctx->time_slice && ctx->policy != SCHED_IDLE)
 		goto out;
 
 	spu = ctx->spu;
@@ -880,7 +877,8 @@ static noinline void spusched_tick(struct spu_context *ctx)
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
 		spu_unschedule(spu, ctx);
-		spu_add_to_rq(ctx);
+		if (ctx->policy != SCHED_IDLE)
+			spu_add_to_rq(ctx);
 	} else {
 		spu_context_nospu_trace(spusched_tick__newslice, ctx);
 		ctx->time_slice++;