diff options
context:
space:
mode:
author     Jeremy Kerr <jk@ozlabs.org>    2008-09-04 01:02:47 -0400
committer  Jeremy Kerr <jk@ozlabs.org>    2008-09-04 20:52:03 -0400
commit     b65fe0356b5b732d7e1e0224c6a1cf2eb5255984 (patch)
tree       a5b1e1e16695c6335cdcb51aa2a4a0d47b2b690a /arch/powerpc
parent     9f43e3914dceb0f8191875b3cdf4325b48d0d70a (diff)
powerpc/spufs: Fix race for a free SPU
We currently have a race for a free SPE. With one thread doing a
spu_yield(), and another doing a spu_activate():

thread 1                                 thread 2
spu_yield(oldctx)                        spu_activate(ctx)
  __spu_deactivate(oldctx)
    spu_unschedule(oldctx, spu)
      spu->alloc_state = SPU_FREE
                                         spu = spu_get_idle(ctx)
                                         - searches for a SPE in state
                                           SPU_FREE, gets the context
                                           just freed by thread 1
                                         spu_schedule(ctx, spu)
                                           spu->alloc_state = SPU_USED
  spu_schedule(newctx, spu)
  - assumes spu is still free
  - tries to schedule context on
    already-used spu

This change introduces a 'free_spu' flag to spu_unschedule, to indicate
whether or not the function should free the spu after descheduling the
context. We only set this flag if we're not going to re-schedule another
context on this SPU.

Add a comment to document this behaviour.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
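For illustration only, not part of the patch: a minimal stand-alone user-space
sketch of the same pattern. The names (fake_spu, unschedule(), deactivate())
are invented for this sketch and are not the spufs API; it only models the
idea that the SPU is marked free, under the list lock, solely when the caller
is not about to bind another context to it, so no other thread can grab an
SPU that is about to be reused.

/* race_sketch.c -- build with: cc -pthread race_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_spu {
        pthread_mutex_t list_mutex;     /* stands in for cbe_spu_info[node].list_mutex */
        bool free;                      /* stands in for spu->alloc_state == SPU_FREE */
        int owner;                      /* id of the "context" currently bound, -1 if none */
};

/* Loosely modelled on spu_unschedule(): only mark the spu free when the
 * caller is not going to bind another context to it straight away. */
static void unschedule(struct fake_spu *spu, bool free_spu)
{
        pthread_mutex_lock(&spu->list_mutex);
        spu->owner = -1;
        if (free_spu)
                spu->free = true;
        pthread_mutex_unlock(&spu->list_mutex);
}

/* Loosely modelled on __spu_deactivate(): if a new runnable context
 * exists, pass free_spu == false, so the spu never appears free in the
 * window between unbinding the old context and binding the new one. */
static void deactivate(struct fake_spu *spu, int newctx)
{
        bool have_new = (newctx >= 0);

        unschedule(spu, !have_new);
        if (have_new) {
                pthread_mutex_lock(&spu->list_mutex);
                spu->owner = newctx;
                pthread_mutex_unlock(&spu->list_mutex);
        }
}

int main(void)
{
        struct fake_spu spu = {
                .list_mutex = PTHREAD_MUTEX_INITIALIZER,
                .free = false,
                .owner = 1,
        };

        deactivate(&spu, 2);    /* reschedule: spu never becomes free */
        printf("owner=%d free=%d\n", spu.owner, spu.free);

        deactivate(&spu, -1);   /* nothing runnable: spu is freed */
        printf("owner=%d free=%d\n", spu.owner, spu.free);
        return 0;
}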
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9bb45c6b839c..897c74061168 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -732,13 +732,28 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 	spu_release(ctx);
 }
 
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu:	The SPU to unschedule from
+ * @ctx:	The context currently scheduled on the SPU
+ * @free_spu	Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+		int free_spu)
 {
 	int node = spu->node;
 
 	mutex_lock(&cbe_spu_info[node].list_mutex);
 	cbe_spu_info[node].nr_active--;
-	spu->alloc_state = SPU_FREE;
+	if (free_spu)
+		spu->alloc_state = SPU_FREE;
 	spu_unbind_context(spu, ctx);
 	ctx->stats.invol_ctx_switch++;
 	spu->stats.invol_ctx_switch++;
@@ -838,7 +853,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
-			spu_unschedule(spu, ctx);
+			spu_unschedule(spu, ctx, new == NULL);
 			if (new) {
 				if (new->flags & SPU_CREATE_NOSCHED)
 					wake_up(&new->stop_wq);
@@ -911,7 +926,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
-		spu_unschedule(spu, ctx);
+		spu_unschedule(spu, ctx, 0);
 		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 			spu_add_to_rq(ctx);
 	} else {