Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  29
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1c1b627ee843..67595bc380dc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -643,9 +643,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
 			    (!victim || tmp->prio > victim->prio)) {
 				victim = spu->ctx;
-				get_spu_context(victim);
 			}
 		}
+		if (victim)
+			get_spu_context(victim);
 		mutex_unlock(&cbe_spu_info[node].list_mutex);
 
 		if (victim) {
@@ -727,17 +728,33 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx)
 	/* not a candidate for interruptible because it's called either
 	   from the scheduler thread or from spu_deactivate */
 	mutex_lock(&ctx->state_mutex);
-	__spu_schedule(spu, ctx);
+	if (ctx->state == SPU_STATE_SAVED)
+		__spu_schedule(spu, ctx);
 	spu_release(ctx);
 }
 
-static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unschedule - remove a context from a spu, and possibly release it.
+ * @spu:	The SPU to unschedule from
+ * @ctx:	The context currently scheduled on the SPU
+ * @free_spu	Whether to free the SPU for other contexts
+ *
+ * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
+ * SPU is made available for other contexts (ie, may be returned by
+ * spu_get_idle). If this is zero, the caller is expected to schedule another
+ * context to this spu.
+ *
+ * Should be called with ctx->state_mutex held.
+ */
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
+		int free_spu)
 {
 	int node = spu->node;
 
 	mutex_lock(&cbe_spu_info[node].list_mutex);
 	cbe_spu_info[node].nr_active--;
-	spu->alloc_state = SPU_FREE;
+	if (free_spu)
+		spu->alloc_state = SPU_FREE;
 	spu_unbind_context(spu, ctx);
 	ctx->stats.invol_ctx_switch++;
 	spu->stats.invol_ctx_switch++;
@@ -837,7 +854,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
-			spu_unschedule(spu, ctx);
+			spu_unschedule(spu, ctx, new == NULL);
 			if (new) {
 				if (new->flags & SPU_CREATE_NOSCHED)
 					wake_up(&new->stop_wq);
@@ -910,7 +927,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
 
 	new = grab_runnable_context(ctx->prio + 1, spu->node);
 	if (new) {
-		spu_unschedule(spu, ctx);
+		spu_unschedule(spu, ctx, 0);
 		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
 			spu_add_to_rq(ctx);
 	} else {
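
For context, a minimal user-space sketch of the invariant these hunks establish: an SPU is only marked SPU_FREE when no replacement context is handed to it in the same scheduling step. This is not kernel code; the structures and helpers below are simplified stand-ins named after the functions in the diff, and locking, reference counting, and statistics are omitted.

/* sketch.c - illustrates the free_spu decision made at the call-sites above */
#include <stdio.h>
#include <stddef.h>

enum spu_alloc_state { SPU_USED, SPU_FREE };

struct spu_context { const char *name; };

struct spu {
	enum spu_alloc_state alloc_state;
	struct spu_context *ctx;
};

/* Stand-in for spu_unschedule(): detach the context, and release the SPU
 * only when the caller indicates no replacement is coming (free_spu != 0). */
static void unschedule(struct spu *spu, int free_spu)
{
	spu->ctx = NULL;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
}

/* Stand-in for the __spu_deactivate() call-site: if a runnable replacement
 * exists, keep the SPU reserved and bind the new context right away;
 * otherwise hand the SPU back to the idle pool. */
static void deactivate(struct spu *spu, struct spu_context *replacement)
{
	unschedule(spu, replacement == NULL);
	if (replacement) {
		spu->ctx = replacement;
		spu->alloc_state = SPU_USED;
	}
}

int main(void)
{
	struct spu_context running = { "running" }, next = { "next" };
	struct spu spu = { SPU_USED, &running };

	deactivate(&spu, &next);
	printf("with replacement:    state=%s ctx=%s\n",
	       spu.alloc_state == SPU_FREE ? "FREE" : "USED",
	       spu.ctx ? spu.ctx->name : "(none)");

	deactivate(&spu, NULL);
	printf("without replacement: state=%s ctx=%s\n",
	       spu.alloc_state == SPU_FREE ? "FREE" : "USED",
	       spu.ctx ? spu.ctx->name : "(none)");
	return 0;
}

Keeping the SPU reserved when a replacement is already chosen (free_spu == 0, as in the spusched_tick hunk) mirrors the patch's intent of preventing another CPU from grabbing the SPU between the unschedule and the immediate reschedule.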