diff options
| -rw-r--r-- | arch/powerpc/platforms/cell/spufs/sched.c | 23 |
1 files changed, 19 insertions, 4 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 9bb45c6b839c..897c74061168 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
| @@ -732,13 +732,28 @@ static void spu_schedule(struct spu *spu, struct spu_context *ctx) | |||
| 732 | spu_release(ctx); | 732 | spu_release(ctx); |
| 733 | } | 733 | } |
| 734 | 734 | ||
| 735 | static void spu_unschedule(struct spu *spu, struct spu_context *ctx) | 735 | /** |
| 736 | * spu_unschedule - remove a context from a spu, and possibly release it. | ||
| 737 | * @spu: The SPU to unschedule from | ||
| 738 | * @ctx: The context currently scheduled on the SPU | ||
| 739 | * @free_spu: Whether to free the SPU for other contexts | ||
| 740 | * | ||
| 741 | * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the | ||
| 742 | * SPU is made available for other contexts (i.e., may be returned by | ||
| 743 | * spu_get_idle). If this is zero, the caller is expected to schedule another | ||
| 744 | * context to this spu. | ||
| 745 | * | ||
| 746 | * Should be called with ctx->state_mutex held. | ||
| 747 | */ | ||
| 748 | static void spu_unschedule(struct spu *spu, struct spu_context *ctx, | ||
| 749 | int free_spu) | ||
| 736 | { | 750 | { |
| 737 | int node = spu->node; | 751 | int node = spu->node; |
| 738 | 752 | ||
| 739 | mutex_lock(&cbe_spu_info[node].list_mutex); | 753 | mutex_lock(&cbe_spu_info[node].list_mutex); |
| 740 | cbe_spu_info[node].nr_active--; | 754 | cbe_spu_info[node].nr_active--; |
| 741 | spu->alloc_state = SPU_FREE; | 755 | if (free_spu) |
| 756 | spu->alloc_state = SPU_FREE; | ||
| 742 | spu_unbind_context(spu, ctx); | 757 | spu_unbind_context(spu, ctx); |
| 743 | ctx->stats.invol_ctx_switch++; | 758 | ctx->stats.invol_ctx_switch++; |
| 744 | spu->stats.invol_ctx_switch++; | 759 | spu->stats.invol_ctx_switch++; |
| @@ -838,7 +853,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) | |||
| 838 | if (spu) { | 853 | if (spu) { |
| 839 | new = grab_runnable_context(max_prio, spu->node); | 854 | new = grab_runnable_context(max_prio, spu->node); |
| 840 | if (new || force) { | 855 | if (new || force) { |
| 841 | spu_unschedule(spu, ctx); | 856 | spu_unschedule(spu, ctx, new == NULL); |
| 842 | if (new) { | 857 | if (new) { |
| 843 | if (new->flags & SPU_CREATE_NOSCHED) | 858 | if (new->flags & SPU_CREATE_NOSCHED) |
| 844 | wake_up(&new->stop_wq); | 859 | wake_up(&new->stop_wq); |
| @@ -911,7 +926,7 @@ static noinline void spusched_tick(struct spu_context *ctx) | |||
| 911 | 926 | ||
| 912 | new = grab_runnable_context(ctx->prio + 1, spu->node); | 927 | new = grab_runnable_context(ctx->prio + 1, spu->node); |
| 913 | if (new) { | 928 | if (new) { |
| 914 | spu_unschedule(spu, ctx); | 929 | spu_unschedule(spu, ctx, 0); |
| 915 | if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) | 930 | if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) |
| 916 | spu_add_to_rq(ctx); | 931 | spu_add_to_rq(ctx); |
| 917 | } else { | 932 | } else { |
