author    Luke Browning <lukebr@linux.vnet.ibm.com>  2007-12-20 02:39:59 -0500
committer Paul Mackerras <paulus@samba.org>          2007-12-21 03:46:21 -0500
commit    e65c2f6fcebb9af0c3f53c796aff730dd657f5e7
tree      98b39e5efb858fc46022a5621aee07e57dad3919
parent    9476141c185aa131fa8b4b6ccc5c0ccf92300225

[POWERPC] spufs: decouple spu scheduler from spufs_spu_run (asynchronous scheduling)

Change spufs_spu_run so that the context is queued directly to the
scheduler and the controlling thread advances directly to spufs_wait()
for spe errors and exceptions. nosched contexts are treated the same
as before.

Fixes from Christoph Hellwig <hch@lst.de>

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
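For orientation, the resulting control flow in spufs_spu_run() is roughly the
following. This is an illustrative sketch only, not the literal kernel source:
the wrapper name run_loop_sketch() is invented, locking, event reporting and
most error handling are omitted, and it relies on the spufs helpers referenced
in this patch (spu_run_init(), spufs_wait(), spu_stopped(), spu_run_fini()).

/*
 * Sketch of the decoupled run loop: spu_run_init() queues the context to
 * the scheduler (spu_activate() either binds it to an SPU right away or
 * puts it on the run queue), and the controlling thread then simply sleeps
 * in spufs_wait() until spu_stopped() reports a stop condition, a fault or
 * a pending signal.  The scheduler binds and unbinds the context
 * asynchronously; only NOSCHED contexts still wait for the load itself.
 */
static long run_loop_sketch(struct spu_context *ctx, u32 *npc)
{
	u32 status;
	long ret;

	ret = spu_run_init(ctx, npc);	/* queues ctx to the scheduler */
	if (ret)
		return ret;

	do {
		/* sleep until the context stops or a signal arrives */
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (ret)
			break;
		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT |
				     SPU_STATUS_SINGLE_STEP)));

	ret = spu_run_fini(ctx, npc, &status);	/* also dequeues the context */
	return ret;
}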
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c |  31
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c    |   9
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c     |  90
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c   | 235
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h   |   3
5 files changed, 200 insertions(+), 168 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 6fa24d38706e..290b10e45105 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -134,37 +134,6 @@ void spu_unmap_mappings(struct spu_context *ctx)
 }
 
 /**
- * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
- * @ctx: spu contex to lock
- *
- * Note:
- *	Returns 0 and with the context locked on success
- *	Returns negative error and with the context _unlocked_ on failure.
- */
-int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
-{
-	int ret = -EINVAL;
-
-	spu_acquire(ctx);
-	if (ctx->state == SPU_STATE_SAVED) {
-		/*
-		 * Context is about to be freed, so we can't acquire it anymore.
-		 */
-		if (!ctx->owner)
-			goto out_unlock;
-		ret = spu_activate(ctx, flags);
-		if (ret)
-			goto out_unlock;
-	}
-
-	return 0;
-
- out_unlock:
-	spu_release(ctx);
-	return ret;
-}
-
-/**
  * spu_acquire_saved - lock spu contex and make sure it is in saved state
  * @ctx: spu contex to lock
  */
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 714972621220..78df905743b3 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -572,6 +572,9 @@ void spufs_ibox_callback(struct spu *spu)
 {
 	struct spu_context *ctx = spu->ctx;
 
+	if (!ctx)
+		return;
+
 	wake_up_all(&ctx->ibox_wq);
 	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
 }
@@ -708,6 +711,9 @@ void spufs_wbox_callback(struct spu *spu)
 {
 	struct spu_context *ctx = spu->ctx;
 
+	if (!ctx)
+		return;
+
 	wake_up_all(&ctx->wbox_wq);
 	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
 }
@@ -1339,6 +1345,9 @@ void spufs_mfc_callback(struct spu *spu)
 {
 	struct spu_context *ctx = spu->ctx;
 
+	if (!ctx)
+		return;
+
 	wake_up_all(&ctx->mfc_wq);
 
 	pr_debug("%s %s\n", __FUNCTION__, spu->name);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 652ae1366dc8..b380050cdbc7 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -41,21 +41,29 @@ void spufs_stop_callback(struct spu *spu)
 	spu->dar = 0;
 }
 
-static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
+int spu_stopped(struct spu_context *ctx, u32 *stat)
 {
-	struct spu *spu;
-	u64 pte_fault;
+	u64 dsisr;
+	u32 stopped;
 
 	*stat = ctx->ops->status_read(ctx);
 
-	spu = ctx->spu;
-	if (ctx->state != SPU_STATE_RUNNABLE ||
-	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+		return 1;
+
+	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
+		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
+	if (*stat & stopped)
+		return 1;
+
+	dsisr = ctx->csa.dsisr;
+	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
-	pte_fault = ctx->csa.dsisr &
-	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
-		1 : 0;
+
+	if (ctx->csa.class_0_pending)
+		return 1;
+
+	return 0;
 }
 
 static int spu_setup_isolated(struct spu_context *ctx)
@@ -151,24 +159,27 @@ out:
 
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
-	unsigned long runcntl;
+	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 	int ret;
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
-	if (ctx->flags & SPU_CREATE_ISOLATE) {
-		/*
-		 * Force activation of spu. Isolated state assumes that
-		 * special loader context is loaded and running on spu.
-		 */
+	/*
+	 * NOSCHED is synchronous scheduling with respect to the caller.
+	 * The caller waits for the context to be loaded.
+	 */
+	if (ctx->flags & SPU_CREATE_NOSCHED) {
 		if (ctx->state == SPU_STATE_SAVED) {
-			spu_set_timeslice(ctx);
-
 			ret = spu_activate(ctx, 0);
 			if (ret)
 				return ret;
 		}
+	}
 
+	/*
+	 * Apply special setup as required.
+	 */
+	if (ctx->flags & SPU_CREATE_ISOLATE) {
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
 			ret = spu_setup_isolated(ctx);
 			if (ret)
@@ -183,10 +194,11 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 			    (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+	}
 
+	if (ctx->flags & SPU_CREATE_NOSCHED) {
 		spuctx_switch_state(ctx, SPU_UTIL_USER);
 		ctx->ops->runcntl_write(ctx, runcntl);
-
 	} else {
 		unsigned long privcntl;
 
@@ -194,20 +206,18 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
 		else
 			privcntl = SPU_PRIVCNTL_MODE_NORMAL;
-		runcntl = SPU_RUNCNTL_RUNNABLE;
 
 		ctx->ops->npc_write(ctx, *npc);
 		ctx->ops->privcntl_write(ctx, privcntl);
+		ctx->ops->runcntl_write(ctx, runcntl);
 
 		if (ctx->state == SPU_STATE_SAVED) {
-			spu_set_timeslice(ctx);
 			ret = spu_activate(ctx, 0);
 			if (ret)
 				return ret;
+		} else {
+			spuctx_switch_state(ctx, SPU_UTIL_USER);
 		}
-
-		spuctx_switch_state(ctx, SPU_UTIL_USER);
-		ctx->ops->runcntl_write(ctx, runcntl);
 	}
 
 	return 0;
@@ -218,6 +228,8 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
 {
 	int ret = 0;
 
+	spu_del_from_rq(ctx);
+
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 
@@ -230,26 +242,6 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
 	return ret;
 }
 
-static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
-				  u32 *status)
-{
-	int ret;
-
-	ret = spu_run_fini(ctx, npc, status);
-	if (ret)
-		return ret;
-
-	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
-		return *status;
-
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return ret;
-
-	spuctx_switch_state(ctx, SPU_UTIL_USER);
-	return 0;
-}
-
 /*
  * SPU syscall restarting is tricky because we violate the basic
  * assumption that the signal handler is running on the interrupted
@@ -386,17 +378,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 		if (ret)
 			break;
 
-		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
-			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret)
-				goto out2;
-			continue;
-		}
-
 		if (signal_pending(current))
 			ret = -ERESTARTSYS;
-
-
 	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
 				      SPU_STATUS_STOPPED_BY_HALT |
 				      SPU_STATUS_SINGLE_STEP)));
@@ -411,7 +394,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	ret = spu_run_fini(ctx, npc, &status);
 	spu_yield(ctx);
 
-out2:
 	if ((ret == 0) ||
 	    ((ret == -ERESTARTSYS) &&
 	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 54b338f8363f..2775c1652ba4 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -146,6 +146,10 @@ void spu_update_sched_info(struct spu_context *ctx)
 
 	if (ctx->state == SPU_STATE_RUNNABLE) {
 		node = ctx->spu->node;
+
+		/*
+		 * Take list_mutex to sync with find_victim().
+		 */
 		mutex_lock(&cbe_spu_info[node].list_mutex);
 		__spu_update_sched_info(ctx);
 		mutex_unlock(&cbe_spu_info[node].list_mutex);
@@ -487,6 +491,13 @@ static void __spu_add_to_rq(struct spu_context *ctx)
 	}
 }
 
+static void spu_add_to_rq(struct spu_context *ctx)
+{
+	spin_lock(&spu_prio->runq_lock);
+	__spu_add_to_rq(ctx);
+	spin_unlock(&spu_prio->runq_lock);
+}
+
 static void __spu_del_from_rq(struct spu_context *ctx)
 {
 	int prio = ctx->prio;
@@ -501,10 +512,24 @@ static void __spu_del_from_rq(struct spu_context *ctx)
 	}
 }
 
+void spu_del_from_rq(struct spu_context *ctx)
+{
+	spin_lock(&spu_prio->runq_lock);
+	__spu_del_from_rq(ctx);
+	spin_unlock(&spu_prio->runq_lock);
+}
+
 static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	/*
+	 * The caller must explicitly wait for a context to be loaded
+	 * if the nosched flag is set.  If NOSCHED is not set, the caller
+	 * queues the context and waits for an spu event or error.
+	 */
+	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));
+
 	spin_lock(&spu_prio->runq_lock);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
@@ -604,6 +629,7 @@ static struct spu *find_victim(struct spu_context *ctx)
 			struct spu_context *tmp = spu->ctx;
 
 			if (tmp && tmp->prio > ctx->prio &&
+			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
 			    (!victim || tmp->prio > victim->prio))
 				victim = spu->ctx;
 		}
@@ -644,13 +670,10 @@ static struct spu *find_victim(struct spu_context *ctx)
 
 			victim->stats.invol_ctx_switch++;
 			spu->stats.invol_ctx_switch++;
+			spu_add_to_rq(victim);
+
 			mutex_unlock(&victim->state_mutex);
-			/*
-			 * We need to break out of the wait loop in spu_run
-			 * manually to ensure this context gets put on the
-			 * runqueue again ASAP.
-			 */
-			wake_up(&victim->stop_wq);
+
 			return spu;
 		}
 	}
@@ -658,6 +681,48 @@ static struct spu *find_victim(struct spu_context *ctx)
 	return NULL;
 }
 
+static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+	int node = spu->node;
+	int success = 0;
+
+	spu_set_timeslice(ctx);
+
+	mutex_lock(&cbe_spu_info[node].list_mutex);
+	if (spu->ctx == NULL) {
+		spu_bind_context(spu, ctx);
+		cbe_spu_info[node].nr_active++;
+		spu->alloc_state = SPU_USED;
+		success = 1;
+	}
+	mutex_unlock(&cbe_spu_info[node].list_mutex);
+
+	if (success)
+		wake_up_all(&ctx->run_wq);
+	else
+		spu_add_to_rq(ctx);
+}
+
+static void spu_schedule(struct spu *spu, struct spu_context *ctx)
+{
+	spu_acquire(ctx);
+	__spu_schedule(spu, ctx);
+	spu_release(ctx);
+}
+
+static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
+{
+	int node = spu->node;
+
+	mutex_lock(&cbe_spu_info[node].list_mutex);
+	cbe_spu_info[node].nr_active--;
+	spu->alloc_state = SPU_FREE;
+	spu_unbind_context(spu, ctx);
+	ctx->stats.invol_ctx_switch++;
+	spu->stats.invol_ctx_switch++;
+	mutex_unlock(&cbe_spu_info[node].list_mutex);
+}
+
 /**
  * spu_activate - find a free spu for a context and execute it
  * @ctx: spu context to schedule
@@ -669,40 +734,47 @@ static struct spu *find_victim(struct spu_context *ctx)
  */
 int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-	do {
-		struct spu *spu;
+	struct spu *spu;
 
-		/*
-		 * If there are multiple threads waiting for a single context
-		 * only one actually binds the context while the others will
-		 * only be able to acquire the state_mutex once the context
-		 * already is in runnable state.
-		 */
-		if (ctx->spu)
-			return 0;
+	/*
+	 * If there are multiple threads waiting for a single context
+	 * only one actually binds the context while the others will
+	 * only be able to acquire the state_mutex once the context
+	 * already is in runnable state.
+	 */
+	if (ctx->spu)
+		return 0;
 
-		spu = spu_get_idle(ctx);
-		/*
-		 * If this is a realtime thread we try to get it running by
-		 * preempting a lower priority thread.
-		 */
-		if (!spu && rt_prio(ctx->prio))
-			spu = find_victim(ctx);
-		if (spu) {
-			int node = spu->node;
+spu_activate_top:
+	if (signal_pending(current))
+		return -ERESTARTSYS;
 
-			mutex_lock(&cbe_spu_info[node].list_mutex);
-			spu_bind_context(spu, ctx);
-			cbe_spu_info[node].nr_active++;
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
-			wake_up_all(&ctx->run_wq);
-			return 0;
-		}
+	spu = spu_get_idle(ctx);
+	/*
+	 * If this is a realtime thread we try to get it running by
+	 * preempting a lower priority thread.
+	 */
+	if (!spu && rt_prio(ctx->prio))
+		spu = find_victim(ctx);
+	if (spu) {
+		unsigned long runcntl;
 
+		runcntl = ctx->ops->runcntl_read(ctx);
+		__spu_schedule(spu, ctx);
+		if (runcntl & SPU_RUNCNTL_RUNNABLE)
+			spuctx_switch_state(ctx, SPU_UTIL_USER);
+
+		return 0;
+	}
+
+	if (ctx->flags & SPU_CREATE_NOSCHED) {
 		spu_prio_wait(ctx);
-	} while (!signal_pending(current));
+		goto spu_activate_top;
+	}
 
-	return -ERESTARTSYS;
+	spu_add_to_rq(ctx);
+
+	return 0;
 }
 
 /**
@@ -744,21 +816,17 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 	if (spu) {
 		new = grab_runnable_context(max_prio, spu->node);
 		if (new || force) {
-			int node = spu->node;
-
-			mutex_lock(&cbe_spu_info[node].list_mutex);
-			spu_unbind_context(spu, ctx);
-			spu->alloc_state = SPU_FREE;
-			cbe_spu_info[node].nr_active--;
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
-
-			ctx->stats.vol_ctx_switch++;
-			spu->stats.vol_ctx_switch++;
-
-			if (new)
-				wake_up(&new->stop_wq);
+			spu_unschedule(spu, ctx);
+			if (new) {
+				if (new->flags & SPU_CREATE_NOSCHED)
+					wake_up(&new->stop_wq);
+				else {
+					spu_release(ctx);
+					spu_schedule(spu, new);
+					spu_acquire(ctx);
+				}
+			}
 		}
-
 	}
 
 	return new != NULL;
@@ -795,43 +863,37 @@ void spu_yield(struct spu_context *ctx)
 
 static noinline void spusched_tick(struct spu_context *ctx)
 {
+	struct spu_context *new = NULL;
+	struct spu *spu = NULL;
+	u32 status;
+
+	spu_acquire(ctx);
+
+	if (ctx->state != SPU_STATE_RUNNABLE)
+		goto out;
+	if (spu_stopped(ctx, &status))
+		goto out;
 	if (ctx->flags & SPU_CREATE_NOSCHED)
-		return;
+		goto out;
 	if (ctx->policy == SCHED_FIFO)
-		return;
+		goto out;
 
 	if (--ctx->time_slice)
-		return;
+		goto out;
 
-	/*
-	 * Unfortunately list_mutex ranks outside of state_mutex, so
-	 * we have to trylock here. If we fail give the context another
-	 * tick and try again.
-	 */
-	if (mutex_trylock(&ctx->state_mutex)) {
-		struct spu *spu = ctx->spu;
-		struct spu_context *new;
-
-		new = grab_runnable_context(ctx->prio + 1, spu->node);
-		if (new) {
-			spu_unbind_context(spu, ctx);
-			ctx->stats.invol_ctx_switch++;
-			spu->stats.invol_ctx_switch++;
-			spu->alloc_state = SPU_FREE;
-			cbe_spu_info[spu->node].nr_active--;
-			wake_up(&new->stop_wq);
-			/*
-			 * We need to break out of the wait loop in
-			 * spu_run manually to ensure this context
-			 * gets put on the runqueue again ASAP.
-			 */
-			wake_up(&ctx->stop_wq);
-		}
-		spu_set_timeslice(ctx);
-		mutex_unlock(&ctx->state_mutex);
+	spu = ctx->spu;
+	new = grab_runnable_context(ctx->prio + 1, spu->node);
+	if (new) {
+		spu_unschedule(spu, ctx);
+		spu_add_to_rq(ctx);
 	} else {
 		ctx->time_slice++;
 	}
+out:
+	spu_release(ctx);
+
+	if (new)
+		spu_schedule(spu, new);
 }
 
 /**
@@ -895,11 +957,20 @@ static int spusched_thread(void *unused)
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		for (node = 0; node < MAX_NUMNODES; node++) {
-			mutex_lock(&cbe_spu_info[node].list_mutex);
-			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
-				if (spu->ctx)
-					spusched_tick(spu->ctx);
-			mutex_unlock(&cbe_spu_info[node].list_mutex);
+			struct mutex *mtx = &cbe_spu_info[node].list_mutex;
+
+			mutex_lock(mtx);
+			list_for_each_entry(spu, &cbe_spu_info[node].spus,
+					cbe_list) {
+				struct spu_context *ctx = spu->ctx;
+
+				if (ctx) {
+					mutex_unlock(mtx);
+					spusched_tick(ctx);
+					mutex_lock(mtx);
+				}
+			}
+			mutex_unlock(mtx);
 		}
 	}
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index eaab1b239d02..412de58f5b0f 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -246,10 +246,11 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 
 void spu_forget(struct spu_context *ctx);
-int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 void spu_release_saved(struct spu_context *ctx);
 
+int spu_stopped(struct spu_context *ctx, u32 * stat);
+void spu_del_from_rq(struct spu_context *ctx);
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);