aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms/cell/spufs/sched.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2007-02-13 15:54:24 -0500
committerArnd Bergmann <arnd@klappe.arndb.de>2007-02-13 15:55:41 -0500
commit26bec67386dbf6ef887254e815398842e182cdcd (patch)
treecde0851af46df1b376a7af47e7c59362506cecc5 /arch/powerpc/platforms/cell/spufs/sched.c
parent079cdb61614c466c939ebf74c7ef6745667bc61e (diff)
[POWERPC] spufs: optimize spu_run
There is no need to directly wake up contexts in spu_activate when called from spu_run, so add a flag to suppress this wakeup. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/sched.c')
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c10
1 file changed, 6 insertions, 4 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 6f8e2257c5a..07d0d095c62 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -247,8 +247,8 @@ static void spu_prio_wait(struct spu_context *ctx)
247{ 247{
248 DEFINE_WAIT(wait); 248 DEFINE_WAIT(wait);
249 249
250 set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
250 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); 251 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
251
252 if (!signal_pending(current)) { 252 if (!signal_pending(current)) {
253 mutex_unlock(&ctx->state_mutex); 253 mutex_unlock(&ctx->state_mutex);
254 schedule(); 254 schedule();
@@ -256,6 +256,7 @@ static void spu_prio_wait(struct spu_context *ctx)
256 } 256 }
257 __set_current_state(TASK_RUNNING); 257 __set_current_state(TASK_RUNNING);
258 remove_wait_queue(&ctx->stop_wq, &wait); 258 remove_wait_queue(&ctx->stop_wq, &wait);
259 clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
259} 260}
260 261
261/** 262/**
@@ -275,7 +276,7 @@ static void spu_reschedule(struct spu *spu)
275 best = sched_find_first_bit(spu_prio->bitmap); 276 best = sched_find_first_bit(spu_prio->bitmap);
276 if (best < MAX_PRIO) { 277 if (best < MAX_PRIO) {
277 struct spu_context *ctx = spu_grab_context(best); 278 struct spu_context *ctx = spu_grab_context(best);
278 if (ctx) 279 if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
279 wake_up(&ctx->stop_wq); 280 wake_up(&ctx->stop_wq);
280 } 281 }
281 spin_unlock(&spu_prio->runq_lock); 282 spin_unlock(&spu_prio->runq_lock);
@@ -315,7 +316,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
315 * add the context to the runqueue so it gets woken up once an spu 316 * add the context to the runqueue so it gets woken up once an spu
316 * is available. 317 * is available.
317 */ 318 */
318int spu_activate(struct spu_context *ctx, u64 flags) 319int spu_activate(struct spu_context *ctx, unsigned long flags)
319{ 320{
320 321
321 if (ctx->spu) 322 if (ctx->spu)
@@ -331,7 +332,8 @@ int spu_activate(struct spu_context *ctx, u64 flags)
331 } 332 }
332 333
333 spu_add_to_rq(ctx); 334 spu_add_to_rq(ctx);
334 spu_prio_wait(ctx); 335 if (!(flags & SPU_ACTIVATE_NOWAKE))
336 spu_prio_wait(ctx);
335 spu_del_from_rq(ctx); 337 spu_del_from_rq(ctx);
336 } while (!signal_pending(current)); 338 } while (!signal_pending(current));
337 339