path: root/arch/powerpc
author		Christoph Hellwig <hch@lst.de>	2007-06-04 09:26:51 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-06-06 21:44:39 -0400
commit	bb5db29aa0379f0f3ef857a3a3715f17261c611b (patch)
tree	7de8b9a16f8a78fb99ac10e1f8d64664b5472aa3 /arch/powerpc
parent	47d3a5faa3f72186f769ed9579c630afb8433f2b (diff)
[POWERPC] spufs scheduler: Fix wakeup races
Fix the race between checking for contexts on the runqueue and actually waking them in spu_deactivate and spu_yield.

The guts of spu_reschedule are split into a new helper called grab_runnable_context, which checks whether there is a runnable thread below a specified priority and, if so, removes it from the runqueue and uses it. This function is used by the new __spu_deactivate helper, shared by preemption and spu_yield, to grab a new context before deactivating the old one.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
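The shape of the fix, condensed from the hunks below (kerneldoc and the BUG_ON trimmed, comments added for this summary): the runqueue check and the dequeue now happen atomically under runq_lock, and the next context is only woken after the old one has actually been unbound.

static struct spu_context *grab_runnable_context(int prio)
{
	struct spu_context *ctx = NULL;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);
	if (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		/* dequeue while still holding runq_lock, so no other cpu
		 * can wake or steal the same context in between */
		ctx = list_entry(rq->next, struct spu_context, rq);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio);
		if (new || force) {
			/* the replacement is only woken after the old
			 * context has been unbound and the spu freed */
			spu_unbind_context(spu, ctx);
			spu_free(spu);
			if (new)
				wake_up(&new->stop_wq);
		}
	}
	return new != NULL;
}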
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	159
1 file changed, 77 insertions(+), 82 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index b6ecb30e7d58..68fcdc4515ab 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -93,43 +93,6 @@ void spu_stop_tick(struct spu_context *ctx)
 	}
 }
 
-void spu_sched_tick(struct work_struct *work)
-{
-	struct spu_context *ctx =
-		container_of(work, struct spu_context, sched_work.work);
-	struct spu *spu;
-	int preempted = 0;
-
-	/*
-	 * If this context is being stopped avoid rescheduling from the
-	 * scheduler tick because we would block on the state_mutex.
-	 * The caller will yield the spu later on anyway.
-	 */
-	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-		return;
-
-	mutex_lock(&ctx->state_mutex);
-	spu = ctx->spu;
-	if (spu) {
-		int best = sched_find_first_bit(spu_prio->bitmap);
-		if (best <= ctx->prio) {
-			spu_deactivate(ctx);
-			preempted = 1;
-		}
-	}
-	mutex_unlock(&ctx->state_mutex);
-
-	if (preempted) {
-		/*
-		 * We need to break out of the wait loop in spu_run manually
-		 * to ensure this context gets put on the runqueue again
-		 * ASAP.
-		 */
-		wake_up(&ctx->stop_wq);
-	} else
-		spu_start_tick(ctx);
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu: spu to add to the active list
@@ -273,34 +236,6 @@ static void spu_prio_wait(struct spu_context *ctx)
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-/**
- * spu_reschedule - try to find a runnable context for a spu
- * @spu: spu available
- *
- * This function is called whenever a spu becomes idle. It looks for the
- * most suitable runnable spu context and schedules it for execution.
- */
-static void spu_reschedule(struct spu *spu)
-{
-	int best;
-
-	spu_free(spu);
-
-	spin_lock(&spu_prio->runq_lock);
-	best = sched_find_first_bit(spu_prio->bitmap);
-	if (best < MAX_PRIO) {
-		struct list_head *rq = &spu_prio->runq[best];
-		struct spu_context *ctx;
-
-		BUG_ON(list_empty(rq));
-
-		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx);
-		wake_up(&ctx->stop_wq);
-	}
-	spin_unlock(&spu_prio->runq_lock);
-}
-
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
 	struct spu *spu = NULL;
@@ -429,6 +364,51 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 }
 
 /**
+ * grab_runnable_context - try to find a runnable context
+ *
+ * Remove the highest priority context on the runqueue and return it
+ * to the caller. Returns %NULL if no runnable context was found.
+ */
+static struct spu_context *grab_runnable_context(int prio)
+{
+	struct spu_context *ctx = NULL;
+	int best;
+
+	spin_lock(&spu_prio->runq_lock);
+	best = sched_find_first_bit(spu_prio->bitmap);
+	if (best < prio) {
+		struct list_head *rq = &spu_prio->runq[best];
+
+		BUG_ON(list_empty(rq));
+
+		ctx = list_entry(rq->next, struct spu_context, rq);
+		__spu_del_from_rq(ctx);
+	}
+	spin_unlock(&spu_prio->runq_lock);
+
+	return ctx;
+}
+
+static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+{
+	struct spu *spu = ctx->spu;
+	struct spu_context *new = NULL;
+
+	if (spu) {
+		new = grab_runnable_context(max_prio);
+		if (new || force) {
+			spu_unbind_context(spu, ctx);
+			spu_free(spu);
+			if (new)
+				wake_up(&new->stop_wq);
+		}
+
+	}
+
+	return new != NULL;
+}
+
+/**
  * spu_deactivate - unbind a context from it's physical spu
  * @ctx: spu context to unbind
  *
@@ -437,12 +417,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  */
 void spu_deactivate(struct spu_context *ctx)
 {
-	struct spu *spu = ctx->spu;
-
-	if (spu) {
-		spu_unbind_context(spu, ctx);
-		spu_reschedule(spu);
-	}
+	__spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
 /**
@@ -455,18 +430,38 @@ void spu_deactivate(struct spu_context *ctx)
  */
 void spu_yield(struct spu_context *ctx)
 {
-	struct spu *spu;
+	mutex_lock(&ctx->state_mutex);
+	__spu_deactivate(ctx, 0, MAX_PRIO);
+	mutex_unlock(&ctx->state_mutex);
+}
 
-	if (mutex_trylock(&ctx->state_mutex)) {
-		if ((spu = ctx->spu) != NULL) {
-			int best = sched_find_first_bit(spu_prio->bitmap);
-			if (best < MAX_PRIO) {
-				pr_debug("%s: yielding SPU %d NODE %d\n",
-					 __FUNCTION__, spu->number, spu->node);
-				spu_deactivate(ctx);
-			}
-		}
-		mutex_unlock(&ctx->state_mutex);
+void spu_sched_tick(struct work_struct *work)
+{
+	struct spu_context *ctx =
+		container_of(work, struct spu_context, sched_work.work);
+	int preempted;
+
+	/*
+	 * If this context is being stopped avoid rescheduling from the
+	 * scheduler tick because we would block on the state_mutex.
+	 * The caller will yield the spu later on anyway.
+	 */
+	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+		return;
+
+	mutex_lock(&ctx->state_mutex);
+	preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
+	mutex_unlock(&ctx->state_mutex);
+
+	if (preempted) {
+		/*
+		 * We need to break out of the wait loop in spu_run manually
+		 * to ensure this context gets put on the runqueue again
+		 * ASAP.
+		 */
+		wake_up(&ctx->stop_wq);
+	} else {
+		spu_start_tick(ctx);
 	}
 }
 