Diffstat (limited to 'arch/powerpc')

-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 159
1 file changed, 77 insertions, 82 deletions
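Note: the hunks below fold three near-identical "find a better context, then unbind" paths into a single helper, __spu_deactivate(ctx, force, max_prio). As orientation, this is how the entry points map onto the helper after the patch (parameter values taken from the hunks below, nothing beyond them):

        /*
         * Parameter mapping, reconstructed from the hunks in this diff:
         *
         *   spu_deactivate(ctx)  ->  __spu_deactivate(ctx, 1, MAX_PRIO)
         *                            always unbind, even with no runnable replacement
         *   spu_yield(ctx)       ->  __spu_deactivate(ctx, 0, MAX_PRIO)
         *                            unbind only if some runnable context is waiting
         *   spu_sched_tick()     ->  __spu_deactivate(ctx, 0, ctx->prio + 1)
         *                            unbind only for an equal or higher priority context
         */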
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index b6ecb30e7d58..68fcdc4515ab 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -93,43 +93,6 @@ void spu_stop_tick(struct spu_context *ctx)
         }
 }
 
-void spu_sched_tick(struct work_struct *work)
-{
-        struct spu_context *ctx =
-                container_of(work, struct spu_context, sched_work.work);
-        struct spu *spu;
-        int preempted = 0;
-
-        /*
-         * If this context is being stopped avoid rescheduling from the
-         * scheduler tick because we would block on the state_mutex.
-         * The caller will yield the spu later on anyway.
-         */
-        if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
-                return;
-
-        mutex_lock(&ctx->state_mutex);
-        spu = ctx->spu;
-        if (spu) {
-                int best = sched_find_first_bit(spu_prio->bitmap);
-                if (best <= ctx->prio) {
-                        spu_deactivate(ctx);
-                        preempted = 1;
-                }
-        }
-        mutex_unlock(&ctx->state_mutex);
-
-        if (preempted) {
-                /*
-                 * We need to break out of the wait loop in spu_run manually
-                 * to ensure this context gets put on the runqueue again
-                 * ASAP.
-                 */
-                wake_up(&ctx->stop_wq);
-        } else
-                spu_start_tick(ctx);
-}
-
 /**
  * spu_add_to_active_list - add spu to active list
  * @spu: spu to add to the active list
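The priority check from the removed tick handler does not go away; it resurfaces later in this diff as the max_prio argument of __spu_deactivate(). A minimal sketch of the equivalence (lower numeric value means higher priority, as elsewhere in this file):

        /*
         * Old check in spu_sched_tick():                    best <= ctx->prio
         * New check, grab_runnable_context(ctx->prio + 1):  best <  ctx->prio + 1
         *
         * Identical for integer priorities, so the tick still preempts only
         * in favour of contexts of equal or higher priority.
         */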
@@ -273,34 +236,6 @@ static void spu_prio_wait(struct spu_context *ctx)
         remove_wait_queue(&ctx->stop_wq, &wait);
 }
 
-/**
- * spu_reschedule - try to find a runnable context for a spu
- * @spu: spu available
- *
- * This function is called whenever a spu becomes idle. It looks for the
- * most suitable runnable spu context and schedules it for execution.
- */
-static void spu_reschedule(struct spu *spu)
-{
-        int best;
-
-        spu_free(spu);
-
-        spin_lock(&spu_prio->runq_lock);
-        best = sched_find_first_bit(spu_prio->bitmap);
-        if (best < MAX_PRIO) {
-                struct list_head *rq = &spu_prio->runq[best];
-                struct spu_context *ctx;
-
-                BUG_ON(list_empty(rq));
-
-                ctx = list_entry(rq->next, struct spu_context, rq);
-                __spu_del_from_rq(ctx);
-                wake_up(&ctx->stop_wq);
-        }
-        spin_unlock(&spu_prio->runq_lock);
-}
-
 static struct spu *spu_get_idle(struct spu_context *ctx)
 {
         struct spu *spu = NULL;
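For comparison with what follows, the removed spu_reschedule() can be restated in terms of grab_runnable_context(), which the next hunk introduces. The function below is an illustrative sketch, not part of the patch:

        /* Sketch only: the removed spu_reschedule(), expressed with the new helper. */
        static void spu_reschedule_sketch(struct spu *spu)
        {
                struct spu_context *ctx;

                spu_free(spu);                          /* old code freed the spu unconditionally */
                ctx = grab_runnable_context(MAX_PRIO);  /* any runnable context qualifies */
                if (ctx)
                        wake_up(&ctx->stop_wq);         /* its spu_run() loop will rebind it */
        }

Note that the replacement, __spu_deactivate(), reverses the order: it looks for a runnable context first and only calls spu_unbind_context()/spu_free() if one was found or force is set.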
@@ -429,6 +364,51 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
         }
 
 /**
+ * grab_runnable_context - try to find a runnable context
+ *
+ * Remove the highest priority context on the runqueue and return it
+ * to the caller. Returns %NULL if no runnable context was found.
+ */
+static struct spu_context *grab_runnable_context(int prio)
+{
+        struct spu_context *ctx = NULL;
+        int best;
+
+        spin_lock(&spu_prio->runq_lock);
+        best = sched_find_first_bit(spu_prio->bitmap);
+        if (best < prio) {
+                struct list_head *rq = &spu_prio->runq[best];
+
+                BUG_ON(list_empty(rq));
+
+                ctx = list_entry(rq->next, struct spu_context, rq);
+                __spu_del_from_rq(ctx);
+        }
+        spin_unlock(&spu_prio->runq_lock);
+
+        return ctx;
+}
+
+static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
+{
+        struct spu *spu = ctx->spu;
+        struct spu_context *new = NULL;
+
+        if (spu) {
+                new = grab_runnable_context(max_prio);
+                if (new || force) {
+                        spu_unbind_context(spu, ctx);
+                        spu_free(spu);
+                        if (new)
+                                wake_up(&new->stop_wq);
+                }
+
+        }
+
+        return new != NULL;
+}
+
+/**
  * spu_deactivate - unbind a context from it's physical spu
  * @ctx: spu context to unbind
  *
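The new helpers assume a particular locking split, which the patch only shows implicitly through its callers. A short sketch of the expected usage (this is the spu_yield() pattern from the remaining hunks):

        /*
         * Caller holds ctx->state_mutex; grab_runnable_context() takes
         * spu_prio->runq_lock internally for the runqueue scan.
         */
        mutex_lock(&ctx->state_mutex);
        __spu_deactivate(ctx, 0, MAX_PRIO);
        mutex_unlock(&ctx->state_mutex);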
@@ -437,12 +417,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
  */
 void spu_deactivate(struct spu_context *ctx)
 {
-        struct spu *spu = ctx->spu;
-
-        if (spu) {
-                spu_unbind_context(spu, ctx);
-                spu_reschedule(spu);
-        }
+        __spu_deactivate(ctx, 1, MAX_PRIO);
 }
 
 /**
@@ -455,18 +430,38 @@ void spu_deactivate(struct spu_context *ctx)
  */
 void spu_yield(struct spu_context *ctx)
 {
-        struct spu *spu;
+        mutex_lock(&ctx->state_mutex);
+        __spu_deactivate(ctx, 0, MAX_PRIO);
+        mutex_unlock(&ctx->state_mutex);
+}
 
-        if (mutex_trylock(&ctx->state_mutex)) {
-                if ((spu = ctx->spu) != NULL) {
-                        int best = sched_find_first_bit(spu_prio->bitmap);
-                        if (best < MAX_PRIO) {
-                                pr_debug("%s: yielding SPU %d NODE %d\n",
-                                        __FUNCTION__, spu->number, spu->node);
-                                spu_deactivate(ctx);
-                        }
-                }
-                mutex_unlock(&ctx->state_mutex);
+void spu_sched_tick(struct work_struct *work)
+{
+        struct spu_context *ctx =
+                container_of(work, struct spu_context, sched_work.work);
+        int preempted;
+
+        /*
+         * If this context is being stopped avoid rescheduling from the
+         * scheduler tick because we would block on the state_mutex.
+         * The caller will yield the spu later on anyway.
+         */
+        if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+                return;
+
+        mutex_lock(&ctx->state_mutex);
+        preempted = __spu_deactivate(ctx, 0, ctx->prio + 1);
+        mutex_unlock(&ctx->state_mutex);
+
+        if (preempted) {
+                /*
+                 * We need to break out of the wait loop in spu_run manually
+                 * to ensure this context gets put on the runqueue again
+                 * ASAP.
+                 */
+                wake_up(&ctx->stop_wq);
+        } else {
+                spu_start_tick(ctx);
         }
 }
 
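One behavioural change worth calling out from the last hunk: spu_yield() used to be best-effort, built on mutex_trylock(), so a contended state_mutex silently skipped the yield, and it sampled spu_prio->bitmap without holding the runqueue lock. The new version always takes state_mutex and makes the decision inside grab_runnable_context() under spu_prio->runq_lock, unbinding only when a runnable replacement is actually dequeued.

        /*
         * Before:  if (mutex_trylock(&ctx->state_mutex)) { ... spu_deactivate(ctx); }
         *          -- may do nothing at all if the lock is contended
         * After:   mutex_lock(&ctx->state_mutex);
         *          __spu_deactivate(ctx, 0, MAX_PRIO);
         *          -- always serializes, unbinds only if a runnable context is found
         */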