author     Luke Browning <lukebrowning@us.ibm.com>    2007-04-23 15:08:13 -0400
committer  Arnd Bergmann <arnd@klappe.arndb.de>       2007-04-23 15:18:55 -0400
commit     4e0f4ed0df71013290cd2a01f7b84264f7b99678
tree       7fd056227ddce717269b653c563179b5b7c7b88d /arch/powerpc/platforms
parent     7ec18ab923a2e377ecb05c74a2d38f457f79950f
[POWERPC] spu sched: make addition to stop_wq and runqueue atomic vs. wakeup
Addition to stop_wq needs to happen before adding to the runqueue, and
under the same lock, so that we don't have a race window for a lost
wakeup in the spu scheduler.
Signed-off-by: Luke Browning <lukebrowning@us.ibm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
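The race being closed is the classic lost-wakeup pattern: with the old code, spu_add_to_rq() dropped runq_lock before spu_prio_wait() called prepare_to_wait_exclusive(), so spu_reschedule() could run in that window, find the context on the runqueue, and issue a wake_up() that nobody was waiting for; the subsequent schedule() then had no wakeup left to receive. Below is a minimal user-space sketch of the same rule using pthreads. It is not the spufs code; runq_lock, stop_wq, on_runq, waiter() and waker() are hypothetical stand-ins for spu_prio->runq_lock, ctx->stop_wq, __spu_add_to_rq()/__spu_del_from_rq(), spu_prio_wait() and spu_reschedule().

/*
 * Minimal pthread sketch (not kernel code) of the locking rule this
 * patch enforces: the waiter must queue itself for the wakeup and
 * publish "I am runnable" under the same lock that the waker takes
 * before scanning the runqueue and issuing the wakeup.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t runq_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ spu_prio->runq_lock */
static pthread_cond_t stop_wq = PTHREAD_COND_INITIALIZER;     /* ~ ctx->stop_wq */
static bool on_runq;   /* "context is on the runqueue" */
static bool woken;     /* wakeup delivered by the waker */

/* Waiter: the fixed spu_prio_wait() shape - becoming wakeable and being
 * added to the runqueue happen atomically under runq_lock, so the waker
 * can never observe the context on the runqueue before the waiter is
 * able to receive the wakeup. */
static void *waiter(void *unused)
{
	pthread_mutex_lock(&runq_lock);
	on_runq = true;                          /* ~ __spu_add_to_rq(ctx) */
	while (!woken)
		pthread_cond_wait(&stop_wq, &runq_lock);
	on_runq = false;                         /* ~ __spu_del_from_rq(ctx) */
	pthread_mutex_unlock(&runq_lock);
	puts("waiter: got the wakeup");
	return NULL;
}

/* Waker: the spu_reschedule() side - under the same lock, check whether
 * a context is queued and wake it; retries until the waiter shows up. */
static void *waker(void *unused)
{
	for (;;) {
		pthread_mutex_lock(&runq_lock);
		if (on_runq) {
			woken = true;
			pthread_cond_signal(&stop_wq);   /* ~ wake_up(&ctx->stop_wq) */
			pthread_mutex_unlock(&runq_lock);
			return NULL;
		}
		pthread_mutex_unlock(&runq_lock);
		sched_yield();
	}
}

int main(void)
{
	pthread_t w, k;

	pthread_create(&w, NULL, waiter, NULL);
	pthread_create(&k, NULL, waker, NULL);
	pthread_join(w, NULL);
	pthread_join(k, NULL);
	return 0;
}

In the sketch, moving "on_runq = true" outside the lock (or taking and dropping the lock before waiting, as the old spu_add_to_rq()/spu_prio_wait() split did) reopens the window in which the waker signals before the waiter is waiting; holding the lock across both steps is what the patch does in spu_prio_wait() below.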
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  38
1 file changed, 16 insertions, 22 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 876828cc95a2..91030b8abdca 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -236,44 +236,40 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
  * spu_add_to_rq - add a context to the runqueue
  * @ctx: context to add
  */
-static void spu_add_to_rq(struct spu_context *ctx)
+static void __spu_add_to_rq(struct spu_context *ctx)
 {
-	spin_lock(&spu_prio->runq_lock);
-	list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
-	set_bit(ctx->prio, spu_prio->bitmap);
-	mb();
-	spin_unlock(&spu_prio->runq_lock);
+	int prio = ctx->prio;
+
+	list_add_tail(&ctx->rq, &spu_prio->runq[prio]);
+	set_bit(prio, spu_prio->bitmap);
 }
 
-static void __spu_del_from_rq(struct spu_context *ctx, int prio)
+static void __spu_del_from_rq(struct spu_context *ctx)
 {
+	int prio = ctx->prio;
+
 	if (!list_empty(&ctx->rq))
 		list_del_init(&ctx->rq);
 	if (list_empty(&spu_prio->runq[prio]))
-		clear_bit(ctx->prio, spu_prio->bitmap);
-}
-
-/**
- * spu_del_from_rq - remove a context from the runqueue
- * @ctx: context to remove
- */
-static void spu_del_from_rq(struct spu_context *ctx)
-{
-	spin_lock(&spu_prio->runq_lock);
-	__spu_del_from_rq(ctx, ctx->prio);
-	spin_unlock(&spu_prio->runq_lock);
+		clear_bit(prio, spu_prio->bitmap);
 }
 
 static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	spin_lock(&spu_prio->runq_lock);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
+		__spu_add_to_rq(ctx);
+		spin_unlock(&spu_prio->runq_lock);
 		mutex_unlock(&ctx->state_mutex);
 		schedule();
 		mutex_lock(&ctx->state_mutex);
+		spin_lock(&spu_prio->runq_lock);
+		__spu_del_from_rq(ctx);
 	}
+	spin_unlock(&spu_prio->runq_lock);
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
 }
@@ -300,7 +296,7 @@ static void spu_reschedule(struct spu *spu)
 		BUG_ON(list_empty(rq));
 
 		ctx = list_entry(rq->next, struct spu_context, rq);
-		__spu_del_from_rq(ctx, best);
+		__spu_del_from_rq(ctx);
 		wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
@@ -427,9 +423,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 			return 0;
 		}
 
-		spu_add_to_rq(ctx);
 		spu_prio_wait(ctx);
-		spu_del_from_rq(ctx);
 	} while (!signal_pending(current));
 
 	return -ERESTARTSYS;