author     Christoph Hellwig <hch@lst.de>           2007-04-23 15:08:06 -0400
committer  Arnd Bergmann <arnd@klappe.arndb.de>     2007-04-23 15:18:52 -0400
commit     0887309589824fb1c3744c69a330c99c369124a0
tree       df69c7be00e75c240302a37b78964c10419615c9
parent     390cbb56a731546edc0f35fbc4c5045676467581
[POWERPC] spufs: use cancel_rearming_delayed_workqueue when stopping spu contexts
The scheduler workqueue may rearm itself and deadlock when we try to stop it. Put a flag in place so the work is skipped if we're tearing down the context.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c  25
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h   2
2 files changed, 23 insertions(+), 4 deletions(-)
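The fix generalizes to any delayed work that requeues itself: set an "exiting" bit before cancelling, and have the work function bail out early when it sees the bit, so there is no window in which a running tick requeues the work after the cancel. Below is a minimal standalone sketch of that pattern as a kernel module; it is not code from this patch, everything named demo_* is hypothetical, and it uses cancel_delayed_work_sync() (a later workqueue helper) where the patch itself pairs the flag with plain cancel_delayed_work().

/*
 * Minimal sketch of the stop-flag pattern this patch applies.
 * All demo_* names and the DEMO_EXITING bit are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#define DEMO_EXITING    0               /* bit number in demo_flags */

static unsigned long demo_flags;
static struct delayed_work demo_work;

static void demo_tick(struct work_struct *work)
{
        /*
         * Bail out before taking locks or rearming; this mirrors the
         * check the patch adds at the top of spu_sched_tick().
         */
        if (test_bit(DEMO_EXITING, &demo_flags))
                return;

        printk(KERN_INFO "demo tick\n");

        /* The work rearms itself, which is what makes a bare cancel racy. */
        schedule_delayed_work(&demo_work, HZ);
}

static int __init demo_init(void)
{
        /* Like spu_start_tick(): clear the bit before the first queueing. */
        clear_bit(DEMO_EXITING, &demo_flags);
        INIT_DELAYED_WORK(&demo_work, demo_tick);
        schedule_delayed_work(&demo_work, HZ);
        return 0;
}

static void __exit demo_exit(void)
{
        /*
         * Like spu_stop_tick(): set the bit first so an execution that
         * is already running refuses to rearm, then cancel.  A module
         * must also wait for a running instance before its text goes
         * away, hence the _sync variant here.
         */
        set_bit(DEMO_EXITING, &demo_flags);
        cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The ordering in demo_exit() is the whole point: the bit is set before the cancel runs, so a tick that has already started still sees the flag and refuses to requeue the work.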
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index c9561582ce2a..003e330fc76f 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -71,14 +71,25 @@ static inline int node_allowed(int node)
 
 void spu_start_tick(struct spu_context *ctx)
 {
-	if (ctx->policy == SCHED_RR)
+	if (ctx->policy == SCHED_RR) {
+		/*
+		 * Make sure the exiting bit is cleared.
+		 */
+		clear_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
 		queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
+	}
 }
 
 void spu_stop_tick(struct spu_context *ctx)
 {
-	if (ctx->policy == SCHED_RR)
+	if (ctx->policy == SCHED_RR) {
+		/*
+		 * While the work can be rearming normally setting this flag
+		 * makes sure it does not rearm itself anymore.
+		 */
+		set_bit(SPU_SCHED_EXITING, &ctx->sched_flags);
 		cancel_delayed_work(&ctx->sched_work);
+	}
 }
 
 void spu_sched_tick(struct work_struct *work)
@@ -88,6 +99,14 @@ void spu_sched_tick(struct work_struct *work)
 	struct spu *spu;
 	int rearm = 1;
 
+	/*
+	 * If this context is being stopped avoid rescheduling from the
+	 * scheduler tick because we would block on the state_mutex.
+	 * The caller will yield the spu later on anyway.
+	 */
+	if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags))
+		return;
+
 	mutex_lock(&ctx->state_mutex);
 	spu = ctx->spu;
 	if (spu) {
@@ -377,7 +396,7 @@ static struct spu *find_victim(struct spu_context *ctx)
  * @ctx: spu context to schedule
  * @flags: flags (currently ignored)
  *
- * Tries to find a free spu to run @ctx. If no free spu is availble
+ * Tries to find a free spu to run @ctx. If no free spu is available
  * add the context to the runqueue so it gets woken up once an spu
  * is available.
  */
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 5c4e47d69d79..f418378abdff 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -41,7 +41,7 @@ struct spu_gang;
 
 /* ctx->sched_flags */
 enum {
-	SPU_SCHED_WAKE = 0,	/* currently unused */
+	SPU_SCHED_EXITING = 0,
 };
 
 struct spu_context {