author    Christoph Hellwig <hch@lst.de>          2007-03-09 18:05:36 -0500
committer Arnd Bergmann <arnd@klappe.arndb.de>    2007-03-09 18:07:49 -0500
commit    50b520d4efbce45281f58112789470ec7965fd33 (patch)
tree      4d161a9dbf0b73f13b448591183cc56d6bdb545f /arch
parent    aa0ed2bdb663608d5e409faecff3e1e81a3d413a (diff)
[POWERPC] avoid SPU_ACTIVATE_NOWAKE optimization
This optimization was added recently but is still buggy, so back it out for now.

Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
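For reference, a minimal sketch of what the spu_activate() wait loop looks like once the NOWAKE check is gone (reconstructed from the sched.c hunk below; the elided setup and the return value are assumptions, not a verbatim copy of the kernel source):

int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		/* ... try to bind the context to an idle SPU first (elided) ... */

		spu_add_to_rq(ctx);
		/* unconditional: every queued waiter now sleeps until
		 * spu_reschedule() wakes it, regardless of flags */
		spu_prio_wait(ctx);
		spu_del_from_rq(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;	/* assumed error path: interrupted by a signal */
}

With SPU_ACTIVATE_NOWAKE gone, spu_reschedule() likewise wakes any runnable context it finds on the run queue without first checking SPU_SCHED_WAKE.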
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c   | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 6
3 files changed, 6 insertions, 11 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 353a8fa07ab8..f95a611ca362 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 	int ret;
 	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-	ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
 		return ret;
 
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 		spu_release(ctx);
 		ret = spu_setup_isolated(ctx);
 		if (!ret)
-			ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
+			ret = spu_acquire_runnable(ctx, 0);
 	}
 
 	/* if userspace has set the runcntrl register (eg, to issue an
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2f25e68b4bac..7dbf57c30282 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -263,7 +263,6 @@ static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
-	set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
 	if (!signal_pending(current)) {
 		mutex_unlock(&ctx->state_mutex);
@@ -272,7 +271,6 @@ static void spu_prio_wait(struct spu_context *ctx)
 	}
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
-	clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }
 
 /**
@@ -292,7 +290,7 @@ static void spu_reschedule(struct spu *spu)
 	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
 		struct spu_context *ctx = spu_grab_context(best);
-		if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
+		if (ctx)
 			wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
@@ -414,8 +412,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags)
 		}
 
 		spu_add_to_rq(ctx);
-		if (!(flags & SPU_ACTIVATE_NOWAKE))
-			spu_prio_wait(ctx);
+		spu_prio_wait(ctx);
 		spu_del_from_rq(ctx);
 	} while (!signal_pending(current));
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0c437891dfd5..5c4e47d69d79 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -41,7 +41,7 @@ struct spu_gang;
 
 /* ctx->sched_flags */
 enum {
-	SPU_SCHED_WAKE = 0,
+	SPU_SCHED_WAKE = 0, /* currently unused */
 };
 
 struct spu_context {
@@ -191,9 +191,7 @@ void spu_forget(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-enum {
-	SPU_ACTIVATE_NOWAKE = 1,
-};
+
 int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);