path: root/arch
author	Christoph Hellwig <hch@lst.de>	2007-02-13 15:54:24 -0500
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-02-13 15:55:41 -0500
commit	26bec67386dbf6ef887254e815398842e182cdcd (patch)
tree	cde0851af46df1b376a7af47e7c59362506cecc5 /arch
parent	079cdb61614c466c939ebf74c7ef6745667bc61e (diff)
[POWERPC] spufs: optimize spu_run
There is no need to directly wake up contexts in spu_activate when called
from spu_run, so add a flag to suppress this wakeup.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
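For reference, here is a toy user-space model of the flag semantics this patch introduces; it is not the kernel code itself. SPU_ACTIVATE_NOWAKE mirrors the enum added to spufs.h in the diff below, while the struct and helpers are simplified stand-ins for the spufs routines the patch touches.

#include <stdio.h>

/* Flag value mirrors the enum added to spufs.h in this patch. */
enum {
	SPU_ACTIVATE_NOWAKE = 1,
};

/* Toy stand-in for the real spu_context; only what the model needs. */
struct spu_context {
	int on_runqueue;
};

/* In the kernel this sleeps on ctx->stop_wq; here we only log the step. */
static void spu_prio_wait(struct spu_context *ctx)
{
	(void)ctx;
	printf("spu_prio_wait: would sleep until an SPU is available\n");
}

/* Simplified shape of spu_activate() after the patch: the wait/wakeup
 * path is skipped when the caller passes SPU_ACTIVATE_NOWAKE. */
static int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	ctx->on_runqueue = 1;               /* spu_add_to_rq(ctx) */
	if (!(flags & SPU_ACTIVATE_NOWAKE))
		spu_prio_wait(ctx);
	ctx->on_runqueue = 0;               /* spu_del_from_rq(ctx) */
	return 0;
}

int main(void)
{
	struct spu_context ctx = { 0 };

	/* Ordinary callers (e.g. spufs_ps_nopfn, spufs_mfc_write) pass 0. */
	spu_activate(&ctx, 0);

	/* spu_run passes SPU_ACTIVATE_NOWAKE to suppress the extra wakeup. */
	spu_activate(&ctx, SPU_ACTIVATE_NOWAKE);
	return 0;
}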
Diffstat (limited to 'arch')
 arch/powerpc/platforms/cell/spufs/context.c |  4
 arch/powerpc/platforms/cell/spufs/file.c    |  4
 arch/powerpc/platforms/cell/spufs/run.c     |  4
 arch/powerpc/platforms/cell/spufs/sched.c   | 10
 arch/powerpc/platforms/cell/spufs/spufs.h   | 13
 5 files changed, 23 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 88a887186303..056a8ad02385 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -163,7 +163,7 @@ int spu_acquire_exclusive(struct spu_context *ctx)
  * Returns 0 and with the context locked on success
  * Returns negative error and with the context _unlocked_ on failure.
  */
-int spu_acquire_runnable(struct spu_context *ctx)
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
 {
 	int ret = -EINVAL;
 
@@ -174,7 +174,7 @@ int spu_acquire_runnable(struct spu_context *ctx)
 		 */
 		if (!ctx->owner)
 			goto out_unlock;
-		ret = spu_activate(ctx, 0);
+		ret = spu_activate(ctx, flags);
 		if (ret)
 			goto out_unlock;
 	}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index a528020baa18..c729813043a6 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -164,7 +164,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	/* error here usually means a signal.. we might want to test
 	 * the error code more precisely though
 	 */
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, 0);
 	if (ret)
 		return NOPFN_REFAULT;
 
@@ -1306,7 +1306,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
 	if (ret)
 		goto out;
 
-	spu_acquire_runnable(ctx);
+	spu_acquire_runnable(ctx, 0);
 	if (file->f_flags & O_NONBLOCK) {
 		ret = ctx->ops->send_mfc_command(ctx, &cmd);
 	} else {
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index e1647311044b..a973e79e9fdc 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 	int ret;
 	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 
-	ret = spu_acquire_runnable(ctx);
+	ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 	if (ret)
 		return ret;
 
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
 		spu_release(ctx);
 		ret = spu_setup_isolated(ctx);
 		if (!ret)
-			ret = spu_acquire_runnable(ctx);
+			ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
 	}
 
 	/* if userspace has set the runcntrl register (eg, to issue an
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 6f8e2257c5a6..07d0d095c62a 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -247,8 +247,8 @@ static void spu_prio_wait(struct spu_context *ctx)
 {
 	DEFINE_WAIT(wait);
 
+	set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
-
 	if (!signal_pending(current)) {
 		mutex_unlock(&ctx->state_mutex);
 		schedule();
@@ -256,6 +256,7 @@ static void spu_prio_wait(struct spu_context *ctx)
 	}
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->stop_wq, &wait);
+	clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
 }
 
 /**
@@ -275,7 +276,7 @@ static void spu_reschedule(struct spu *spu)
 	best = sched_find_first_bit(spu_prio->bitmap);
 	if (best < MAX_PRIO) {
 		struct spu_context *ctx = spu_grab_context(best);
-		if (ctx)
+		if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
 			wake_up(&ctx->stop_wq);
 	}
 	spin_unlock(&spu_prio->runq_lock);
@@ -315,7 +316,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
  * add the context to the runqueue so it gets woken up once an spu
  * is available.
  */
-int spu_activate(struct spu_context *ctx, u64 flags)
+int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
 
 	if (ctx->spu)
@@ -331,7 +332,8 @@ int spu_activate(struct spu_context *ctx, u64 flags)
 		}
 
 		spu_add_to_rq(ctx);
-		spu_prio_wait(ctx);
+		if (!(flags & SPU_ACTIVATE_NOWAKE))
+			spu_prio_wait(ctx);
 		spu_del_from_rq(ctx);
 	} while (!signal_pending(current));
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 7f5a4fc03c00..421f59167c55 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -39,6 +39,11 @@ enum {
 struct spu_context_ops;
 struct spu_gang;
 
+/* ctx->sched_flags */
+enum {
+	SPU_SCHED_WAKE = 0,
+};
+
 struct spu_context {
 	struct spu *spu;		  /* pointer to a physical SPU */
 	struct spu_state csa;		  /* SPU context save area. */
@@ -77,6 +82,7 @@ struct spu_context {
 
 	/* scheduler fields */
 	struct list_head rq;
+	unsigned long sched_flags;
 	int prio;
 };
 
@@ -179,10 +185,13 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 
 void spu_forget(struct spu_context *ctx);
-int spu_acquire_runnable(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-int spu_activate(struct spu_context *ctx, u64 flags);
+enum {
+	SPU_ACTIVATE_NOWAKE	= 1,
+};
+int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
 int __init spu_sched_init(void);