author	Luke Browning <lukebr@linux.vnet.ibm.com>	2007-12-20 02:39:59 -0500
committer	Paul Mackerras <paulus@samba.org>	2007-12-21 03:46:21 -0500
commit	e65c2f6fcebb9af0c3f53c796aff730dd657f5e7 (patch)
tree	98b39e5efb858fc46022a5621aee07e57dad3919 /arch/powerpc/platforms/cell/spufs/run.c
parent	9476141c185aa131fa8b4b6ccc5c0ccf92300225 (diff)
[POWERPC] spufs: decouple spu scheduler from spufs_spu_run (asynchronous scheduling)

Change spufs_spu_run so that the context is queued directly to the
scheduler and the controlling thread advances directly to spufs_wait()
for SPE errors and exceptions. nosched contexts are treated the same
as before.

Fixes from Christoph Hellwig <hch@lst.de>

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/platforms/cell/spufs/run.c')
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c	90

1 file changed, 36 insertions(+), 54 deletions(-)
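The effect on the controlling thread is easiest to see in condensed form. What follows is a minimal sketch of the spufs_run_spu() run loop after this patch, assembled from the hunks below (event handling, fault handling, and the syscall-restart logic are elided; see the full diff for the real flow):

	/*
	 * spu_run_init() now queues the context to the scheduler rather
	 * than waiting for it to be loaded; the controlling thread then
	 * sleeps in spufs_wait() until spu_stopped() reports a stop,
	 * halt, single-step, MFC fault, or pending class 0 exception.
	 */
	ret = spu_run_init(ctx, npc);
	if (ret)
		goto out;

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (ret)
			break;
		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT |
				     SPU_STATUS_SINGLE_STEP)));

	/* spu_run_fini() also dequeues the context (spu_del_from_rq()). */
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);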
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 652ae1366dc8..b380050cdbc7 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -41,21 +41,29 @@ void spufs_stop_callback(struct spu *spu)
 	spu->dar = 0;
 }
 
-static inline int spu_stopped(struct spu_context *ctx, u32 *stat)
+int spu_stopped(struct spu_context *ctx, u32 *stat)
 {
-	struct spu *spu;
-	u64 pte_fault;
+	u64 dsisr;
+	u32 stopped;
 
 	*stat = ctx->ops->status_read(ctx);
 
-	spu = ctx->spu;
-	if (ctx->state != SPU_STATE_RUNNABLE ||
-	    test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
+		return 1;
+
+	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
+		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
+	if (*stat & stopped)
+		return 1;
+
+	dsisr = ctx->csa.dsisr;
+	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
-	pte_fault = ctx->csa.dsisr &
-	    (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED);
-	return (!(*stat & SPU_STATUS_RUNNING) || pte_fault || ctx->csa.class_0_pending) ?
-		1 : 0;
+
+	if (ctx->csa.class_0_pending)
+		return 1;
+
+	return 0;
 }
 
 static int spu_setup_isolated(struct spu_context *ctx)
@@ -151,24 +159,27 @@ out:
 
 static int spu_run_init(struct spu_context *ctx, u32 *npc)
 {
-	unsigned long runcntl;
+	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
 	int ret;
 
 	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
-	if (ctx->flags & SPU_CREATE_ISOLATE) {
-		/*
-		 * Force activation of spu. Isolated state assumes that
-		 * special loader context is loaded and running on spu.
-		 */
+	/*
+	 * NOSCHED is synchronous scheduling with respect to the caller.
+	 * The caller waits for the context to be loaded.
+	 */
+	if (ctx->flags & SPU_CREATE_NOSCHED) {
 		if (ctx->state == SPU_STATE_SAVED) {
-			spu_set_timeslice(ctx);
-
 			ret = spu_activate(ctx, 0);
 			if (ret)
 				return ret;
 		}
+	}
 
+	/*
+	 * Apply special setup as required.
+	 */
+	if (ctx->flags & SPU_CREATE_ISOLATE) {
 		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
 			ret = spu_setup_isolated(ctx);
 			if (ret)
@@ -183,10 +194,11 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
 		if (runcntl == 0)
 			runcntl = SPU_RUNCNTL_RUNNABLE;
+	}
 
+	if (ctx->flags & SPU_CREATE_NOSCHED) {
 		spuctx_switch_state(ctx, SPU_UTIL_USER);
 		ctx->ops->runcntl_write(ctx, runcntl);
-
 	} else {
 		unsigned long privcntl;
 
@@ -194,20 +206,18 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
 			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
 		else
 			privcntl = SPU_PRIVCNTL_MODE_NORMAL;
-		runcntl = SPU_RUNCNTL_RUNNABLE;
 
 		ctx->ops->npc_write(ctx, *npc);
 		ctx->ops->privcntl_write(ctx, privcntl);
+		ctx->ops->runcntl_write(ctx, runcntl);
 
 		if (ctx->state == SPU_STATE_SAVED) {
-			spu_set_timeslice(ctx);
 			ret = spu_activate(ctx, 0);
 			if (ret)
 				return ret;
+		} else {
+			spuctx_switch_state(ctx, SPU_UTIL_USER);
 		}
-
-		spuctx_switch_state(ctx, SPU_UTIL_USER);
-		ctx->ops->runcntl_write(ctx, runcntl);
 	}
 
 	return 0;
@@ -218,6 +228,8 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
 {
 	int ret = 0;
 
+	spu_del_from_rq(ctx);
+
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
 
@@ -230,26 +242,6 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
 	return ret;
 }
 
-static int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
-				  u32 *status)
-{
-	int ret;
-
-	ret = spu_run_fini(ctx, npc, status);
-	if (ret)
-		return ret;
-
-	if (*status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT))
-		return *status;
-
-	ret = spu_acquire_runnable(ctx, 0);
-	if (ret)
-		return ret;
-
-	spuctx_switch_state(ctx, SPU_UTIL_USER);
-	return 0;
-}
-
 /*
  * SPU syscall restarting is tricky because we violate the basic
  * assumption that the signal handler is running on the interrupted
@@ -386,17 +378,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 		if (ret)
 			break;
 
-		if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
-			ret = spu_reacquire_runnable(ctx, npc, &status);
-			if (ret)
-				goto out2;
-			continue;
-		}
-
 		if (signal_pending(current))
 			ret = -ERESTARTSYS;
-
-
 	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
 				     SPU_STATUS_STOPPED_BY_HALT |
 				     SPU_STATUS_SINGLE_STEP)));
@@ -411,7 +394,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	ret = spu_run_fini(ctx, npc, &status);
 	spu_yield(ctx);
 
-out2:
 	if ((ret == 0) ||
 	    ((ret == -ERESTARTSYS) &&
 	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
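For reference, spufs_run_spu() is the backend of the spu_run(2) system call, so the "controlling thread" above is a user-space thread blocked in that syscall. A minimal, hypothetical user-space sketch follows (glibc provides no wrapper for spu_run, so it is invoked via syscall(2); ctx_fd is an assumed file descriptor for an SPU context created with spu_create(2) and already loaded with a program):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* direct syscall; __NR_spu_run exists only on powerpc */
	static long my_spu_run(int fd, unsigned int *npc, unsigned int *event)
	{
		return syscall(__NR_spu_run, fd, npc, event);
	}

	/* ... given an SPU context file descriptor ctx_fd ... */
	unsigned int npc = 0, event = 0;
	long status = my_spu_run(ctx_fd, &npc, &event);
	if (status < 0)
		perror("spu_run");
	else
		printf("SPE stopped: status 0x%lx npc 0x%x\n", status, npc);

From user space the syscall's semantics are unchanged by this patch; what moves is the point at which the kernel side sleeps (spufs_wait() on spu_stopped(), instead of reacquiring a runnable context via the removed spu_reacquire_runnable()).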