diff options
author | Masato Noguchi <Masato.Noguchi@jp.sony.com> | 2007-12-04 21:49:31 -0500 |
---|---|---|
committer | Paul Mackerras <paulus@samba.org> | 2007-12-21 03:45:05 -0500 |
commit | c25620d7663fef41c373d42c4923c1d6b9847684 (patch) | |
tree | 4b734431a2f79ba9cdf033e9f2028f5fb5010af6 | |
parent | eda09fbdcd8c5afaa81c2f1d28e8b9725bad4d5a (diff) |
[POWERPC] cell: wrap master run control bit
Add platform specific SPU run control routines to the spufs. The current
spufs implementation uses the SPU master run control bit (MFC_SR1[S]) to
control SPE execution, but the PS3 hypervisor does not support the use of
this feature.
This change adds the run control wrapper routines spu_enable_spu() and
spu_disable_spu(). The bare metal routines use the master run control
bit, and the PS3 specific routines use the priv2 run control register.
An outstanding enhancement for the PS3 would be to add a guard to check
for incorrect access to the spu problem state when the spu context is
disabled. This check could be implemented with a flag added to the spu
context that would inhibit mapping problem state pages, and a routine
to unmap spu problem state pages. When the spu is enabled with
ps3_enable_spu() the flag would be set allowing pages to be mapped,
and when the spu is disabled with ps3_disable_spu() the flag would be
cleared and mapped problem state pages would be unmapped.
Signed-off-by: Masato Noguchi <Masato.Noguchi@jp.sony.com>
Signed-off-by: Geoff Levand <geoffrey.levand@am.sony.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r-- | arch/powerpc/platforms/cell/spu_manage.c | 13 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/backing_ops.c | 6 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/hw_ops.c | 10 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/run.c | 4 | ||||
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/spufs.h | 1 | ||||
-rw-r--r-- | arch/powerpc/platforms/ps3/spu.c | 27 | ||||
-rw-r--r-- | include/asm-powerpc/spu_priv1.h | 15 |
7 files changed, 72 insertions, 4 deletions
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index 9979197ff409..d351bdebf5f1 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <asm/firmware.h> | 35 | #include <asm/firmware.h> |
36 | #include <asm/prom.h> | 36 | #include <asm/prom.h> |
37 | 37 | ||
38 | #include "spufs/spufs.h" | ||
38 | #include "interrupt.h" | 39 | #include "interrupt.h" |
39 | 40 | ||
40 | struct device_node *spu_devnode(struct spu *spu) | 41 | struct device_node *spu_devnode(struct spu *spu) |
@@ -369,6 +370,16 @@ static int of_destroy_spu(struct spu *spu) | |||
369 | return 0; | 370 | return 0; |
370 | } | 371 | } |
371 | 372 | ||
373 | static void enable_spu_by_master_run(struct spu_context *ctx) | ||
374 | { | ||
375 | ctx->ops->master_start(ctx); | ||
376 | } | ||
377 | |||
378 | static void disable_spu_by_master_run(struct spu_context *ctx) | ||
379 | { | ||
380 | ctx->ops->master_stop(ctx); | ||
381 | } | ||
382 | |||
372 | /* Hardcoded affinity idxs for qs20 */ | 383 | /* Hardcoded affinity idxs for qs20 */ |
373 | #define QS20_SPES_PER_BE 8 | 384 | #define QS20_SPES_PER_BE 8 |
374 | static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; | 385 | static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; |
@@ -540,5 +551,7 @@ const struct spu_management_ops spu_management_of_ops = { | |||
540 | .enumerate_spus = of_enumerate_spus, | 551 | .enumerate_spus = of_enumerate_spus, |
541 | .create_spu = of_create_spu, | 552 | .create_spu = of_create_spu, |
542 | .destroy_spu = of_destroy_spu, | 553 | .destroy_spu = of_destroy_spu, |
554 | .enable_spu = enable_spu_by_master_run, | ||
555 | .disable_spu = disable_spu_by_master_run, | ||
543 | .init_affinity = init_affinity, | 556 | .init_affinity = init_affinity, |
544 | }; | 557 | }; |
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c index ec01214e51ee..a64a0044df99 100644 --- a/arch/powerpc/platforms/cell/spufs/backing_ops.c +++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c | |||
@@ -285,6 +285,11 @@ static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) | |||
285 | spin_unlock(&ctx->csa.register_lock); | 285 | spin_unlock(&ctx->csa.register_lock); |
286 | } | 286 | } |
287 | 287 | ||
288 | static void spu_backing_runcntl_stop(struct spu_context *ctx) | ||
289 | { | ||
290 | spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP); | ||
291 | } | ||
292 | |||
288 | static void spu_backing_master_start(struct spu_context *ctx) | 293 | static void spu_backing_master_start(struct spu_context *ctx) |
289 | { | 294 | { |
290 | struct spu_state *csa = &ctx->csa; | 295 | struct spu_state *csa = &ctx->csa; |
@@ -381,6 +386,7 @@ struct spu_context_ops spu_backing_ops = { | |||
381 | .get_ls = spu_backing_get_ls, | 386 | .get_ls = spu_backing_get_ls, |
382 | .runcntl_read = spu_backing_runcntl_read, | 387 | .runcntl_read = spu_backing_runcntl_read, |
383 | .runcntl_write = spu_backing_runcntl_write, | 388 | .runcntl_write = spu_backing_runcntl_write, |
389 | .runcntl_stop = spu_backing_runcntl_stop, | ||
384 | .master_start = spu_backing_master_start, | 390 | .master_start = spu_backing_master_start, |
385 | .master_stop = spu_backing_master_stop, | 391 | .master_stop = spu_backing_master_stop, |
386 | .set_mfc_query = spu_backing_set_mfc_query, | 392 | .set_mfc_query = spu_backing_set_mfc_query, |
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c index fc4ed1ffbd4f..e09616733c15 100644 --- a/arch/powerpc/platforms/cell/spufs/hw_ops.c +++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c | |||
@@ -220,6 +220,15 @@ static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val) | |||
220 | spin_unlock_irq(&ctx->spu->register_lock); | 220 | spin_unlock_irq(&ctx->spu->register_lock); |
221 | } | 221 | } |
222 | 222 | ||
223 | static void spu_hw_runcntl_stop(struct spu_context *ctx) | ||
224 | { | ||
225 | spin_lock_irq(&ctx->spu->register_lock); | ||
226 | out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP); | ||
227 | while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING) | ||
228 | cpu_relax(); | ||
229 | spin_unlock_irq(&ctx->spu->register_lock); | ||
230 | } | ||
231 | |||
223 | static void spu_hw_master_start(struct spu_context *ctx) | 232 | static void spu_hw_master_start(struct spu_context *ctx) |
224 | { | 233 | { |
225 | struct spu *spu = ctx->spu; | 234 | struct spu *spu = ctx->spu; |
@@ -321,6 +330,7 @@ struct spu_context_ops spu_hw_ops = { | |||
321 | .get_ls = spu_hw_get_ls, | 330 | .get_ls = spu_hw_get_ls, |
322 | .runcntl_read = spu_hw_runcntl_read, | 331 | .runcntl_read = spu_hw_runcntl_read, |
323 | .runcntl_write = spu_hw_runcntl_write, | 332 | .runcntl_write = spu_hw_runcntl_write, |
333 | .runcntl_stop = spu_hw_runcntl_stop, | ||
324 | .master_start = spu_hw_master_start, | 334 | .master_start = spu_hw_master_start, |
325 | .master_stop = spu_hw_master_stop, | 335 | .master_stop = spu_hw_master_stop, |
326 | .set_mfc_query = spu_hw_set_mfc_query, | 336 | .set_mfc_query = spu_hw_set_mfc_query, |
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c index 1ce5e22ea5f4..aad163ff46f4 100644 --- a/arch/powerpc/platforms/cell/spufs/run.c +++ b/arch/powerpc/platforms/cell/spufs/run.c | |||
@@ -302,7 +302,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) | |||
302 | if (mutex_lock_interruptible(&ctx->run_mutex)) | 302 | if (mutex_lock_interruptible(&ctx->run_mutex)) |
303 | return -ERESTARTSYS; | 303 | return -ERESTARTSYS; |
304 | 304 | ||
305 | ctx->ops->master_start(ctx); | 305 | spu_enable_spu(ctx); |
306 | ctx->event_return = 0; | 306 | ctx->event_return = 0; |
307 | 307 | ||
308 | spu_acquire(ctx); | 308 | spu_acquire(ctx); |
@@ -376,7 +376,7 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) | |||
376 | ctx->stats.libassist++; | 376 | ctx->stats.libassist++; |
377 | 377 | ||
378 | 378 | ||
379 | ctx->ops->master_stop(ctx); | 379 | spu_disable_spu(ctx); |
380 | ret = spu_run_fini(ctx, npc, &status); | 380 | ret = spu_run_fini(ctx, npc, &status); |
381 | spu_yield(ctx); | 381 | spu_yield(ctx); |
382 | 382 | ||
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index ca47b991bda5..5e92ad32cc9c 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h | |||
@@ -170,6 +170,7 @@ struct spu_context_ops { | |||
170 | char*(*get_ls) (struct spu_context * ctx); | 170 | char*(*get_ls) (struct spu_context * ctx); |
171 | u32 (*runcntl_read) (struct spu_context * ctx); | 171 | u32 (*runcntl_read) (struct spu_context * ctx); |
172 | void (*runcntl_write) (struct spu_context * ctx, u32 data); | 172 | void (*runcntl_write) (struct spu_context * ctx, u32 data); |
173 | void (*runcntl_stop) (struct spu_context * ctx); | ||
173 | void (*master_start) (struct spu_context * ctx); | 174 | void (*master_start) (struct spu_context * ctx); |
174 | void (*master_stop) (struct spu_context * ctx); | 175 | void (*master_stop) (struct spu_context * ctx); |
175 | int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode); | 176 | int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode); |
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index d1630a074acf..5ad41189b494 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/spu_priv1.h> | 28 | #include <asm/spu_priv1.h> |
29 | #include <asm/lv1call.h> | 29 | #include <asm/lv1call.h> |
30 | 30 | ||
31 | #include "../cell/spufs/spufs.h" | ||
31 | #include "platform.h" | 32 | #include "platform.h" |
32 | 33 | ||
33 | /* spu_management_ops */ | 34 | /* spu_management_ops */ |
@@ -419,10 +420,34 @@ static int ps3_init_affinity(void) | |||
419 | return 0; | 420 | return 0; |
420 | } | 421 | } |
421 | 422 | ||
423 | /** | ||
424 | * ps3_enable_spu - Enable SPU run control. | ||
425 | * | ||
426 | * An outstanding enhancement for the PS3 would be to add a guard to check | ||
427 | * for incorrect access to the spu problem state when the spu context is | ||
428 | * disabled. This check could be implemented with a flag added to the spu | ||
429 | * context that would inhibit mapping problem state pages, and a routine | ||
430 | * to unmap spu problem state pages. When the spu is enabled with | ||
431 | * ps3_enable_spu() the flag would be set allowing pages to be mapped, | ||
432 | * and when the spu is disabled with ps3_disable_spu() the flag would be | ||
433 | * cleared and the mapped problem state pages would be unmapped. | ||
434 | */ | ||
435 | |||
436 | static void ps3_enable_spu(struct spu_context *ctx) | ||
437 | { | ||
438 | } | ||
439 | |||
440 | static void ps3_disable_spu(struct spu_context *ctx) | ||
441 | { | ||
442 | ctx->ops->runcntl_stop(ctx); | ||
443 | } | ||
444 | |||
422 | const struct spu_management_ops spu_management_ps3_ops = { | 445 | const struct spu_management_ops spu_management_ps3_ops = { |
423 | .enumerate_spus = ps3_enumerate_spus, | 446 | .enumerate_spus = ps3_enumerate_spus, |
424 | .create_spu = ps3_create_spu, | 447 | .create_spu = ps3_create_spu, |
425 | .destroy_spu = ps3_destroy_spu, | 448 | .destroy_spu = ps3_destroy_spu, |
449 | .enable_spu = ps3_enable_spu, | ||
450 | .disable_spu = ps3_disable_spu, | ||
426 | .init_affinity = ps3_init_affinity, | 451 | .init_affinity = ps3_init_affinity, |
427 | }; | 452 | }; |
428 | 453 | ||
@@ -505,8 +530,6 @@ static void mfc_sr1_set(struct spu *spu, u64 sr1) | |||
505 | static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK | 530 | static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
506 | | MFC_STATE1_PROBLEM_STATE_MASK); | 531 | | MFC_STATE1_PROBLEM_STATE_MASK); |
507 | 532 | ||
508 | sr1 |= MFC_STATE1_MASTER_RUN_CONTROL_MASK; | ||
509 | |||
510 | BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed)); | 533 | BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed)); |
511 | 534 | ||
512 | spu_pdata(spu)->cache.sr1 = sr1; | 535 | spu_pdata(spu)->cache.sr1 = sr1; |
diff --git a/include/asm-powerpc/spu_priv1.h b/include/asm-powerpc/spu_priv1.h index 0f37c7c90820..25020a34ce7f 100644 --- a/include/asm-powerpc/spu_priv1.h +++ b/include/asm-powerpc/spu_priv1.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | 25 | ||
26 | struct spu; | 26 | struct spu; |
27 | struct spu_context; | ||
27 | 28 | ||
28 | /* access to priv1 registers */ | 29 | /* access to priv1 registers */ |
29 | 30 | ||
@@ -178,6 +179,8 @@ struct spu_management_ops { | |||
178 | int (*enumerate_spus)(int (*fn)(void *data)); | 179 | int (*enumerate_spus)(int (*fn)(void *data)); |
179 | int (*create_spu)(struct spu *spu, void *data); | 180 | int (*create_spu)(struct spu *spu, void *data); |
180 | int (*destroy_spu)(struct spu *spu); | 181 | int (*destroy_spu)(struct spu *spu); |
182 | void (*enable_spu)(struct spu_context *ctx); | ||
183 | void (*disable_spu)(struct spu_context *ctx); | ||
181 | int (*init_affinity)(void); | 184 | int (*init_affinity)(void); |
182 | }; | 185 | }; |
183 | 186 | ||
@@ -207,6 +210,18 @@ spu_init_affinity (void) | |||
207 | return spu_management_ops->init_affinity(); | 210 | return spu_management_ops->init_affinity(); |
208 | } | 211 | } |
209 | 212 | ||
213 | static inline void | ||
214 | spu_enable_spu (struct spu_context *ctx) | ||
215 | { | ||
216 | spu_management_ops->enable_spu(ctx); | ||
217 | } | ||
218 | |||
219 | static inline void | ||
220 | spu_disable_spu (struct spu_context *ctx) | ||
221 | { | ||
222 | spu_management_ops->disable_spu(ctx); | ||
223 | } | ||
224 | |||
210 | /* | 225 | /* |
211 | * The declarations folowing are put here for convenience | 226 | * The declarations folowing are put here for convenience |
212 | * and only intended to be used by the platform setup code. | 227 | * and only intended to be used by the platform setup code. |