Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c       | 53
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c        | 31
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.c  | 16
-rw-r--r--  arch/powerpc/platforms/cell/spufs/fault.c     | 17
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c     | 10
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c       | 38
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     |  7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h     |  3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c    | 71
-rw-r--r--  arch/powerpc/xmon/xmon.c                      |  6
10 files changed, 188 insertions(+), 64 deletions(-)
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 04f74f9f9ab6..5bf7df146022 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -35,6 +35,7 @@
 #include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -231,6 +232,54 @@ static int iic_host_match(struct irq_host *h, struct device_node *node)
231 "IBM,CBEA-Internal-Interrupt-Controller"); 232 "IBM,CBEA-Internal-Interrupt-Controller");
232} 233}
233 234
235extern int noirqdebug;
236
237static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
238{
239 const unsigned int cpu = smp_processor_id();
240
241 spin_lock(&desc->lock);
242
243 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
244
245 /*
246 * If we're currently running this IRQ, or its disabled,
247 * we shouldn't process the IRQ. Mark it pending, handle
248 * the necessary masking and go out
249 */
250 if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
251 !desc->action)) {
252 desc->status |= IRQ_PENDING;
253 goto out_eoi;
254 }
255
256 kstat_cpu(cpu).irqs[irq]++;
257
258 /* Mark the IRQ currently in progress.*/
259 desc->status |= IRQ_INPROGRESS;
260
261 do {
262 struct irqaction *action = desc->action;
263 irqreturn_t action_ret;
264
265 if (unlikely(!action))
266 goto out_eoi;
267
268 desc->status &= ~IRQ_PENDING;
269 spin_unlock(&desc->lock);
270 action_ret = handle_IRQ_event(irq, action);
271 if (!noirqdebug)
272 note_interrupt(irq, desc, action_ret);
273 spin_lock(&desc->lock);
274
275 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
276
277 desc->status &= ~IRQ_INPROGRESS;
278out_eoi:
279 desc->chip->eoi(irq);
280 spin_unlock(&desc->lock);
281}
282
234static int iic_host_map(struct irq_host *h, unsigned int virq, 283static int iic_host_map(struct irq_host *h, unsigned int virq,
235 irq_hw_number_t hw) 284 irq_hw_number_t hw)
236{ 285{
@@ -240,10 +289,10 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
 		break;
 	case IIC_IRQ_TYPE_IOEXC:
 		set_irq_chip_and_handler(virq, &iic_ioexc_chip,
-			handle_fasteoi_irq);
+			handle_iic_irq);
 		break;
 	default:
-		set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
+		set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
 	}
 	return 0;
 }
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 6bab44b7716b..70c660121ec4 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -141,6 +141,10 @@ static void spu_restart_dma(struct spu *spu)
 
 	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
 		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+	else {
+		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
+		mb();
+	}
 }
 
 static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
@@ -226,11 +230,13 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 		return 0;
 	}
 
-	spu->class_0_pending = 0;
-	spu->dar = ea;
-	spu->dsisr = dsisr;
+	spu->class_1_dar = ea;
+	spu->class_1_dsisr = dsisr;
+
+	spu->stop_callback(spu, 1);
 
-	spu->stop_callback(spu);
+	spu->class_1_dar = 0;
+	spu->class_1_dsisr = 0;
 
 	return 0;
 }
@@ -318,11 +324,15 @@ spu_irq_class_0(int irq, void *data)
 	stat = spu_int_stat_get(spu, 0) & mask;
 
 	spu->class_0_pending |= stat;
-	spu->dsisr = spu_mfc_dsisr_get(spu);
-	spu->dar = spu_mfc_dar_get(spu);
+	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
+	spu->class_0_dar = spu_mfc_dar_get(spu);
 	spin_unlock(&spu->register_lock);
 
-	spu->stop_callback(spu);
+	spu->stop_callback(spu, 0);
+
+	spu->class_0_pending = 0;
+	spu->class_0_dsisr = 0;
+	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
 
@@ -363,6 +373,9 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
 		;
 
+	spu->class_1_dsisr = 0;
+	spu->class_1_dar = 0;
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -396,10 +409,10 @@ spu_irq_class_2(int irq, void *data)
 		spu->ibox_callback(spu);
 
 	if (stat & CLASS2_SPU_STOP_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_HALT_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
 		spu->mfc_callback(spu);
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 67fa7247b80a..906a0a2a9fe1 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -28,6 +28,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/sched.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
@@ -75,8 +76,19 @@ static u64 int_stat_get(struct spu *spu, int class)
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
-	u64 target = iic_get_target_id(cpu);
-	u64 route = target << 48 | target << 32 | target << 16;
+	u64 target;
+	u64 route;
+
+	if (nr_cpus_node(spu->node)) {
+		cpumask_t spumask = node_to_cpumask(spu->node);
+		cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+
+		if (!cpus_intersects(spumask, cpumask))
+			return;
+	}
+
+	target = iic_get_target_id(cpu);
+	route = target << 48 | target << 32 | target << 16;
 	out_be64(&spu->priv1->int_route_RW, route);
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e46d300e21a5..f093a581ac74 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -83,13 +83,18 @@ int spufs_handle_class0(struct spu_context *ctx)
 		return 0;
 
 	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_DMA_ALIGNMENT);
 
 	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_INVALID_DMA);
 
 	if (stat & CLASS0_SPU_ERROR_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_SPE_ERROR);
+
+	ctx->csa.class_0_pending = 0;
 
 	return -EIO;
 }
@@ -119,8 +124,8 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * in time, we can still expect to get the same fault
 	 * the immediately after the context restore.
 	 */
-	ea = ctx->csa.dar;
-	dsisr = ctx->csa.dsisr;
+	ea = ctx->csa.class_1_dar;
+	dsisr = ctx->csa.class_1_dsisr;
 
 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
 		return 0;
@@ -158,7 +163,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	 * time slicing will not preempt the context while the page fault
 	 * handler is running. Context switch code removes mappings.
 	 */
-	ctx->csa.dar = ctx->csa.dsisr = 0;
+	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;
 
 	/*
 	 * If we handled the fault successfully and are in runnable
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 0c32a05ab068..f407b2471855 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -23,6 +23,7 @@
 
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/fsnotify.h>
 #include <linux/backing-dev.h>
 #include <linux/init.h>
 #include <linux/ioctl.h>
@@ -223,7 +224,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
 	parent = dir->d_parent->d_inode;
 	ctx = SPUFS_I(dir->d_inode)->i_ctx;
 
-	mutex_lock(&parent->i_mutex);
+	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
 	ret = spufs_rmdir(parent, dir);
 	mutex_unlock(&parent->i_mutex);
 	WARN_ON(ret);
@@ -618,12 +619,15 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
 	mode &= ~current->fs->umask;
 
 	if (flags & SPU_CREATE_GANG)
-		return spufs_create_gang(nd->path.dentry->d_inode,
+		ret = spufs_create_gang(nd->path.dentry->d_inode,
 					dentry, nd->path.mnt, mode);
 	else
-		return spufs_create_context(nd->path.dentry->d_inode,
+		ret = spufs_create_context(nd->path.dentry->d_inode,
 					dentry, nd->path.mnt, flags, mode,
 					filp);
+	if (ret >= 0)
+		fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
+	return ret;
 
 out_dput:
 	dput(dentry);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index a9c35b7b719f..b7493b865812 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -11,7 +11,7 @@
 #include "spufs.h"
 
 /* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
+void spufs_stop_callback(struct spu *spu, int irq)
 {
 	struct spu_context *ctx = spu->ctx;
 
@@ -24,9 +24,19 @@ void spufs_stop_callback(struct spu *spu)
 	 */
 	if (ctx) {
 		/* Copy exception arguments into module specific structure */
-		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.dsisr = spu->dsisr;
-		ctx->csa.dar = spu->dar;
+		switch(irq) {
+		case 0 :
+			ctx->csa.class_0_pending = spu->class_0_pending;
+			ctx->csa.class_0_dsisr = spu->class_0_dsisr;
+			ctx->csa.class_0_dar = spu->class_0_dar;
+			break;
+		case 1 :
+			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
+			ctx->csa.class_1_dar = spu->class_1_dar;
+			break;
+		case 2 :
+			break;
+		}
 
 		/* ensure that the exception status has hit memory before a
 		 * thread waiting on the context's stop queue is woken */
@@ -34,11 +44,6 @@ void spufs_stop_callback(struct spu *spu)
 
 		wake_up_all(&ctx->stop_wq);
 	}
-
-	/* Clear callback arguments from spu structure */
-	spu->class_0_pending = 0;
-	spu->dsisr = 0;
-	spu->dar = 0;
 }
 
 int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -56,7 +61,11 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
 	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
 		return 1;
 
-	dsisr = ctx->csa.dsisr;
+	dsisr = ctx->csa.class_0_dsisr;
+	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+		return 1;
+
+	dsisr = ctx->csa.class_1_dsisr;
 	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
 
@@ -294,7 +303,7 @@ static int spu_process_callback(struct spu_context *ctx)
 	u32 ls_pointer, npc;
 	void __iomem *ls;
 	long spu_ret;
-	int ret, ret2;
+	int ret;
 
 	/* get syscall block from local store */
 	npc = ctx->ops->npc_read(ctx) & ~3;
@@ -316,11 +325,9 @@ static int spu_process_callback(struct spu_context *ctx)
 		if (spu_ret <= -ERESTARTSYS) {
 			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
 		}
-		ret2 = spu_acquire(ctx);
+		mutex_lock(&ctx->state_mutex);
 		if (ret == -ERESTARTSYS)
 			return ret;
-		if (ret2)
-			return -EINTR;
 	}
 
 	/* need to re-get the ls, as it may have changed when we released the
@@ -343,13 +350,14 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
 	if (mutex_lock_interruptible(&ctx->run_mutex))
 		return -ERESTARTSYS;
 
-	spu_enable_spu(ctx);
 	ctx->event_return = 0;
 
 	ret = spu_acquire(ctx);
 	if (ret)
 		goto out_unlock;
 
+	spu_enable_spu(ctx);
+
 	spu_update_sched_info(ctx);
 
 	ret = spu_run_init(ctx, npc);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 7298e7db2c83..2e411f23462b 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -140,6 +140,9 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * if it is timesliced or preempted.
 	 */
 	ctx->cpus_allowed = current->cpus_allowed;
+
+	/* Save the current cpu id for spu interrupt routing. */
+	ctx->last_ran = raw_smp_processor_id();
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -243,7 +246,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
-	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
 
@@ -657,7 +659,8 @@ static struct spu *find_victim(struct spu_context *ctx)
 
 			victim->stats.invol_ctx_switch++;
 			spu->stats.invol_ctx_switch++;
-			spu_add_to_rq(victim);
+			if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
+				spu_add_to_rq(victim);
 
 			mutex_unlock(&victim->state_mutex);
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 7312745b7540..454c277c1457 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -121,6 +121,7 @@ struct spu_context {
 	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
+	int last_ran;
 
 	/* statistics */
 	struct {
@@ -331,7 +332,7 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
-void spufs_stop_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu, int irq);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index d2a1249d36dd..3df9a36eb2f5 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -132,6 +132,14 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+
+	/*
+	 * This flag needs to be set before calling synchronize_irq so
+	 * that the update will be visible to the relevant handlers
+	 * via a simple load.
+	 */
+	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
 	synchronize_irq(spu->irqs[0]);
 	synchronize_irq(spu->irqs[1]);
 	synchronize_irq(spu->irqs[2]);
@@ -166,9 +174,8 @@ static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
 	/* Save, Step 7:
 	 * Restore, Step 5:
 	 *     Set a software context switch pending flag.
+	 *     Done above in Step 3 - disable_interrupts().
 	 */
-	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
-	mb();
 }
 
 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
@@ -186,20 +193,21 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
 				 MFC_CNTL_SUSPEND_COMPLETE);
 		/* fall through */
 	case MFC_CNTL_SUSPEND_COMPLETE:
-		if (csa) {
+		if (csa)
 			csa->priv2.mfc_control_RW =
-				MFC_CNTL_SUSPEND_MASK |
+				in_be64(&priv2->mfc_control_RW) |
 				MFC_CNTL_SUSPEND_DMA_QUEUE;
-		}
 		break;
 	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
 		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
 		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 				 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
 				 MFC_CNTL_SUSPEND_COMPLETE);
-		if (csa) {
-			csa->priv2.mfc_control_RW = 0;
-		}
+		if (csa)
+			csa->priv2.mfc_control_RW =
+				in_be64(&priv2->mfc_control_RW) &
+				~MFC_CNTL_SUSPEND_DMA_QUEUE &
+				~MFC_CNTL_SUSPEND_MASK;
 		break;
 	}
 }
@@ -249,16 +257,21 @@ static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
 	}
 }
 
-static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
+static inline void save_mfc_stopped_status(struct spu_state *csa,
+		struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
+			MFC_CNTL_DMA_QUEUES_EMPTY;
 
 	/* Save, Step 12:
 	 *     Read MFC_CNTL[Ds]. Update saved copy of
 	 *     CSA.MFC_CNTL[Ds].
+	 *
+	 * update: do the same with MFC_CNTL[Q].
 	 */
-	csa->priv2.mfc_control_RW |=
-		in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
+	csa->priv2.mfc_control_RW &= ~mask;
+	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
 }
 
 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
@@ -462,7 +475,9 @@ static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
 	 * Restore, Step 14.
 	 *     Write MFC_CNTL[Pc]=1 (purge queue).
 	 */
-	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
+	out_be64(&priv2->mfc_control_RW,
+			MFC_CNTL_PURGE_DMA_REQUEST |
+			MFC_CNTL_SUSPEND_MASK);
 	eieio();
 }
 
@@ -725,10 +740,14 @@ static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
 	/* Save, Step 48:
 	 * Restore, Step 23.
 	 *     Change the software context switch pending flag
-	 *     to context switch active.
+	 *     to context switch active. This implementation does
+	 *     not uses a switch active flag.
 	 *
-	 * This implementation does not uses a switch active flag.
+	 * Now that we have saved the mfc in the csa, we can add in the
+	 * restart command if an exception occurred.
 	 */
+	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
+		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
 	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
 	mb();
 }
@@ -1690,6 +1709,13 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
 	eieio();
 }
 
+static inline void set_int_route(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	spu_cpu_affinity_set(spu, ctx->last_ran);
+}
+
 static inline void restore_other_spu_access(struct spu_state *csa,
 					    struct spu *spu)
 {
@@ -1721,15 +1747,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
 	 */
 	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
 	eieio();
+
 	/*
-	 * FIXME: this is to restart a DMA that we were processing
-	 * before the save. better remember the fault information
-	 * in the csa instead.
+	 * The queue is put back into the same state that was evident prior to
+	 * the context switch. The suspend flag is added to the saved state in
+	 * the csa, if the operational state was suspending or suspended. In
+	 * this case, the code that suspended the mfc is responsible for
+	 * continuing it. Note that SPE faults do not change the operational
+	 * state of the spu.
 	 */
-	if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
-		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
-		eieio();
-	}
 }
 
 static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
@@ -1788,7 +1814,7 @@ static int quiece_spu(struct spu_state *prev, struct spu *spu)
 	save_spu_runcntl(prev, spu);	        /* Step 9. */
 	save_mfc_sr1(prev, spu);	        /* Step 10. */
 	save_spu_status(prev, spu);	        /* Step 11. */
-	save_mfc_decr(prev, spu);	        /* Step 12. */
+	save_mfc_stopped_status(prev, spu);     /* Step 12. */
 	halt_mfc_decr(prev, spu);	        /* Step 13. */
 	save_timebase(prev, spu);		/* Step 14. */
 	remove_other_spu_access(prev, spu);	/* Step 15. */
@@ -2000,6 +2026,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
 	check_ppuint_mb_stat(next, spu);	/* Step 67. */
 	spu_invalidate_slbs(spu);		/* Modified Step 68. */
 	restore_mfc_sr1(next, spu);		/* Step 69. */
+	set_int_route(next, spu);		/* NEW */
 	restore_other_spu_access(next, spu);	/* Step 70. */
 	restore_spu_runcntl(next, spu);		/* Step 71. */
 	restore_mfc_cntl(next, spu);		/* Step 72. */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 52c74780f403..1702de9395ee 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2842,9 +2842,11 @@ static void dump_spu_fields(struct spu *spu)
 	DUMP_FIELD(spu, "0x%lx", ls_size);
 	DUMP_FIELD(spu, "0x%x", node);
 	DUMP_FIELD(spu, "0x%lx", flags);
-	DUMP_FIELD(spu, "0x%lx", dar);
-	DUMP_FIELD(spu, "0x%lx", dsisr);
 	DUMP_FIELD(spu, "%d", class_0_pending);
+	DUMP_FIELD(spu, "0x%lx", class_0_dar);
+	DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
+	DUMP_FIELD(spu, "0x%lx", class_1_dar);
+	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
 	DUMP_FIELD(spu, "0x%lx", irqs[0]);
 	DUMP_FIELD(spu, "0x%lx", irqs[1]);
 	DUMP_FIELD(spu, "0x%lx", irqs[2]);