aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms/cell
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c76
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c4
-rw-r--r--arch/powerpc/platforms/cell/celleb_scc_pciex.c5
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c1
-rw-r--r--arch/powerpc/platforms/cell/iommu.c8
-rw-r--r--arch/powerpc/platforms/cell/ras.c22
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c44
-rw-r--r--arch/powerpc/platforms/cell/spufs/context.c12
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c203
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c47
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c21
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c28
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h19
-rw-r--r--arch/powerpc/platforms/cell/spufs/sputrace.c8
15 files changed, 353 insertions, 147 deletions
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index c39f5c225f2e..896548ba1ca1 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -14,6 +14,7 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/msi.h> 15#include <linux/msi.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/debugfs.h>
17 18
18#include <asm/dcr.h> 19#include <asm/dcr.h>
19#include <asm/machdep.h> 20#include <asm/machdep.h>
@@ -69,8 +70,19 @@ struct axon_msic {
69 dma_addr_t fifo_phys; 70 dma_addr_t fifo_phys;
70 dcr_host_t dcr_host; 71 dcr_host_t dcr_host;
71 u32 read_offset; 72 u32 read_offset;
73#ifdef DEBUG
74 u32 __iomem *trigger;
75#endif
72}; 76};
73 77
78#ifdef DEBUG
79void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
80#else
81static inline void axon_msi_debug_setup(struct device_node *dn,
82 struct axon_msic *msic) { }
83#endif
84
85
74static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) 86static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
75{ 87{
76 pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); 88 pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
@@ -346,7 +358,14 @@ static int axon_msi_probe(struct of_device *device,
346 goto out_free_msic; 358 goto out_free_msic;
347 } 359 }
348 360
349 msic->irq_host = irq_alloc_host(of_node_get(dn), IRQ_HOST_MAP_NOMAP, 361 virq = irq_of_parse_and_map(dn, 0);
362 if (virq == NO_IRQ) {
363 printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
364 dn->full_name);
365 goto out_free_fifo;
366 }
367
368 msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
350 NR_IRQS, &msic_host_ops, 0); 369 NR_IRQS, &msic_host_ops, 0);
351 if (!msic->irq_host) { 370 if (!msic->irq_host) {
352 printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n", 371 printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
@@ -356,13 +375,6 @@ static int axon_msi_probe(struct of_device *device,
356 375
357 msic->irq_host->host_data = msic; 376 msic->irq_host->host_data = msic;
358 377
359 virq = irq_of_parse_and_map(dn, 0);
360 if (virq == NO_IRQ) {
361 printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
362 dn->full_name);
363 goto out_free_host;
364 }
365
366 set_irq_data(virq, msic); 378 set_irq_data(virq, msic);
367 set_irq_chained_handler(virq, axon_msi_cascade); 379 set_irq_chained_handler(virq, axon_msi_cascade);
368 pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq); 380 pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);
@@ -381,12 +393,12 @@ static int axon_msi_probe(struct of_device *device,
381 ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs; 393 ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
382 ppc_md.msi_check_device = axon_msi_check_device; 394 ppc_md.msi_check_device = axon_msi_check_device;
383 395
396 axon_msi_debug_setup(dn, msic);
397
384 printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name); 398 printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
385 399
386 return 0; 400 return 0;
387 401
388out_free_host:
389 kfree(msic->irq_host);
390out_free_fifo: 402out_free_fifo:
391 dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, 403 dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
392 msic->fifo_phys); 404 msic->fifo_phys);
@@ -418,3 +430,47 @@ static int __init axon_msi_init(void)
418 return of_register_platform_driver(&axon_msi_driver); 430 return of_register_platform_driver(&axon_msi_driver);
419} 431}
420subsys_initcall(axon_msi_init); 432subsys_initcall(axon_msi_init);
433
434
435#ifdef DEBUG
436static int msic_set(void *data, u64 val)
437{
438 struct axon_msic *msic = data;
439 out_le32(msic->trigger, val);
440 return 0;
441}
442
443static int msic_get(void *data, u64 *val)
444{
445 *val = 0;
446 return 0;
447}
448
449DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
450
451void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
452{
453 char name[8];
454 u64 addr;
455
456 addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
457 if (addr == OF_BAD_ADDR) {
458 pr_debug("axon_msi: couldn't translate reg property\n");
459 return;
460 }
461
462 msic->trigger = ioremap(addr, 0x4);
463 if (!msic->trigger) {
464 pr_debug("axon_msi: ioremap failed\n");
465 return;
466 }
467
468 snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
469
470 if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
471 msic, &fops_msic)) {
472 pr_debug("axon_msi: debugfs_create_file failed!\n");
473 return;
474 }
475}
476#endif /* DEBUG */
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 81467ff055c8..2e67bd840e01 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -112,7 +112,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
112 if (!(vflags & HPTE_V_BOLTED)) 112 if (!(vflags & HPTE_V_BOLTED))
113 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); 113 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
114 114
115 if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE)) 115 if (rflags & _PAGE_NO_CACHE)
116 hpte_r &= ~_PAGE_COHERENT; 116 hpte_r &= ~_PAGE_COHERENT;
117 117
118 spin_lock(&beat_htab_lock); 118 spin_lock(&beat_htab_lock);
@@ -334,7 +334,7 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
334 if (!(vflags & HPTE_V_BOLTED)) 334 if (!(vflags & HPTE_V_BOLTED))
335 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); 335 DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
336 336
337 if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE)) 337 if (rflags & _PAGE_NO_CACHE)
338 hpte_r &= ~_PAGE_COHERENT; 338 hpte_r &= ~_PAGE_COHERENT;
339 339
340 /* insert into not-volted entry */ 340 /* insert into not-volted entry */
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 31da84c458d2..0e04f8fb152a 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -217,7 +217,7 @@ static u##size scc_pciex_in##name(unsigned long port) \
217static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \ 217static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \
218{ \ 218{ \
219 struct iowa_bus *bus = iowa_pio_find_bus(p); \ 219 struct iowa_bus *bus = iowa_pio_find_bus(p); \
220 u##size *dst = b; \ 220 __le##size *dst = b; \
221 for (; c != 0; c--, dst++) \ 221 for (; c != 0; c--, dst++) \
222 *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \ 222 *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \
223 scc_pciex_io_flush(bus); \ 223 scc_pciex_io_flush(bus); \
@@ -231,10 +231,11 @@ static void scc_pciex_outs##name(unsigned long p, const void *b, \
231 unsigned long c) \ 231 unsigned long c) \
232{ \ 232{ \
233 struct iowa_bus *bus = iowa_pio_find_bus(p); \ 233 struct iowa_bus *bus = iowa_pio_find_bus(p); \
234 const u##size *src = b; \ 234 const __le##size *src = b; \
235 for (; c != 0; c--, src++) \ 235 for (; c != 0; c--, src++) \
236 __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \ 236 __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \
237} 237}
238#define __le8 u8
238#define cpu_to_le8(x) (x) 239#define cpu_to_le8(x) (x)
239#define le8_to_cpu(x) (x) 240#define le8_to_cpu(x) (x)
240PCIEX_PIO_FUNC(8, b) 241PCIEX_PIO_FUNC(8, b)
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 5bf7df146022..2d5bb22d6c09 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -218,6 +218,7 @@ void iic_request_IPIs(void)
218{ 218{
219 iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); 219 iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
220 iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); 220 iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
221 iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
221#ifdef CONFIG_DEBUGGER 222#ifdef CONFIG_DEBUGGER
222 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); 223 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
223#endif /* CONFIG_DEBUGGER */ 224#endif /* CONFIG_DEBUGGER */
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 45646b2b4af4..eeacb3a52ca1 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -540,7 +540,7 @@ static unsigned long cell_dma_direct_offset;
540static unsigned long dma_iommu_fixed_base; 540static unsigned long dma_iommu_fixed_base;
541struct dma_mapping_ops dma_iommu_fixed_ops; 541struct dma_mapping_ops dma_iommu_fixed_ops;
542 542
543static void cell_dma_dev_setup_iommu(struct device *dev) 543static struct iommu_table *cell_get_iommu_table(struct device *dev)
544{ 544{
545 struct iommu_window *window; 545 struct iommu_window *window;
546 struct cbe_iommu *iommu; 546 struct cbe_iommu *iommu;
@@ -555,11 +555,11 @@ static void cell_dma_dev_setup_iommu(struct device *dev)
555 printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n", 555 printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
556 archdata->of_node ? archdata->of_node->full_name : "?", 556 archdata->of_node ? archdata->of_node->full_name : "?",
557 archdata->numa_node); 557 archdata->numa_node);
558 return; 558 return NULL;
559 } 559 }
560 window = list_entry(iommu->windows.next, struct iommu_window, list); 560 window = list_entry(iommu->windows.next, struct iommu_window, list);
561 561
562 archdata->dma_data = &window->table; 562 return &window->table;
563} 563}
564 564
565static void cell_dma_dev_setup_fixed(struct device *dev); 565static void cell_dma_dev_setup_fixed(struct device *dev);
@@ -572,7 +572,7 @@ static void cell_dma_dev_setup(struct device *dev)
572 if (get_dma_ops(dev) == &dma_iommu_fixed_ops) 572 if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
573 cell_dma_dev_setup_fixed(dev); 573 cell_dma_dev_setup_fixed(dev);
574 else if (get_pci_dma_ops() == &dma_iommu_ops) 574 else if (get_pci_dma_ops() == &dma_iommu_ops)
575 cell_dma_dev_setup_iommu(dev); 575 archdata->dma_data = cell_get_iommu_table(dev);
576 else if (get_pci_dma_ops() == &dma_direct_ops) 576 else if (get_pci_dma_ops() == &dma_direct_ops)
577 archdata->dma_data = (void *)cell_dma_direct_offset; 577 archdata->dma_data = (void *)cell_dma_direct_offset;
578 else 578 else
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 655704ad03cf..505f9b9bdf0c 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -17,6 +17,7 @@
17#include <asm/reg.h> 17#include <asm/reg.h>
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/kexec.h>
20#include <asm/machdep.h> 21#include <asm/machdep.h>
21#include <asm/rtas.h> 22#include <asm/rtas.h>
22#include <asm/cell-regs.h> 23#include <asm/cell-regs.h>
@@ -226,6 +227,11 @@ static int cbe_ptcal_notify_reboot(struct notifier_block *nb,
226 return cbe_ptcal_disable(); 227 return cbe_ptcal_disable();
227} 228}
228 229
230static void cbe_ptcal_crash_shutdown(void)
231{
232 cbe_ptcal_disable();
233}
234
229static struct notifier_block cbe_ptcal_reboot_notifier = { 235static struct notifier_block cbe_ptcal_reboot_notifier = {
230 .notifier_call = cbe_ptcal_notify_reboot 236 .notifier_call = cbe_ptcal_notify_reboot
231}; 237};
@@ -241,12 +247,20 @@ int __init cbe_ptcal_init(void)
241 return -ENODEV; 247 return -ENODEV;
242 248
243 ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier); 249 ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier);
244 if (ret) { 250 if (ret)
245 printk(KERN_ERR "Can't disable PTCAL, so not enabling\n"); 251 goto out1;
246 return ret; 252
247 } 253 ret = crash_shutdown_register(&cbe_ptcal_crash_shutdown);
254 if (ret)
255 goto out2;
248 256
249 return cbe_ptcal_enable(); 257 return cbe_ptcal_enable();
258
259out2:
260 unregister_reboot_notifier(&cbe_ptcal_reboot_notifier);
261out1:
262 printk(KERN_ERR "Can't disable PTCAL, so not enabling\n");
263 return ret;
250} 264}
251 265
252arch_initcall(cbe_ptcal_init); 266arch_initcall(cbe_ptcal_init);
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 3f4b4aef756d..4e5655624ae8 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -300,7 +300,7 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
300 panic("spider_pic: can't map registers !"); 300 panic("spider_pic: can't map registers !");
301 301
302 /* Allocate a host */ 302 /* Allocate a host */
303 pic->host = irq_alloc_host(of_node_get(of_node), IRQ_HOST_MAP_LINEAR, 303 pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR,
304 SPIDER_SRC_COUNT, &spider_host_ops, 304 SPIDER_SRC_COUNT, &spider_host_ops,
305 SPIDER_IRQ_INVALID); 305 SPIDER_IRQ_INVALID);
306 if (pic->host == NULL) 306 if (pic->host == NULL)
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 70c660121ec4..78f905bc6a42 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -219,15 +219,25 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
219extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX 219extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
220static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) 220static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
221{ 221{
222 int ret;
223
222 pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea); 224 pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
223 225
224 /* Handle kernel space hash faults immediately. 226 /*
225 User hash faults need to be deferred to process context. */ 227 * Handle kernel space hash faults immediately. User hash
226 if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) 228 * faults need to be deferred to process context.
227 && REGION_ID(ea) != USER_REGION_ID 229 */
228 && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) { 230 if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
229 spu_restart_dma(spu); 231 (REGION_ID(ea) != USER_REGION_ID)) {
230 return 0; 232
233 spin_unlock(&spu->register_lock);
234 ret = hash_page(ea, _PAGE_PRESENT, 0x300);
235 spin_lock(&spu->register_lock);
236
237 if (!ret) {
238 spu_restart_dma(spu);
239 return 0;
240 }
231 } 241 }
232 242
233 spu->class_1_dar = ea; 243 spu->class_1_dar = ea;
@@ -324,17 +334,13 @@ spu_irq_class_0(int irq, void *data)
324 stat = spu_int_stat_get(spu, 0) & mask; 334 stat = spu_int_stat_get(spu, 0) & mask;
325 335
326 spu->class_0_pending |= stat; 336 spu->class_0_pending |= stat;
327 spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
328 spu->class_0_dar = spu_mfc_dar_get(spu); 337 spu->class_0_dar = spu_mfc_dar_get(spu);
329 spin_unlock(&spu->register_lock);
330
331 spu->stop_callback(spu, 0); 338 spu->stop_callback(spu, 0);
332
333 spu->class_0_pending = 0; 339 spu->class_0_pending = 0;
334 spu->class_0_dsisr = 0;
335 spu->class_0_dar = 0; 340 spu->class_0_dar = 0;
336 341
337 spu_int_stat_clear(spu, 0, stat); 342 spu_int_stat_clear(spu, 0, stat);
343 spin_unlock(&spu->register_lock);
338 344
339 return IRQ_HANDLED; 345 return IRQ_HANDLED;
340} 346}
@@ -357,13 +363,12 @@ spu_irq_class_1(int irq, void *data)
357 spu_mfc_dsisr_set(spu, 0ul); 363 spu_mfc_dsisr_set(spu, 0ul);
358 spu_int_stat_clear(spu, 1, stat); 364 spu_int_stat_clear(spu, 1, stat);
359 365
360 if (stat & CLASS1_SEGMENT_FAULT_INTR)
361 __spu_trap_data_seg(spu, dar);
362
363 spin_unlock(&spu->register_lock);
364 pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat, 366 pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
365 dar, dsisr); 367 dar, dsisr);
366 368
369 if (stat & CLASS1_SEGMENT_FAULT_INTR)
370 __spu_trap_data_seg(spu, dar);
371
367 if (stat & CLASS1_STORAGE_FAULT_INTR) 372 if (stat & CLASS1_STORAGE_FAULT_INTR)
368 __spu_trap_data_map(spu, dar, dsisr); 373 __spu_trap_data_map(spu, dar, dsisr);
369 374
@@ -376,6 +381,8 @@ spu_irq_class_1(int irq, void *data)
376 spu->class_1_dsisr = 0; 381 spu->class_1_dsisr = 0;
377 spu->class_1_dar = 0; 382 spu->class_1_dar = 0;
378 383
384 spin_unlock(&spu->register_lock);
385
379 return stat ? IRQ_HANDLED : IRQ_NONE; 386 return stat ? IRQ_HANDLED : IRQ_NONE;
380} 387}
381 388
@@ -394,14 +401,12 @@ spu_irq_class_2(int irq, void *data)
394 mask = spu_int_mask_get(spu, 2); 401 mask = spu_int_mask_get(spu, 2);
395 /* ignore interrupts we're not waiting for */ 402 /* ignore interrupts we're not waiting for */
396 stat &= mask; 403 stat &= mask;
397
398 /* mailbox interrupts are level triggered. mask them now before 404 /* mailbox interrupts are level triggered. mask them now before
399 * acknowledging */ 405 * acknowledging */
400 if (stat & mailbox_intrs) 406 if (stat & mailbox_intrs)
401 spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs)); 407 spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
402 /* acknowledge all interrupts before the callbacks */ 408 /* acknowledge all interrupts before the callbacks */
403 spu_int_stat_clear(spu, 2, stat); 409 spu_int_stat_clear(spu, 2, stat);
404 spin_unlock(&spu->register_lock);
405 410
406 pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); 411 pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
407 412
@@ -421,6 +426,9 @@ spu_irq_class_2(int irq, void *data)
421 spu->wbox_callback(spu); 426 spu->wbox_callback(spu);
422 427
423 spu->stats.class2_intr++; 428 spu->stats.class2_intr++;
429
430 spin_unlock(&spu->register_lock);
431
424 return stat ? IRQ_HANDLED : IRQ_NONE; 432 return stat ? IRQ_HANDLED : IRQ_NONE;
425} 433}
426 434
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 177735f79317..6653ddbed048 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -130,17 +130,17 @@ void spu_unmap_mappings(struct spu_context *ctx)
130 if (ctx->local_store) 130 if (ctx->local_store)
131 unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1); 131 unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
132 if (ctx->mfc) 132 if (ctx->mfc)
133 unmap_mapping_range(ctx->mfc, 0, 0x1000, 1); 133 unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1);
134 if (ctx->cntl) 134 if (ctx->cntl)
135 unmap_mapping_range(ctx->cntl, 0, 0x1000, 1); 135 unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1);
136 if (ctx->signal1) 136 if (ctx->signal1)
137 unmap_mapping_range(ctx->signal1, 0, PAGE_SIZE, 1); 137 unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
138 if (ctx->signal2) 138 if (ctx->signal2)
139 unmap_mapping_range(ctx->signal2, 0, PAGE_SIZE, 1); 139 unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
140 if (ctx->mss) 140 if (ctx->mss)
141 unmap_mapping_range(ctx->mss, 0, 0x1000, 1); 141 unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1);
142 if (ctx->psmap) 142 if (ctx->psmap)
143 unmap_mapping_range(ctx->psmap, 0, 0x20000, 1); 143 unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1);
144 mutex_unlock(&ctx->mapping_lock); 144 mutex_unlock(&ctx->mapping_lock);
145} 145}
146 146
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c81341ff75b5..99c73066b82f 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -238,11 +238,13 @@ spufs_mem_write(struct file *file, const char __user *buffer,
238 return size; 238 return size;
239} 239}
240 240
241static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma, 241static int
242 unsigned long address) 242spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
243{ 243{
244 struct spu_context *ctx = vma->vm_file->private_data; 244 struct spu_context *ctx = vma->vm_file->private_data;
245 unsigned long pfn, offset, addr0 = address; 245 unsigned long address = (unsigned long)vmf->virtual_address;
246 unsigned long pfn, offset;
247
246#ifdef CONFIG_SPU_FS_64K_LS 248#ifdef CONFIG_SPU_FS_64K_LS
247 struct spu_state *csa = &ctx->csa; 249 struct spu_state *csa = &ctx->csa;
248 int psize; 250 int psize;
@@ -260,15 +262,15 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
260 } 262 }
261#endif /* CONFIG_SPU_FS_64K_LS */ 263#endif /* CONFIG_SPU_FS_64K_LS */
262 264
263 offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); 265 offset = vmf->pgoff << PAGE_SHIFT;
264 if (offset >= LS_SIZE) 266 if (offset >= LS_SIZE)
265 return NOPFN_SIGBUS; 267 return VM_FAULT_SIGBUS;
266 268
267 pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n", 269 pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n",
268 addr0, address, offset); 270 address, offset);
269 271
270 if (spu_acquire(ctx)) 272 if (spu_acquire(ctx))
271 return NOPFN_REFAULT; 273 return VM_FAULT_NOPAGE;
272 274
273 if (ctx->state == SPU_STATE_SAVED) { 275 if (ctx->state == SPU_STATE_SAVED) {
274 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) 276 vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
@@ -283,12 +285,12 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
283 285
284 spu_release(ctx); 286 spu_release(ctx);
285 287
286 return NOPFN_REFAULT; 288 return VM_FAULT_NOPAGE;
287} 289}
288 290
289 291
290static struct vm_operations_struct spufs_mem_mmap_vmops = { 292static struct vm_operations_struct spufs_mem_mmap_vmops = {
291 .nopfn = spufs_mem_mmap_nopfn, 293 .fault = spufs_mem_mmap_fault,
292}; 294};
293 295
294static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) 296static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
@@ -351,20 +353,19 @@ static const struct file_operations spufs_mem_fops = {
351#endif 353#endif
352}; 354};
353 355
354static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma, 356static int spufs_ps_fault(struct vm_area_struct *vma,
355 unsigned long address, 357 struct vm_fault *vmf,
356 unsigned long ps_offs, 358 unsigned long ps_offs,
357 unsigned long ps_size) 359 unsigned long ps_size)
358{ 360{
359 struct spu_context *ctx = vma->vm_file->private_data; 361 struct spu_context *ctx = vma->vm_file->private_data;
360 unsigned long area, offset = address - vma->vm_start; 362 unsigned long area, offset = vmf->pgoff << PAGE_SHIFT;
361 int ret = 0; 363 int ret = 0;
362 364
363 spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx); 365 spu_context_nospu_trace(spufs_ps_fault__enter, ctx);
364 366
365 offset += vma->vm_pgoff << PAGE_SHIFT;
366 if (offset >= ps_size) 367 if (offset >= ps_size)
367 return NOPFN_SIGBUS; 368 return VM_FAULT_SIGBUS;
368 369
369 /* 370 /*
370 * Because we release the mmap_sem, the context may be destroyed while 371 * Because we release the mmap_sem, the context may be destroyed while
@@ -378,7 +379,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
378 * pages to hand out to the user, but we don't want to wait 379 * pages to hand out to the user, but we don't want to wait
379 * with the mmap_sem held. 380 * with the mmap_sem held.
380 * It is possible to drop the mmap_sem here, but then we need 381 * It is possible to drop the mmap_sem here, but then we need
381 * to return NOPFN_REFAULT because the mappings may have 382 * to return VM_FAULT_NOPAGE because the mappings may have
382 * hanged. 383 * hanged.
383 */ 384 */
384 if (spu_acquire(ctx)) 385 if (spu_acquire(ctx))
@@ -386,14 +387,15 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
386 387
387 if (ctx->state == SPU_STATE_SAVED) { 388 if (ctx->state == SPU_STATE_SAVED) {
388 up_read(&current->mm->mmap_sem); 389 up_read(&current->mm->mmap_sem);
389 spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx); 390 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
390 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); 391 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
391 spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu); 392 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
392 down_read(&current->mm->mmap_sem); 393 down_read(&current->mm->mmap_sem);
393 } else { 394 } else {
394 area = ctx->spu->problem_phys + ps_offs; 395 area = ctx->spu->problem_phys + ps_offs;
395 vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); 396 vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
396 spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu); 397 (area + offset) >> PAGE_SHIFT);
398 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu);
397 } 399 }
398 400
399 if (!ret) 401 if (!ret)
@@ -401,18 +403,18 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
401 403
402refault: 404refault:
403 put_spu_context(ctx); 405 put_spu_context(ctx);
404 return NOPFN_REFAULT; 406 return VM_FAULT_NOPAGE;
405} 407}
406 408
407#if SPUFS_MMAP_4K 409#if SPUFS_MMAP_4K
408static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma, 410static int spufs_cntl_mmap_fault(struct vm_area_struct *vma,
409 unsigned long address) 411 struct vm_fault *vmf)
410{ 412{
411 return spufs_ps_nopfn(vma, address, 0x4000, 0x1000); 413 return spufs_ps_fault(vma, vmf, 0x4000, SPUFS_CNTL_MAP_SIZE);
412} 414}
413 415
414static struct vm_operations_struct spufs_cntl_mmap_vmops = { 416static struct vm_operations_struct spufs_cntl_mmap_vmops = {
415 .nopfn = spufs_cntl_mmap_nopfn, 417 .fault = spufs_cntl_mmap_fault,
416}; 418};
417 419
418/* 420/*
@@ -1097,23 +1099,23 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
1097 return 4; 1099 return 4;
1098} 1100}
1099 1101
1100static unsigned long spufs_signal1_mmap_nopfn(struct vm_area_struct *vma, 1102static int
1101 unsigned long address) 1103spufs_signal1_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1102{ 1104{
1103#if PAGE_SIZE == 0x1000 1105#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1104 return spufs_ps_nopfn(vma, address, 0x14000, 0x1000); 1106 return spufs_ps_fault(vma, vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE);
1105#elif PAGE_SIZE == 0x10000 1107#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1106 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 1108 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1107 * signal 1 and 2 area 1109 * signal 1 and 2 area
1108 */ 1110 */
1109 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000); 1111 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1110#else 1112#else
1111#error unsupported page size 1113#error unsupported page size
1112#endif 1114#endif
1113} 1115}
1114 1116
1115static struct vm_operations_struct spufs_signal1_mmap_vmops = { 1117static struct vm_operations_struct spufs_signal1_mmap_vmops = {
1116 .nopfn = spufs_signal1_mmap_nopfn, 1118 .fault = spufs_signal1_mmap_fault,
1117}; 1119};
1118 1120
1119static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) 1121static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1234,23 +1236,23 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
1234} 1236}
1235 1237
1236#if SPUFS_MMAP_4K 1238#if SPUFS_MMAP_4K
1237static unsigned long spufs_signal2_mmap_nopfn(struct vm_area_struct *vma, 1239static int
1238 unsigned long address) 1240spufs_signal2_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1239{ 1241{
1240#if PAGE_SIZE == 0x1000 1242#if SPUFS_SIGNAL_MAP_SIZE == 0x1000
1241 return spufs_ps_nopfn(vma, address, 0x1c000, 0x1000); 1243 return spufs_ps_fault(vma, vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE);
1242#elif PAGE_SIZE == 0x10000 1244#elif SPUFS_SIGNAL_MAP_SIZE == 0x10000
1243 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole 1245 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
1244 * signal 1 and 2 area 1246 * signal 1 and 2 area
1245 */ 1247 */
1246 return spufs_ps_nopfn(vma, address, 0x10000, 0x10000); 1248 return spufs_ps_fault(vma, vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE);
1247#else 1249#else
1248#error unsupported page size 1250#error unsupported page size
1249#endif 1251#endif
1250} 1252}
1251 1253
1252static struct vm_operations_struct spufs_signal2_mmap_vmops = { 1254static struct vm_operations_struct spufs_signal2_mmap_vmops = {
1253 .nopfn = spufs_signal2_mmap_nopfn, 1255 .fault = spufs_signal2_mmap_fault,
1254}; 1256};
1255 1257
1256static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) 1258static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
@@ -1362,14 +1364,14 @@ DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
1362 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE); 1364 spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE);
1363 1365
1364#if SPUFS_MMAP_4K 1366#if SPUFS_MMAP_4K
1365static unsigned long spufs_mss_mmap_nopfn(struct vm_area_struct *vma, 1367static int
1366 unsigned long address) 1368spufs_mss_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1367{ 1369{
1368 return spufs_ps_nopfn(vma, address, 0x0000, 0x1000); 1370 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_MSS_MAP_SIZE);
1369} 1371}
1370 1372
1371static struct vm_operations_struct spufs_mss_mmap_vmops = { 1373static struct vm_operations_struct spufs_mss_mmap_vmops = {
1372 .nopfn = spufs_mss_mmap_nopfn, 1374 .fault = spufs_mss_mmap_fault,
1373}; 1375};
1374 1376
1375/* 1377/*
@@ -1424,14 +1426,14 @@ static const struct file_operations spufs_mss_fops = {
1424 .mmap = spufs_mss_mmap, 1426 .mmap = spufs_mss_mmap,
1425}; 1427};
1426 1428
1427static unsigned long spufs_psmap_mmap_nopfn(struct vm_area_struct *vma, 1429static int
1428 unsigned long address) 1430spufs_psmap_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1429{ 1431{
1430 return spufs_ps_nopfn(vma, address, 0x0000, 0x20000); 1432 return spufs_ps_fault(vma, vmf, 0x0000, SPUFS_PS_MAP_SIZE);
1431} 1433}
1432 1434
1433static struct vm_operations_struct spufs_psmap_mmap_vmops = { 1435static struct vm_operations_struct spufs_psmap_mmap_vmops = {
1434 .nopfn = spufs_psmap_mmap_nopfn, 1436 .fault = spufs_psmap_mmap_fault,
1435}; 1437};
1436 1438
1437/* 1439/*
@@ -1484,14 +1486,14 @@ static const struct file_operations spufs_psmap_fops = {
1484 1486
1485 1487
1486#if SPUFS_MMAP_4K 1488#if SPUFS_MMAP_4K
1487static unsigned long spufs_mfc_mmap_nopfn(struct vm_area_struct *vma, 1489static int
1488 unsigned long address) 1490spufs_mfc_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1489{ 1491{
1490 return spufs_ps_nopfn(vma, address, 0x3000, 0x1000); 1492 return spufs_ps_fault(vma, vmf, 0x3000, SPUFS_MFC_MAP_SIZE);
1491} 1493}
1492 1494
1493static struct vm_operations_struct spufs_mfc_mmap_vmops = { 1495static struct vm_operations_struct spufs_mfc_mmap_vmops = {
1494 .nopfn = spufs_mfc_mmap_nopfn, 1496 .fault = spufs_mfc_mmap_fault,
1495}; 1497};
1496 1498
1497/* 1499/*
@@ -2553,22 +2555,74 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
2553 wake_up(&ctx->switch_log->wait); 2555 wake_up(&ctx->switch_log->wait);
2554} 2556}
2555 2557
2556struct tree_descr spufs_dir_contents[] = { 2558static int spufs_show_ctx(struct seq_file *s, void *private)
2559{
2560 struct spu_context *ctx = s->private;
2561 u64 mfc_control_RW;
2562
2563 mutex_lock(&ctx->state_mutex);
2564 if (ctx->spu) {
2565 struct spu *spu = ctx->spu;
2566 struct spu_priv2 __iomem *priv2 = spu->priv2;
2567
2568 spin_lock_irq(&spu->register_lock);
2569 mfc_control_RW = in_be64(&priv2->mfc_control_RW);
2570 spin_unlock_irq(&spu->register_lock);
2571 } else {
2572 struct spu_state *csa = &ctx->csa;
2573
2574 mfc_control_RW = csa->priv2.mfc_control_RW;
2575 }
2576
2577 seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)"
2578 " %c %lx %lx %lx %lx %x %x\n",
2579 ctx->state == SPU_STATE_SAVED ? 'S' : 'R',
2580 ctx->flags,
2581 ctx->sched_flags,
2582 ctx->prio,
2583 ctx->time_slice,
2584 ctx->spu ? ctx->spu->number : -1,
2585 !list_empty(&ctx->rq) ? 'q' : ' ',
2586 ctx->csa.class_0_pending,
2587 ctx->csa.class_0_dar,
2588 ctx->csa.class_1_dsisr,
2589 mfc_control_RW,
2590 ctx->ops->runcntl_read(ctx),
2591 ctx->ops->status_read(ctx));
2592
2593 mutex_unlock(&ctx->state_mutex);
2594
2595 return 0;
2596}
2597
2598static int spufs_ctx_open(struct inode *inode, struct file *file)
2599{
2600 return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx);
2601}
2602
2603static const struct file_operations spufs_ctx_fops = {
2604 .open = spufs_ctx_open,
2605 .read = seq_read,
2606 .llseek = seq_lseek,
2607 .release = single_release,
2608};
2609
2610struct spufs_tree_descr spufs_dir_contents[] = {
2557 { "capabilities", &spufs_caps_fops, 0444, }, 2611 { "capabilities", &spufs_caps_fops, 0444, },
2558 { "mem", &spufs_mem_fops, 0666, }, 2612 { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2559 { "regs", &spufs_regs_fops, 0666, }, 2613 { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), },
2560 { "mbox", &spufs_mbox_fops, 0444, }, 2614 { "mbox", &spufs_mbox_fops, 0444, },
2561 { "ibox", &spufs_ibox_fops, 0444, }, 2615 { "ibox", &spufs_ibox_fops, 0444, },
2562 { "wbox", &spufs_wbox_fops, 0222, }, 2616 { "wbox", &spufs_wbox_fops, 0222, },
2563 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2617 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2564 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2618 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2565 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2619 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2566 { "signal1", &spufs_signal1_fops, 0666, }, 2620 { "signal1", &spufs_signal1_fops, 0666, },
2567 { "signal2", &spufs_signal2_fops, 0666, }, 2621 { "signal2", &spufs_signal2_fops, 0666, },
2568 { "signal1_type", &spufs_signal1_type, 0666, }, 2622 { "signal1_type", &spufs_signal1_type, 0666, },
2569 { "signal2_type", &spufs_signal2_type, 0666, }, 2623 { "signal2_type", &spufs_signal2_type, 0666, },
2570 { "cntl", &spufs_cntl_fops, 0666, }, 2624 { "cntl", &spufs_cntl_fops, 0666, },
2571 { "fpcr", &spufs_fpcr_fops, 0666, }, 2625 { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), },
2572 { "lslr", &spufs_lslr_ops, 0444, }, 2626 { "lslr", &spufs_lslr_ops, 0444, },
2573 { "mfc", &spufs_mfc_fops, 0666, }, 2627 { "mfc", &spufs_mfc_fops, 0666, },
2574 { "mss", &spufs_mss_fops, 0666, }, 2628 { "mss", &spufs_mss_fops, 0666, },
@@ -2578,29 +2632,31 @@ struct tree_descr spufs_dir_contents[] = {
2578 { "decr_status", &spufs_decr_status_ops, 0666, }, 2632 { "decr_status", &spufs_decr_status_ops, 0666, },
2579 { "event_mask", &spufs_event_mask_ops, 0666, }, 2633 { "event_mask", &spufs_event_mask_ops, 0666, },
2580 { "event_status", &spufs_event_status_ops, 0444, }, 2634 { "event_status", &spufs_event_status_ops, 0444, },
2581 { "psmap", &spufs_psmap_fops, 0666, }, 2635 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2582 { "phys-id", &spufs_id_ops, 0666, }, 2636 { "phys-id", &spufs_id_ops, 0666, },
2583 { "object-id", &spufs_object_id_ops, 0666, }, 2637 { "object-id", &spufs_object_id_ops, 0666, },
2584 { "mbox_info", &spufs_mbox_info_fops, 0444, }, 2638 { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), },
2585 { "ibox_info", &spufs_ibox_info_fops, 0444, }, 2639 { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), },
2586 { "wbox_info", &spufs_wbox_info_fops, 0444, }, 2640 { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), },
2587 { "dma_info", &spufs_dma_info_fops, 0444, }, 2641 { "dma_info", &spufs_dma_info_fops, 0444,
2588 { "proxydma_info", &spufs_proxydma_info_fops, 0444, }, 2642 sizeof(struct spu_dma_info), },
2643 { "proxydma_info", &spufs_proxydma_info_fops, 0444,
2644 sizeof(struct spu_proxydma_info)},
2589 { "tid", &spufs_tid_fops, 0444, }, 2645 { "tid", &spufs_tid_fops, 0444, },
2590 { "stat", &spufs_stat_fops, 0444, }, 2646 { "stat", &spufs_stat_fops, 0444, },
2591 { "switch_log", &spufs_switch_log_fops, 0444 }, 2647 { "switch_log", &spufs_switch_log_fops, 0444 },
2592 {}, 2648 {},
2593}; 2649};
2594 2650
2595struct tree_descr spufs_dir_nosched_contents[] = { 2651struct spufs_tree_descr spufs_dir_nosched_contents[] = {
2596 { "capabilities", &spufs_caps_fops, 0444, }, 2652 { "capabilities", &spufs_caps_fops, 0444, },
2597 { "mem", &spufs_mem_fops, 0666, }, 2653 { "mem", &spufs_mem_fops, 0666, LS_SIZE, },
2598 { "mbox", &spufs_mbox_fops, 0444, }, 2654 { "mbox", &spufs_mbox_fops, 0444, },
2599 { "ibox", &spufs_ibox_fops, 0444, }, 2655 { "ibox", &spufs_ibox_fops, 0444, },
2600 { "wbox", &spufs_wbox_fops, 0222, }, 2656 { "wbox", &spufs_wbox_fops, 0222, },
2601 { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, 2657 { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), },
2602 { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, 2658 { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), },
2603 { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, 2659 { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), },
2604 { "signal1", &spufs_signal1_nosched_fops, 0222, }, 2660 { "signal1", &spufs_signal1_nosched_fops, 0222, },
2605 { "signal2", &spufs_signal2_nosched_fops, 0222, }, 2661 { "signal2", &spufs_signal2_nosched_fops, 0222, },
2606 { "signal1_type", &spufs_signal1_type, 0666, }, 2662 { "signal1_type", &spufs_signal1_type, 0666, },
@@ -2609,7 +2665,7 @@ struct tree_descr spufs_dir_nosched_contents[] = {
2609 { "mfc", &spufs_mfc_fops, 0666, }, 2665 { "mfc", &spufs_mfc_fops, 0666, },
2610 { "cntl", &spufs_cntl_fops, 0666, }, 2666 { "cntl", &spufs_cntl_fops, 0666, },
2611 { "npc", &spufs_npc_ops, 0666, }, 2667 { "npc", &spufs_npc_ops, 0666, },
2612 { "psmap", &spufs_psmap_fops, 0666, }, 2668 { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, },
2613 { "phys-id", &spufs_id_ops, 0666, }, 2669 { "phys-id", &spufs_id_ops, 0666, },
2614 { "object-id", &spufs_object_id_ops, 0666, }, 2670 { "object-id", &spufs_object_id_ops, 0666, },
2615 { "tid", &spufs_tid_fops, 0444, }, 2671 { "tid", &spufs_tid_fops, 0444, },
@@ -2617,6 +2673,11 @@ struct tree_descr spufs_dir_nosched_contents[] = {
2617 {}, 2673 {},
2618}; 2674};
2619 2675
2676struct spufs_tree_descr spufs_dir_debug_contents[] = {
2677 { ".ctx", &spufs_ctx_fops, 0444, },
2678 {},
2679};
2680
2620struct spufs_coredump_reader spufs_coredump_read[] = { 2681struct spufs_coredump_reader spufs_coredump_read[] = {
2621 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])}, 2682 { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
2622 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) }, 2683 { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index f407b2471855..7123472801d9 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -42,10 +42,19 @@
42 42
43#include "spufs.h" 43#include "spufs.h"
44 44
45struct spufs_sb_info {
46 int debug;
47};
48
45static struct kmem_cache *spufs_inode_cache; 49static struct kmem_cache *spufs_inode_cache;
46char *isolated_loader; 50char *isolated_loader;
47static int isolated_loader_size; 51static int isolated_loader_size;
48 52
53static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
54{
55 return sb->s_fs_info;
56}
57
49static struct inode * 58static struct inode *
50spufs_alloc_inode(struct super_block *sb) 59spufs_alloc_inode(struct super_block *sb)
51{ 60{
@@ -109,7 +118,7 @@ spufs_setattr(struct dentry *dentry, struct iattr *attr)
109static int 118static int
110spufs_new_file(struct super_block *sb, struct dentry *dentry, 119spufs_new_file(struct super_block *sb, struct dentry *dentry,
111 const struct file_operations *fops, int mode, 120 const struct file_operations *fops, int mode,
112 struct spu_context *ctx) 121 size_t size, struct spu_context *ctx)
113{ 122{
114 static struct inode_operations spufs_file_iops = { 123 static struct inode_operations spufs_file_iops = {
115 .setattr = spufs_setattr, 124 .setattr = spufs_setattr,
@@ -125,6 +134,7 @@ spufs_new_file(struct super_block *sb, struct dentry *dentry,
125 ret = 0; 134 ret = 0;
126 inode->i_op = &spufs_file_iops; 135 inode->i_op = &spufs_file_iops;
127 inode->i_fop = fops; 136 inode->i_fop = fops;
137 inode->i_size = size;
128 inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx); 138 inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
129 d_add(dentry, inode); 139 d_add(dentry, inode);
130out: 140out:
@@ -177,7 +187,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir)
177 return simple_rmdir(parent, dir); 187 return simple_rmdir(parent, dir);
178} 188}
179 189
180static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files, 190static int spufs_fill_dir(struct dentry *dir, struct spufs_tree_descr *files,
181 int mode, struct spu_context *ctx) 191 int mode, struct spu_context *ctx)
182{ 192{
183 struct dentry *dentry, *tmp; 193 struct dentry *dentry, *tmp;
@@ -189,7 +199,7 @@ static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
189 if (!dentry) 199 if (!dentry)
190 goto out; 200 goto out;
191 ret = spufs_new_file(dir->d_sb, dentry, files->ops, 201 ret = spufs_new_file(dir->d_sb, dentry, files->ops,
192 files->mode & mode, ctx); 202 files->mode & mode, files->size, ctx);
193 if (ret) 203 if (ret)
194 goto out; 204 goto out;
195 files++; 205 files++;
@@ -279,6 +289,13 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
279 if (ret) 289 if (ret)
280 goto out_free_ctx; 290 goto out_free_ctx;
281 291
292 if (spufs_get_sb_info(dir->i_sb)->debug)
293 ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
294 mode, ctx);
295
296 if (ret)
297 goto out_free_ctx;
298
282 d_instantiate(dentry, inode); 299 d_instantiate(dentry, inode);
283 dget(dentry); 300 dget(dentry);
284 dir->i_nlink++; 301 dir->i_nlink++;
@@ -639,18 +656,19 @@ out:
639 656
640/* File system initialization */ 657/* File system initialization */
641enum { 658enum {
642 Opt_uid, Opt_gid, Opt_mode, Opt_err, 659 Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
643}; 660};
644 661
645static match_table_t spufs_tokens = { 662static match_table_t spufs_tokens = {
646 { Opt_uid, "uid=%d" }, 663 { Opt_uid, "uid=%d" },
647 { Opt_gid, "gid=%d" }, 664 { Opt_gid, "gid=%d" },
648 { Opt_mode, "mode=%o" }, 665 { Opt_mode, "mode=%o" },
649 { Opt_err, NULL }, 666 { Opt_debug, "debug" },
667 { Opt_err, NULL },
650}; 668};
651 669
652static int 670static int
653spufs_parse_options(char *options, struct inode *root) 671spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
654{ 672{
655 char *p; 673 char *p;
656 substring_t args[MAX_OPT_ARGS]; 674 substring_t args[MAX_OPT_ARGS];
@@ -678,6 +696,9 @@ spufs_parse_options(char *options, struct inode *root)
678 return 0; 696 return 0;
679 root->i_mode = option | S_IFDIR; 697 root->i_mode = option | S_IFDIR;
680 break; 698 break;
699 case Opt_debug:
700 spufs_get_sb_info(sb)->debug = 1;
701 break;
681 default: 702 default:
682 return 0; 703 return 0;
683 } 704 }
@@ -736,7 +757,7 @@ spufs_create_root(struct super_block *sb, void *data)
736 SPUFS_I(inode)->i_ctx = NULL; 757 SPUFS_I(inode)->i_ctx = NULL;
737 758
738 ret = -EINVAL; 759 ret = -EINVAL;
739 if (!spufs_parse_options(data, inode)) 760 if (!spufs_parse_options(sb, data, inode))
740 goto out_iput; 761 goto out_iput;
741 762
742 ret = -ENOMEM; 763 ret = -ENOMEM;
@@ -754,6 +775,7 @@ out:
754static int 775static int
755spufs_fill_super(struct super_block *sb, void *data, int silent) 776spufs_fill_super(struct super_block *sb, void *data, int silent)
756{ 777{
778 struct spufs_sb_info *info;
757 static struct super_operations s_ops = { 779 static struct super_operations s_ops = {
758 .alloc_inode = spufs_alloc_inode, 780 .alloc_inode = spufs_alloc_inode,
759 .destroy_inode = spufs_destroy_inode, 781 .destroy_inode = spufs_destroy_inode,
@@ -765,11 +787,16 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
765 787
766 save_mount_options(sb, data); 788 save_mount_options(sb, data);
767 789
790 info = kzalloc(sizeof(*info), GFP_KERNEL);
791 if (!info)
792 return -ENOMEM;
793
768 sb->s_maxbytes = MAX_LFS_FILESIZE; 794 sb->s_maxbytes = MAX_LFS_FILESIZE;
769 sb->s_blocksize = PAGE_CACHE_SIZE; 795 sb->s_blocksize = PAGE_CACHE_SIZE;
770 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 796 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
771 sb->s_magic = SPUFS_MAGIC; 797 sb->s_magic = SPUFS_MAGIC;
772 sb->s_op = &s_ops; 798 sb->s_op = &s_ops;
799 sb->s_fs_info = info;
773 800
774 return spufs_create_root(sb, data); 801 return spufs_create_root(sb, data);
775} 802}
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index b7493b865812..f7edba6cb795 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -27,7 +27,6 @@ void spufs_stop_callback(struct spu *spu, int irq)
27 switch(irq) { 27 switch(irq) {
28 case 0 : 28 case 0 :
29 ctx->csa.class_0_pending = spu->class_0_pending; 29 ctx->csa.class_0_pending = spu->class_0_pending;
30 ctx->csa.class_0_dsisr = spu->class_0_dsisr;
31 ctx->csa.class_0_dar = spu->class_0_dar; 30 ctx->csa.class_0_dar = spu->class_0_dar;
32 break; 31 break;
33 case 1 : 32 case 1 :
@@ -51,18 +50,22 @@ int spu_stopped(struct spu_context *ctx, u32 *stat)
51 u64 dsisr; 50 u64 dsisr;
52 u32 stopped; 51 u32 stopped;
53 52
54 *stat = ctx->ops->status_read(ctx);
55
56 if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
57 return 1;
58
59 stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | 53 stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
60 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; 54 SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
61 if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped)) 55
56top:
57 *stat = ctx->ops->status_read(ctx);
58 if (*stat & stopped) {
59 /*
60 * If the spu hasn't finished stopping, we need to
61 * re-read the register to get the stopped value.
62 */
63 if (*stat & SPU_STATUS_RUNNING)
64 goto top;
62 return 1; 65 return 1;
66 }
63 67
64 dsisr = ctx->csa.class_0_dsisr; 68 if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
65 if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
66 return 1; 69 return 1;
67 70
68 dsisr = ctx->csa.class_1_dsisr; 71 dsisr = ctx->csa.class_1_dsisr;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 745dd51ec37f..34654743363d 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -230,19 +230,23 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
230 ctx->stats.slb_flt_base = spu->stats.slb_flt; 230 ctx->stats.slb_flt_base = spu->stats.slb_flt;
231 ctx->stats.class2_intr_base = spu->stats.class2_intr; 231 ctx->stats.class2_intr_base = spu->stats.class2_intr;
232 232
233 spu_associate_mm(spu, ctx->owner);
234
235 spin_lock_irq(&spu->register_lock);
233 spu->ctx = ctx; 236 spu->ctx = ctx;
234 spu->flags = 0; 237 spu->flags = 0;
235 ctx->spu = spu; 238 ctx->spu = spu;
236 ctx->ops = &spu_hw_ops; 239 ctx->ops = &spu_hw_ops;
237 spu->pid = current->pid; 240 spu->pid = current->pid;
238 spu->tgid = current->tgid; 241 spu->tgid = current->tgid;
239 spu_associate_mm(spu, ctx->owner);
240 spu->ibox_callback = spufs_ibox_callback; 242 spu->ibox_callback = spufs_ibox_callback;
241 spu->wbox_callback = spufs_wbox_callback; 243 spu->wbox_callback = spufs_wbox_callback;
242 spu->stop_callback = spufs_stop_callback; 244 spu->stop_callback = spufs_stop_callback;
243 spu->mfc_callback = spufs_mfc_callback; 245 spu->mfc_callback = spufs_mfc_callback;
244 mb(); 246 spin_unlock_irq(&spu->register_lock);
247
245 spu_unmap_mappings(ctx); 248 spu_unmap_mappings(ctx);
249
246 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); 250 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
247 spu_restore(&ctx->csa, spu); 251 spu_restore(&ctx->csa, spu);
248 spu->timestamp = jiffies; 252 spu->timestamp = jiffies;
@@ -403,6 +407,8 @@ static int has_affinity(struct spu_context *ctx)
403 */ 407 */
404static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) 408static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
405{ 409{
410 u32 status;
411
406 spu_context_trace(spu_unbind_context__enter, ctx, spu); 412 spu_context_trace(spu_unbind_context__enter, ctx, spu);
407 413
408 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); 414 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
@@ -423,18 +429,22 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
423 spu_unmap_mappings(ctx); 429 spu_unmap_mappings(ctx);
424 spu_save(&ctx->csa, spu); 430 spu_save(&ctx->csa, spu);
425 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); 431 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
432
433 spin_lock_irq(&spu->register_lock);
426 spu->timestamp = jiffies; 434 spu->timestamp = jiffies;
427 ctx->state = SPU_STATE_SAVED; 435 ctx->state = SPU_STATE_SAVED;
428 spu->ibox_callback = NULL; 436 spu->ibox_callback = NULL;
429 spu->wbox_callback = NULL; 437 spu->wbox_callback = NULL;
430 spu->stop_callback = NULL; 438 spu->stop_callback = NULL;
431 spu->mfc_callback = NULL; 439 spu->mfc_callback = NULL;
432 spu_associate_mm(spu, NULL);
433 spu->pid = 0; 440 spu->pid = 0;
434 spu->tgid = 0; 441 spu->tgid = 0;
435 ctx->ops = &spu_backing_ops; 442 ctx->ops = &spu_backing_ops;
436 spu->flags = 0; 443 spu->flags = 0;
437 spu->ctx = NULL; 444 spu->ctx = NULL;
445 spin_unlock_irq(&spu->register_lock);
446
447 spu_associate_mm(spu, NULL);
438 448
439 ctx->stats.slb_flt += 449 ctx->stats.slb_flt +=
440 (spu->stats.slb_flt - ctx->stats.slb_flt_base); 450 (spu->stats.slb_flt - ctx->stats.slb_flt_base);
@@ -444,6 +454,9 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
444 /* This maps the underlying spu state to idle */ 454 /* This maps the underlying spu state to idle */
445 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); 455 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
446 ctx->spu = NULL; 456 ctx->spu = NULL;
457
458 if (spu_stopped(ctx, &status))
459 wake_up_all(&ctx->stop_wq);
447} 460}
448 461
449/** 462/**
@@ -886,7 +899,8 @@ static noinline void spusched_tick(struct spu_context *ctx)
886 spu_add_to_rq(ctx); 899 spu_add_to_rq(ctx);
887 } else { 900 } else {
888 spu_context_nospu_trace(spusched_tick__newslice, ctx); 901 spu_context_nospu_trace(spusched_tick__newslice, ctx);
889 ctx->time_slice++; 902 if (!ctx->time_slice)
903 ctx->time_slice++;
890 } 904 }
891out: 905out:
892 spu_release(ctx); 906 spu_release(ctx);
@@ -980,6 +994,7 @@ void spuctx_switch_state(struct spu_context *ctx,
980 struct timespec ts; 994 struct timespec ts;
981 struct spu *spu; 995 struct spu *spu;
982 enum spu_utilization_state old_state; 996 enum spu_utilization_state old_state;
997 int node;
983 998
984 ktime_get_ts(&ts); 999 ktime_get_ts(&ts);
985 curtime = timespec_to_ns(&ts); 1000 curtime = timespec_to_ns(&ts);
@@ -1001,6 +1016,11 @@ void spuctx_switch_state(struct spu_context *ctx,
1001 spu->stats.times[old_state] += delta; 1016 spu->stats.times[old_state] += delta;
1002 spu->stats.util_state = new_state; 1017 spu->stats.util_state = new_state;
1003 spu->stats.tstamp = curtime; 1018 spu->stats.tstamp = curtime;
1019 node = spu->node;
1020 if (old_state == SPU_UTIL_USER)
1021 atomic_dec(&cbe_spu_info[node].busy_spus);
 1022 if (new_state == SPU_UTIL_USER)
1023 atomic_inc(&cbe_spu_info[node].busy_spus);
1004 } 1024 }
1005} 1025}
1006 1026
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 454c277c1457..8ae8ef9dfc22 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -32,6 +32,13 @@
32#include <asm/spu_csa.h> 32#include <asm/spu_csa.h>
33#include <asm/spu_info.h> 33#include <asm/spu_info.h>
34 34
35#define SPUFS_PS_MAP_SIZE 0x20000
36#define SPUFS_MFC_MAP_SIZE 0x1000
 37#define SPUFS_CNTL_MAP_SIZE 0x1000
39#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE
40#define SPUFS_MSS_MAP_SIZE 0x1000
41
35/* The magic number for our file system */ 42/* The magic number for our file system */
36enum { 43enum {
37 SPUFS_MAGIC = 0x23c9b64e, 44 SPUFS_MAGIC = 0x23c9b64e,
@@ -228,8 +235,16 @@ struct spufs_inode_info {
228#define SPUFS_I(inode) \ 235#define SPUFS_I(inode) \
229 container_of(inode, struct spufs_inode_info, vfs_inode) 236 container_of(inode, struct spufs_inode_info, vfs_inode)
230 237
231extern struct tree_descr spufs_dir_contents[]; 238struct spufs_tree_descr {
232extern struct tree_descr spufs_dir_nosched_contents[]; 239 const char *name;
240 const struct file_operations *ops;
241 int mode;
242 size_t size;
243};
244
245extern struct spufs_tree_descr spufs_dir_contents[];
246extern struct spufs_tree_descr spufs_dir_nosched_contents[];
247extern struct spufs_tree_descr spufs_dir_debug_contents[];
233 248
234/* system call implementation */ 249/* system call implementation */
235extern struct spufs_calls spufs_calls; 250extern struct spufs_calls spufs_calls;
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 53202422ba72..8c0e95766a62 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -182,10 +182,10 @@ struct spu_probe spu_probes[] = {
182 { "spu_yield__enter", "ctx %p", spu_context_nospu_event }, 182 { "spu_yield__enter", "ctx %p", spu_context_nospu_event },
183 { "spu_deactivate__enter", "ctx %p", spu_context_nospu_event }, 183 { "spu_deactivate__enter", "ctx %p", spu_context_nospu_event },
184 { "__spu_deactivate__unload", "ctx %p spu %p", spu_context_event }, 184 { "__spu_deactivate__unload", "ctx %p spu %p", spu_context_event },
185 { "spufs_ps_nopfn__enter", "ctx %p", spu_context_nospu_event }, 185 { "spufs_ps_fault__enter", "ctx %p", spu_context_nospu_event },
186 { "spufs_ps_nopfn__sleep", "ctx %p", spu_context_nospu_event }, 186 { "spufs_ps_fault__sleep", "ctx %p", spu_context_nospu_event },
187 { "spufs_ps_nopfn__wake", "ctx %p spu %p", spu_context_event }, 187 { "spufs_ps_fault__wake", "ctx %p spu %p", spu_context_event },
188 { "spufs_ps_nopfn__insert", "ctx %p spu %p", spu_context_event }, 188 { "spufs_ps_fault__insert", "ctx %p spu %p", spu_context_event },
189 { "spu_acquire_saved__enter", "ctx %p", spu_context_nospu_event }, 189 { "spu_acquire_saved__enter", "ctx %p", spu_context_nospu_event },
190 { "destroy_spu_context__enter", "ctx %p", spu_context_nospu_event }, 190 { "destroy_spu_context__enter", "ctx %p", spu_context_nospu_event },
191 { "spufs_stop_callback__enter", "ctx %p spu %p", spu_context_event }, 191 { "spufs_stop_callback__enter", "ctx %p spu %p", spu_context_event },