author		Ingo Molnar <mingo@elte.hu>	2008-12-25 07:51:46 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-25 07:51:46 -0500
commit		0b271ef4521756010675b1611bef20fd3096790d (patch)
tree		2c9d22a2c74122a9904e533df27f41d63ffef394 /arch/powerpc/platforms
parent		b19b3c74c7bbec45a848631b8f970ac110665a01 (diff)
parent		4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff)
Merge commit 'v2.6.28' into core/core
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--	arch/powerpc/platforms/cell/axon_msi.c	39
-rw-r--r--	arch/powerpc/platforms/cell/smp.c	9
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c	3
3 files changed, 44 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 896548ba1ca1..0ce45c2b42f8 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -95,6 +95,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	struct axon_msic *msic = get_irq_data(irq);
 	u32 write_offset, msi;
 	int idx;
+	int retry = 0;
 
 	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
 	pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
@@ -102,7 +103,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	/* write_offset doesn't wrap properly, so we have to mask it */
 	write_offset &= MSIC_FIFO_SIZE_MASK;
 
-	while (msic->read_offset != write_offset) {
+	while (msic->read_offset != write_offset && retry < 100) {
 		idx = msic->read_offset / sizeof(__le32);
 		msi = le32_to_cpu(msic->fifo_virt[idx]);
 		msi &= 0xFFFF;
@@ -110,13 +111,37 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_debug("axon_msi: woff %x roff %x msi %x\n",
 			 write_offset, msic->read_offset, msi);
 
+		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+			generic_handle_irq(msi);
+			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
+		} else {
+			/*
+			 * Reading the MSIC_WRITE_OFFSET_REG does not
+			 * reliably flush the outstanding DMA to the
+			 * FIFO buffer. Here we were reading stale
+			 * data, so we need to retry.
+			 */
+			udelay(1);
+			retry++;
+			pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+			continue;
+		}
+
+		if (retry) {
+			pr_debug("axon_msi: late irq 0x%x, retry %d\n",
+				 msi, retry);
+			retry = 0;
+		}
+
 		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
 		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+	}
 
-		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
-			generic_handle_irq(msi);
-		else
-			pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+	if (retry) {
+		printk(KERN_WARNING "axon_msi: irq timed out\n");
+
+		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
 	}
 
 	desc->chip->eoi(irq);
@@ -364,6 +389,7 @@ static int axon_msi_probe(struct of_device *device,
 			dn->full_name);
 		goto out_free_fifo;
 	}
+	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
 	msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
 					NR_IRQS, &msic_host_ops, 0);
@@ -387,6 +413,9 @@ static int axon_msi_probe(struct of_device *device,
 		  MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
 		  MSIC_CTRL_FIFO_SIZE);
 
+	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
+				& MSIC_FIFO_SIZE_MASK;
+
 	device->dev.platform_data = msic;
 
 	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
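
The axon_msi.c hunks above work around DMA writes to the MSIC FIFO that land later than the write-offset register suggests: the probe path pre-fills the FIFO with 0xffffffff, the cascade handler re-poisons each slot after servicing it, and a slot still holding an out-of-range value is treated as a stale read, retried up to 100 times with a 1us delay before the slot is skipped. A minimal userspace sketch of this poisoned-sentinel, bounded-retry consumer pattern (sizes, names, and output are illustrative, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define FIFO_ENTRIES	8
	#define INVALID_MSI	0xffffffffu	/* sentinel: slot not yet written by DMA */
	#define MAX_RETRIES	100

	static uint32_t fifo[FIFO_ENTRIES];
	static unsigned int read_off;

	static void consume(unsigned int write_off)
	{
		int retry = 0;

		while (read_off != write_off && retry < MAX_RETRIES) {
			uint32_t msi = fifo[read_off];

			if (msi != INVALID_MSI) {
				printf("handled msi %u\n", msi);
				fifo[read_off] = INVALID_MSI;	/* re-poison the slot */
			} else {
				usleep(1);	/* stale read: the write hasn't landed yet */
				retry++;
				continue;	/* retry the same slot */
			}

			retry = 0;	/* a valid entry resets the retry budget */
			read_off = (read_off + 1) % FIFO_ENTRIES;
		}

		if (retry) {	/* gave up: skip the slot so we don't spin forever */
			fprintf(stderr, "fifo: entry timed out\n");
			read_off = (read_off + 1) % FIFO_ENTRIES;
		}
	}

	int main(void)
	{
		memset(fifo, 0xff, sizeof(fifo));	/* pre-poison, as the probe path does */
		fifo[0] = 42;
		fifo[1] = 7;
		consume(2);
		return 0;
	}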
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index c0d86e1f56ea..9046803c8276 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -129,10 +129,15 @@ static int __init smp_iic_probe(void)
 	return cpus_weight(cpu_possible_map);
 }
 
-static void __devinit smp_iic_setup_cpu(int cpu)
+static void __devinit smp_cell_setup_cpu(int cpu)
 {
 	if (cpu != boot_cpuid)
 		iic_setup_cpu();
+
+	/*
+	 * change default DABRX to allow user watchpoints
+	 */
+	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
 }
 
 static DEFINE_SPINLOCK(timebase_lock);
@@ -192,7 +197,7 @@ static struct smp_ops_t bpa_iic_smp_ops = {
 	.message_pass	= smp_iic_message_pass,
 	.probe		= smp_iic_probe,
 	.kick_cpu	= smp_cell_kick_cpu,
-	.setup_cpu	= smp_iic_setup_cpu,
+	.setup_cpu	= smp_cell_setup_cpu,
 	.cpu_bootable	= smp_cell_cpu_bootable,
 };
 
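
The smp.c hunks rename the per-CPU setup hook (it now does more than IIC setup) and widen the default DABRX so that DABR data watchpoints match problem-state (user) accesses as well as privileged ones; without DABRX_USER set, ptrace watchpoints on user addresses never fire. A rough freestanding sketch of what that single mtspr() amounts to, assuming GCC on PowerPC in privileged state (the real constants and the real mtspr macro live in the kernel's asm/reg.h; the values below are shown for illustration):

	#define SPRN_DABRX	0x3F7		/* Data Address Breakpoint Register eXtension */
	#define DABRX_USER	(1UL << 0)	/* match problem-state (user) accesses */
	#define DABRX_KERNEL	(1UL << 1)	/* match privileged (kernel) accesses */

	/* mtspr takes the SPR number as an immediate operand */
	#define mtspr(rn, v)	asm volatile("mtspr %0,%1" : : "i"(rn), "r"(v) : "memory")

	static inline void allow_user_watchpoints(void)
	{
		/* enable watchpoint matching in both privilege states */
		mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
	}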
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index b73c369cc6f1..1b26071a86ca 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -390,6 +390,9 @@ static int spufs_ps_fault(struct vm_area_struct *vma,
 	if (offset >= ps_size)
 		return VM_FAULT_SIGBUS;
 
+	if (fatal_signal_pending(current))
+		return VM_FAULT_SIGBUS;
+
 	/*
 	 * Because we release the mmap_sem, the context may be destroyed while
 	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
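
The spufs hunk makes the problem-state fault handler fail fast with VM_FAULT_SIGBUS when the faulting task has already received a fatal signal, instead of blocking in spu_wait where a SIGKILL'd task could otherwise get stuck. The same check-before-you-block shape, modelled in plain userspace C with a signal flag standing in for fatal_signal_pending(current) (illustrative only, not the spufs code):

	#include <signal.h>
	#include <stdbool.h>
	#include <stdio.h>

	static volatile sig_atomic_t fatal_pending;

	static void on_fatal(int sig)
	{
		(void)sig;
		fatal_pending = 1;	/* stands in for fatal_signal_pending(current) */
	}

	/* Returns false (the analogue of VM_FAULT_SIGBUS) rather than
	 * blocking a task that has already been told to die. */
	static bool wait_for_device(void)
	{
		if (fatal_pending)
			return false;

		/* ... otherwise it is safe to sleep until the device is ready ... */
		return true;
	}

	int main(void)
	{
		signal(SIGTERM, on_fatal);
		printf("wait ok: %d\n", wait_for_device());
		return 0;
	}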