 arch/powerpc/platforms/cell/axon_msi.c | 36 +++++++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 896548ba1ca1..442cf36aa172 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -95,6 +95,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	struct axon_msic *msic = get_irq_data(irq);
 	u32 write_offset, msi;
 	int idx;
+	int retry = 0;
 
 	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
 	pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
@@ -102,7 +103,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 	/* write_offset doesn't wrap properly, so we have to mask it */
 	write_offset &= MSIC_FIFO_SIZE_MASK;
 
-	while (msic->read_offset != write_offset) {
+	while (msic->read_offset != write_offset && retry < 100) {
 		idx = msic->read_offset / sizeof(__le32);
 		msi = le32_to_cpu(msic->fifo_virt[idx]);
 		msi &= 0xFFFF;
@@ -110,13 +111,37 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_debug("axon_msi: woff %x roff %x msi %x\n",
 			 write_offset, msic->read_offset, msi);
 
+		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+			generic_handle_irq(msi);
+			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
+		} else {
+			/*
+			 * Reading the MSIC_WRITE_OFFSET_REG does not
+			 * reliably flush the outstanding DMA to the
+			 * FIFO buffer. Here we were reading stale
+			 * data, so we need to retry.
+			 */
+			udelay(1);
+			retry++;
+			pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+			continue;
+		}
+
+		if (retry) {
+			pr_debug("axon_msi: late irq 0x%x, retry %d\n",
+				 msi, retry);
+			retry = 0;
+		}
+
 		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
 		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+	}
 
-	if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
-		generic_handle_irq(msi);
-	else
-		pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+	if (retry) {
+		printk(KERN_WARNING "axon_msi: irq timed out\n");
+
+		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
 	}
 
 	desc->chip->eoi(irq);
@@ -364,6 +389,7 @@ static int axon_msi_probe(struct of_device *device,
 			dn->full_name);
 		goto out_free_fifo;
 	}
+	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
 	msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
 					NR_IRQS, &msic_host_ops, 0);
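
For context, the change above is an instance of a common bounded-retry pattern for draining a DMA-written FIFO: when the entry at the read pointer looks stale, the handler delays briefly and re-reads the same slot instead of advancing, resets its retry budget on every good entry, gives up after a fixed number of attempts, and writes a sentinel (0xffffffff) back into consumed slots so stale data is detectable. A minimal stand-alone sketch of that pattern follows; it is an illustration only, the names (drain_fifo, SLOT_EMPTY, MAX_RETRIES, and so on) are hypothetical rather than taken from the kernel source, and the validity check is simplified to a direct comparison against the sentinel.

/*
 * Minimal sketch of a bounded-retry FIFO drain, assuming a user-space
 * stand-in for the DMA-written buffer. Not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FIFO_SLOTS  8
#define SLOT_EMPTY  0xffffffffu	/* sentinel written back after consuming */
#define MAX_RETRIES 100

static uint32_t fifo[FIFO_SLOTS];	/* stands in for the DMA-written FIFO */

/* Drain entries between the read and write index, retrying on stale reads. */
static unsigned int drain_fifo(unsigned int read_idx, unsigned int write_idx)
{
	int retry = 0;

	while (read_idx != write_idx && retry < MAX_RETRIES) {
		uint32_t entry = fifo[read_idx];

		if (entry != SLOT_EMPTY) {
			printf("handling entry 0x%x from slot %u\n",
			       (unsigned int)entry, read_idx);
			fifo[read_idx] = SLOT_EMPTY;	/* mark slot consumed */
		} else {
			/* Stale data: back off briefly and re-read the slot. */
			usleep(1);
			retry++;
			continue;
		}

		retry = 0;	/* a good entry resets the retry budget */
		read_idx = (read_idx + 1) % FIFO_SLOTS;
	}

	if (retry) {
		/* Gave up: skip the bad slot so we do not spin forever. */
		fprintf(stderr, "drain_fifo: entry timed out\n");
		read_idx = (read_idx + 1) % FIFO_SLOTS;
	}

	return read_idx;
}

int main(void)
{
	unsigned int i, new_read;

	for (i = 0; i < FIFO_SLOTS; i++)
		fifo[i] = SLOT_EMPTY;
	fifo[0] = 0x1234;	/* pretend two entries were written by the device */
	fifo[1] = 0x5678;

	new_read = drain_fifo(0, 2);
	printf("read index advanced to %u\n", new_read);
	return 0;
}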
