author		Arnd Bergmann <arnd@arndb.de>		2008-11-28 04:51:22 -0500
committer	Paul Mackerras <paulus@samba.org>	2008-11-30 17:40:18 -0500
commit		d015fe9951641b2d869a7ae4a690be2a05a9dc7f (patch)
tree		7b3016f485758111e124c77da23a282a1b71757d /arch/powerpc/platforms
parent		4a6186696e7f15b3ea4dafcdb64ee0703e0e4487 (diff)
powerpc/cell/axon-msi: Retry on missing interrupt
The MSI capture logic for the axon bridge can sometimes
lose interrupts under high DMA and interrupt load, when
it signals an MSI interrupt to the MPIC interrupt
controller while we are already handling another MSI.
Each MSI vector gets written into a FIFO buffer in main
memory using DMA, and that DMA access is normally flushed
by the actual interrupt packet on the IOIF. An MMIO
register in the MSIC holds the position of the last
entry in the FIFO buffer that was written. However,
reading that position does not flush the DMA, so we
may observe stale data in the buffer.
In a stress test, we have observed the DMA to arrive
up to 14 microseconds after reading the register.
This patch works around this problem by retrying the
access to the FIFO buffer.
We can reliably detect the condition by writing
an invalid MSI vector into the FIFO buffer after
reading from it, assuming that all MSIs we get
are valid. After detecting an invalid MSI vector,
we udelay(1) in the interrupt cascade up to
100 times before giving up.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Paul Mackerras <paulus@samba.org>
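
The detection scheme is easiest to see in isolation. What follows is a
minimal, hypothetical userspace C sketch of the same idea, not the kernel
code itself: every consumed FIFO slot is poisoned with a sentinel that can
never be a valid MSI vector, so re-reading a slot before the DMA write has
landed yields the sentinel and can be retried a bounded number of times.
All names here (fifo, drain_fifo, MAX_RETRIES, ...) are made up for
illustration.

#include <stdint.h>
#include <stdio.h>

#define FIFO_ENTRIES  8
#define INVALID_MSI   0xffffffffu  /* sentinel, never a valid MSI vector */
#define MAX_RETRIES   100

static uint32_t fifo[FIFO_ENTRIES];  /* stands in for the DMA-written FIFO */

/* Drain the FIFO up to write_idx; returns 0 on success, -1 on timeout. */
static int drain_fifo(unsigned int *read_idx, unsigned int write_idx)
{
        int retry = 0;

        while (*read_idx != write_idx && retry < MAX_RETRIES) {
                uint32_t entry = fifo[*read_idx];

                if (entry == INVALID_MSI) {
                        /*
                         * Stale read: the DMA write has not landed yet.
                         * The kernel patch does udelay(1) here before
                         * re-reading the same slot.
                         */
                        retry++;
                        continue;
                }

                printf("handling msi 0x%x\n", entry & 0xffff);

                /* Poison the slot so a later stale re-read is detectable. */
                fifo[*read_idx] = INVALID_MSI;
                retry = 0;

                *read_idx = (*read_idx + 1) % FIFO_ENTRIES;
        }

        return retry ? -1 : 0;
}

int main(void)
{
        unsigned int read_idx = 0;

        /* Start fully poisoned, like the patch's memset(..., 0xff, ...). */
        for (int i = 0; i < FIFO_ENTRIES; i++)
                fifo[i] = INVALID_MSI;

        fifo[0] = 0x25;         /* pretend DMA delivered one vector */
        return drain_fifo(&read_idx, 1);
}

The key design point, as in the patch below, is that a slot is poisoned
only after its entry has been handled, so a slot never reads as invalid
while it still holds an undelivered vector.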
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--	arch/powerpc/platforms/cell/axon_msi.c | 36 +++++++++++++++++++++++++++++++-----
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 896548ba1ca1..442cf36aa172 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -95,6 +95,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
         struct axon_msic *msic = get_irq_data(irq);
         u32 write_offset, msi;
         int idx;
+        int retry = 0;
 
         write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
         pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
@@ -102,7 +103,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
         /* write_offset doesn't wrap properly, so we have to mask it */
         write_offset &= MSIC_FIFO_SIZE_MASK;
 
-        while (msic->read_offset != write_offset) {
+        while (msic->read_offset != write_offset && retry < 100) {
                 idx  = msic->read_offset / sizeof(__le32);
                 msi  = le32_to_cpu(msic->fifo_virt[idx]);
                 msi &= 0xFFFF;
@@ -110,13 +111,37 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
                 pr_debug("axon_msi: woff %x roff %x msi %x\n",
                          write_offset, msic->read_offset, msi);
 
+                if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
+                        generic_handle_irq(msi);
+                        msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
+                } else {
+                        /*
+                         * Reading the MSIC_WRITE_OFFSET_REG does not
+                         * reliably flush the outstanding DMA to the
+                         * FIFO buffer. Here we were reading stale
+                         * data, so we need to retry.
+                         */
+                        udelay(1);
+                        retry++;
+                        pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+                        continue;
+                }
+
+                if (retry) {
+                        pr_debug("axon_msi: late irq 0x%x, retry %d\n",
+                                 msi, retry);
+                        retry = 0;
+                }
+
                 msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
                 msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+        }
 
-                if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
-                        generic_handle_irq(msi);
-                else
-                        pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
+        if (retry) {
+                printk(KERN_WARNING "axon_msi: irq timed out\n");
+
+                msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+                msic->read_offset &= MSIC_FIFO_SIZE_MASK;
         }
 
         desc->chip->eoi(irq);
@@ -364,6 +389,7 @@ static int axon_msi_probe(struct of_device *device,
                        dn->full_name);
                 goto out_free_fifo;
         }
+        memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
         msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
                                         NR_IRQS, &msic_host_ops, 0);