author     Ben Hutchings <bhutchings@solarflare.com>  2009-03-20 09:26:41 -0400
committer  David S. Miller <davem@davemloft.net>      2009-03-21 22:06:55 -0400
commit     a9de9a74c69f75e9456cd6b45ecab44ff4c81d04
tree       86c231f2a8f831f4c3dc55bdf42a63a08c0df2cf
parent     bb145a9e28c32a37f35308bb32180b59e358a3a1
sfc: Work around unreliable legacy interrupt status
In rare cases, reading the legacy interrupt status register can
acknowledge an event queue whose attention flag has not yet been set
in the register. Until we service this event queue it will not
generate any more interrupts. Therefore, as a secondary check, poll
the next slot in each active event queue whose flag is not set.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
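To make the shape of the workaround concrete, here is a small standalone C sketch of the scheduling loop described above. It is not driver code: NUM_QUEUES, event_present[] and schedule_queue() are invented for the example, standing in for the real event queues, the per-queue "event present" peek and efx_schedule_channel().

/* Standalone sketch of the secondary check described above.
 * Illustrative only: the queue layout and helper names are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

/* Whether the next slot in each event queue already holds an event. */
static bool event_present[NUM_QUEUES] = { false, true, false, false };

static void schedule_queue(int q)
{
        printf("scheduling queue %d\n", q);
}

/* queues: attention bits read from the (unreliable) status register. */
static int handle_legacy_irq(uint32_t queues)
{
        int handled = 0;

        for (int q = 0; q < NUM_QUEUES; q++) {
                /* Schedule if the flag is set, or, as the secondary check,
                 * if the queue's next event slot is already populated even
                 * though its attention flag never made it into the register.
                 */
                if ((queues & 1) || event_present[q]) {
                        schedule_queue(q);
                        handled = 1;
                }
                queues >>= 1;
        }
        return handled; /* 0 maps to IRQ_NONE, 1 to IRQ_HANDLED */
}

int main(void)
{
        /* The register only flags queue 0, but queue 1 also has a pending
         * event whose flag was silently acknowledged; both get scheduled.
         */
        return handle_legacy_irq(0x1) ? 0 : 1;
}

The point is that a queue whose attention bit was lost is still scheduled because its next event slot is already populated, and the handler only reports that it handled the interrupt when it actually found work.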
 drivers/net/sfc/falcon.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 2ae51fd6f9c1..92ea6147b3f2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1435,6 +1435,7 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 {
         struct efx_nic *efx = dev_id;
         efx_oword_t *int_ker = efx->irq_status.addr;
+        irqreturn_t result = IRQ_NONE;
         struct efx_channel *channel;
         efx_dword_t reg;
         u32 queues;
@@ -1449,23 +1450,24 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
         if (unlikely(syserr))
                 return falcon_fatal_interrupt(efx);
 
-        if (queues == 0)
-                return IRQ_NONE;
-
-        efx->last_irq_cpu = raw_smp_processor_id();
-        EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
-                  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-
         /* Schedule processing of any interrupting queues */
-        channel = &efx->channel[0];
-        while (queues) {
-                if (queues & 0x01)
+        efx_for_each_channel(channel, efx) {
+                if ((queues & 1) ||
+                    falcon_event_present(
+                            falcon_event(channel, channel->eventq_read_ptr))) {
                         efx_schedule_channel(channel);
-                channel++;
+                        result = IRQ_HANDLED;
+                }
                 queues >>= 1;
         }
 
-        return IRQ_HANDLED;
+        if (result == IRQ_HANDLED) {
+                efx->last_irq_cpu = raw_smp_processor_id();
+                EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+                          irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+        }
+
+        return result;
 }
 
 
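A note on the secondary check itself: falcon_event_present(falcon_event(channel, channel->eventq_read_ptr)) peeks at the next descriptor in the channel's event queue. In this driver an empty slot reads back as all-ones, so "an event is present" roughly means "the slot is no longer all-ones"; the sketch below assumes that convention and uses an invented fake_event type rather than the driver's efx_qword_t.

/* Rough sketch of an "event present?" test, assuming empty event-queue
 * slots are left as an all-ones pattern.  Not the driver's exact code.
 */
#include <stdbool.h>
#include <stdint.h>

struct fake_event {                     /* stand-in for the 8-byte descriptor */
        uint32_t dword[2];
};

static bool fake_event_present(const struct fake_event *ev)
{
        /* A slot still reading back as all-ones has not been written by
         * the hardware, so there is no event to service there yet.
         */
        return !(ev->dword[0] == 0xffffffff && ev->dword[1] == 0xffffffff);
}

The result variable preserves the old IRQ_NONE behaviour now that the early "queues == 0" exit is gone: if neither the register flags nor the peek finds any work, the handler still declines the interrupt, which keeps the kernel's spurious-interrupt accounting meaningful on a shared legacy line.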