diff options
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- | drivers/net/ethernet/sfc/efx.h | 6 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/falcon.c | 4 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/net_driver.h | 15 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/nic.c | 27 | ||||
-rw-r--r-- | drivers/net/ethernet/sfc/selftest.c | 12 |
5 files changed, 36 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index a3541ac6ea01..e0b66d158d79 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h | |||
@@ -145,6 +145,12 @@ static inline void efx_schedule_channel(struct efx_channel *channel) | |||
145 | napi_schedule(&channel->napi_str); | 145 | napi_schedule(&channel->napi_str); |
146 | } | 146 | } |
147 | 147 | ||
148 | static inline void efx_schedule_channel_irq(struct efx_channel *channel) | ||
149 | { | ||
150 | channel->last_irq_cpu = raw_smp_processor_id(); | ||
151 | efx_schedule_channel(channel); | ||
152 | } | ||
153 | |||
148 | extern void efx_link_status_changed(struct efx_nic *efx); | 154 | extern void efx_link_status_changed(struct efx_nic *efx); |
149 | extern void efx_link_set_advertising(struct efx_nic *efx, u32); | 155 | extern void efx_link_set_advertising(struct efx_nic *efx, u32); |
150 | extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); | 156 | extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); |
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index b4e91edec0fa..98285115df10 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c | |||
@@ -189,9 +189,9 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
189 | falcon_irq_ack_a1(efx); | 189 | falcon_irq_ack_a1(efx); |
190 | 190 | ||
191 | if (queues & 1) | 191 | if (queues & 1) |
192 | efx_schedule_channel(efx_get_channel(efx, 0)); | 192 | efx_schedule_channel_irq(efx_get_channel(efx, 0)); |
193 | if (queues & 2) | 193 | if (queues & 2) |
194 | efx_schedule_channel(efx_get_channel(efx, 1)); | 194 | efx_schedule_channel_irq(efx_get_channel(efx, 1)); |
195 | return IRQ_HANDLED; | 195 | return IRQ_HANDLED; |
196 | } | 196 | } |
197 | /************************************************************************** | 197 | /************************************************************************** |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 8ce4d068bba5..a4cf8cb8180c 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
@@ -325,6 +325,7 @@ enum efx_rx_alloc_method { | |||
325 | * @eventq_mask: Event queue pointer mask | 325 | * @eventq_mask: Event queue pointer mask |
326 | * @eventq_read_ptr: Event queue read pointer | 326 | * @eventq_read_ptr: Event queue read pointer |
327 | * @last_eventq_read_ptr: Last event queue read pointer value. | 327 | * @last_eventq_read_ptr: Last event queue read pointer value. |
328 | * @last_irq_cpu: Last CPU to handle interrupt for this channel | ||
328 | * @irq_count: Number of IRQs since last adaptive moderation decision | 329 | * @irq_count: Number of IRQs since last adaptive moderation decision |
329 | * @irq_mod_score: IRQ moderation score | 330 | * @irq_mod_score: IRQ moderation score |
330 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | 331 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors |
@@ -355,6 +356,7 @@ struct efx_channel { | |||
355 | unsigned int eventq_read_ptr; | 356 | unsigned int eventq_read_ptr; |
356 | unsigned int last_eventq_read_ptr; | 357 | unsigned int last_eventq_read_ptr; |
357 | 358 | ||
359 | int last_irq_cpu; | ||
358 | unsigned int irq_count; | 360 | unsigned int irq_count; |
359 | unsigned int irq_mod_score; | 361 | unsigned int irq_mod_score; |
360 | #ifdef CONFIG_RFS_ACCEL | 362 | #ifdef CONFIG_RFS_ACCEL |
@@ -648,7 +650,7 @@ struct efx_filter_state; | |||
648 | * @int_error_expire: Time at which error count will be expired | 650 | * @int_error_expire: Time at which error count will be expired |
649 | * @irq_status: Interrupt status buffer | 651 | * @irq_status: Interrupt status buffer |
650 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 | 652 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 |
651 | * @fatal_irq_level: IRQ level (bit number) used for serious errors | 653 | * @irq_level: IRQ level/index for IRQs not triggered by an event queue |
652 | * @mtd_list: List of MTDs attached to the NIC | 654 | * @mtd_list: List of MTDs attached to the NIC |
653 | * @nic_data: Hardware dependent state | 655 | * @nic_data: Hardware dependent state |
654 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, | 656 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, |
@@ -679,10 +681,9 @@ struct efx_filter_state; | |||
679 | * @loopback_selftest: Offline self-test private state | 681 | * @loopback_selftest: Offline self-test private state |
680 | * @monitor_work: Hardware monitor workitem | 682 | * @monitor_work: Hardware monitor workitem |
681 | * @biu_lock: BIU (bus interface unit) lock | 683 | * @biu_lock: BIU (bus interface unit) lock |
682 | * @last_irq_cpu: Last CPU to handle interrupt. | 684 | * @last_irq_cpu: Last CPU to handle a possible test interrupt. This |
683 | * This register is written with the SMP processor ID whenever an | 685 | * field is used by efx_test_interrupts() to verify that an |
684 | * interrupt is handled. It is used by efx_nic_test_interrupt() | 686 | * interrupt has occurred. |
685 | * to verify that an interrupt has occurred. | ||
686 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | 687 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count |
687 | * @mac_stats: MAC statistics. These include all statistics the MACs | 688 | * @mac_stats: MAC statistics. These include all statistics the MACs |
688 | * can provide. Generic code converts these into a standard | 689 | * can provide. Generic code converts these into a standard |
@@ -735,7 +736,7 @@ struct efx_nic { | |||
735 | 736 | ||
736 | struct efx_buffer irq_status; | 737 | struct efx_buffer irq_status; |
737 | unsigned irq_zero_count; | 738 | unsigned irq_zero_count; |
738 | unsigned fatal_irq_level; | 739 | unsigned irq_level; |
739 | 740 | ||
740 | #ifdef CONFIG_SFC_MTD | 741 | #ifdef CONFIG_SFC_MTD |
741 | struct list_head mtd_list; | 742 | struct list_head mtd_list; |
@@ -779,7 +780,7 @@ struct efx_nic { | |||
779 | 780 | ||
780 | struct delayed_work monitor_work ____cacheline_aligned_in_smp; | 781 | struct delayed_work monitor_work ____cacheline_aligned_in_smp; |
781 | spinlock_t biu_lock; | 782 | spinlock_t biu_lock; |
782 | volatile signed int last_irq_cpu; | 783 | int last_irq_cpu; |
783 | unsigned n_rx_nodesc_drop_cnt; | 784 | unsigned n_rx_nodesc_drop_cnt; |
784 | struct efx_mac_stats mac_stats; | 785 | struct efx_mac_stats mac_stats; |
785 | spinlock_t stats_lock; | 786 | spinlock_t stats_lock; |
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index bf07bd0488cf..de7aa1c8ebda 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -1311,7 +1311,7 @@ static inline void efx_nic_interrupts(struct efx_nic *efx, | |||
1311 | efx_oword_t int_en_reg_ker; | 1311 | efx_oword_t int_en_reg_ker; |
1312 | 1312 | ||
1313 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | 1313 | EFX_POPULATE_OWORD_3(int_en_reg_ker, |
1314 | FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, | 1314 | FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, |
1315 | FRF_AZ_KER_INT_KER, force, | 1315 | FRF_AZ_KER_INT_KER, force, |
1316 | FRF_AZ_DRV_INT_EN_KER, enabled); | 1316 | FRF_AZ_DRV_INT_EN_KER, enabled); |
1317 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | 1317 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); |
@@ -1427,11 +1427,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | |||
1427 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | 1427 | efx_readd(efx, ®, FR_BZ_INT_ISR0); |
1428 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | 1428 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); |
1429 | 1429 | ||
1430 | /* Check to see if we have a serious error condition */ | 1430 | /* Handle non-event-queue sources */ |
1431 | if (queues & (1U << efx->fatal_irq_level)) { | 1431 | if (queues & (1U << efx->irq_level)) { |
1432 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | 1432 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1433 | if (unlikely(syserr)) | 1433 | if (unlikely(syserr)) |
1434 | return efx_nic_fatal_interrupt(efx); | 1434 | return efx_nic_fatal_interrupt(efx); |
1435 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1435 | } | 1436 | } |
1436 | 1437 | ||
1437 | if (queues != 0) { | 1438 | if (queues != 0) { |
@@ -1441,7 +1442,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | |||
1441 | /* Schedule processing of any interrupting queues */ | 1442 | /* Schedule processing of any interrupting queues */ |
1442 | efx_for_each_channel(channel, efx) { | 1443 | efx_for_each_channel(channel, efx) { |
1443 | if (queues & 1) | 1444 | if (queues & 1) |
1444 | efx_schedule_channel(channel); | 1445 | efx_schedule_channel_irq(channel); |
1445 | queues >>= 1; | 1446 | queues >>= 1; |
1446 | } | 1447 | } |
1447 | result = IRQ_HANDLED; | 1448 | result = IRQ_HANDLED; |
@@ -1458,18 +1459,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | |||
1458 | efx_for_each_channel(channel, efx) { | 1459 | efx_for_each_channel(channel, efx) { |
1459 | event = efx_event(channel, channel->eventq_read_ptr); | 1460 | event = efx_event(channel, channel->eventq_read_ptr); |
1460 | if (efx_event_present(event)) | 1461 | if (efx_event_present(event)) |
1461 | efx_schedule_channel(channel); | 1462 | efx_schedule_channel_irq(channel); |
1462 | else | 1463 | else |
1463 | efx_nic_eventq_read_ack(channel); | 1464 | efx_nic_eventq_read_ack(channel); |
1464 | } | 1465 | } |
1465 | } | 1466 | } |
1466 | 1467 | ||
1467 | if (result == IRQ_HANDLED) { | 1468 | if (result == IRQ_HANDLED) |
1468 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1469 | netif_vdbg(efx, intr, efx->net_dev, | 1469 | netif_vdbg(efx, intr, efx->net_dev, |
1470 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | 1470 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", |
1471 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | 1471 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); |
1472 | } | ||
1473 | 1472 | ||
1474 | return result; | 1473 | return result; |
1475 | } | 1474 | } |
@@ -1488,20 +1487,20 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) | |||
1488 | efx_oword_t *int_ker = efx->irq_status.addr; | 1487 | efx_oword_t *int_ker = efx->irq_status.addr; |
1489 | int syserr; | 1488 | int syserr; |
1490 | 1489 | ||
1491 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1492 | netif_vdbg(efx, intr, efx->net_dev, | 1490 | netif_vdbg(efx, intr, efx->net_dev, |
1493 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 1491 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
1494 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1492 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1495 | 1493 | ||
1496 | /* Check to see if we have a serious error condition */ | 1494 | /* Handle non-event-queue sources */ |
1497 | if (channel->channel == efx->fatal_irq_level) { | 1495 | if (channel->channel == efx->irq_level) { |
1498 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | 1496 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1499 | if (unlikely(syserr)) | 1497 | if (unlikely(syserr)) |
1500 | return efx_nic_fatal_interrupt(efx); | 1498 | return efx_nic_fatal_interrupt(efx); |
1499 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
1501 | } | 1500 | } |
1502 | 1501 | ||
1503 | /* Schedule processing of the channel */ | 1502 | /* Schedule processing of the channel */ |
1504 | efx_schedule_channel(channel); | 1503 | efx_schedule_channel_irq(channel); |
1505 | 1504 | ||
1506 | return IRQ_HANDLED; | 1505 | return IRQ_HANDLED; |
1507 | } | 1506 | } |
@@ -1640,10 +1639,10 @@ void efx_nic_init_common(struct efx_nic *efx) | |||
1640 | 1639 | ||
1641 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | 1640 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) |
1642 | /* Use an interrupt level unused by event queues */ | 1641 | /* Use an interrupt level unused by event queues */ |
1643 | efx->fatal_irq_level = 0x1f; | 1642 | efx->irq_level = 0x1f; |
1644 | else | 1643 | else |
1645 | /* Use a valid MSI-X vector */ | 1644 | /* Use a valid MSI-X vector */ |
1646 | efx->fatal_irq_level = 0; | 1645 | efx->irq_level = 0; |
1647 | 1646 | ||
1648 | /* Enable all the genuinely fatal interrupts. (They are still | 1647 | /* Enable all the genuinely fatal interrupts. (They are still |
1649 | * masked by the overall interrupt mask, controlled by | 1648 | * masked by the overall interrupt mask, controlled by |
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 0f8478924475..7def480570c3 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c | |||
@@ -130,6 +130,8 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | |||
130 | static int efx_test_interrupts(struct efx_nic *efx, | 130 | static int efx_test_interrupts(struct efx_nic *efx, |
131 | struct efx_self_tests *tests) | 131 | struct efx_self_tests *tests) |
132 | { | 132 | { |
133 | int cpu; | ||
134 | |||
133 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); | 135 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); |
134 | tests->interrupt = -1; | 136 | tests->interrupt = -1; |
135 | 137 | ||
@@ -142,7 +144,8 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
142 | /* Wait for arrival of test interrupt. */ | 144 | /* Wait for arrival of test interrupt. */ |
143 | netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); | 145 | netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); |
144 | schedule_timeout_uninterruptible(HZ / 10); | 146 | schedule_timeout_uninterruptible(HZ / 10); |
145 | if (efx->last_irq_cpu >= 0) | 147 | cpu = ACCESS_ONCE(efx->last_irq_cpu); |
148 | if (cpu >= 0) | ||
146 | goto success; | 149 | goto success; |
147 | 150 | ||
148 | netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); | 151 | netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); |
@@ -150,8 +153,7 @@ static int efx_test_interrupts(struct efx_nic *efx, | |||
150 | 153 | ||
151 | success: | 154 | success: |
152 | netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", | 155 | netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", |
153 | INT_MODE(efx), | 156 | INT_MODE(efx), cpu); |
154 | efx->last_irq_cpu); | ||
155 | tests->interrupt = 1; | 157 | tests->interrupt = 1; |
156 | return 0; | 158 | return 0; |
157 | } | 159 | } |
@@ -165,7 +167,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
165 | bool napi_ran, dma_seen, int_seen; | 167 | bool napi_ran, dma_seen, int_seen; |
166 | 168 | ||
167 | read_ptr = channel->eventq_read_ptr; | 169 | read_ptr = channel->eventq_read_ptr; |
168 | channel->efx->last_irq_cpu = -1; | 170 | channel->last_irq_cpu = -1; |
169 | smp_wmb(); | 171 | smp_wmb(); |
170 | 172 | ||
171 | efx_nic_generate_test_event(channel); | 173 | efx_nic_generate_test_event(channel); |
@@ -182,7 +184,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel, | |||
182 | } else { | 184 | } else { |
183 | napi_ran = false; | 185 | napi_ran = false; |
184 | dma_seen = efx_nic_event_present(channel); | 186 | dma_seen = efx_nic_event_present(channel); |
185 | int_seen = efx->last_irq_cpu >= 0; | 187 | int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0; |
186 | } | 188 | } |
187 | napi_enable(&channel->napi_str); | 189 | napi_enable(&channel->napi_str); |
188 | efx_nic_eventq_read_ack(channel); | 190 | efx_nic_eventq_read_ack(channel); |