Diffstat (limited to 'drivers/net/sfc')
 drivers/net/sfc/Makefile        |    4
 drivers/net/sfc/bitfield.h      |    7
 drivers/net/sfc/boards.c        |    9
 drivers/net/sfc/boards.h        |    2
 drivers/net/sfc/efx.c           |   88
 drivers/net/sfc/enum.h          |   49
 drivers/net/sfc/ethtool.c       |  259
 drivers/net/sfc/falcon.c        |   97
 drivers/net/sfc/falcon.h        |    5
 drivers/net/sfc/falcon_hwdefs.h |   20
 drivers/net/sfc/falcon_io.h     |   29
 drivers/net/sfc/falcon_xmac.c   |   92
 drivers/net/sfc/mdio_10g.c      |   78
 drivers/net/sfc/mdio_10g.h      |   24
 drivers/net/sfc/net_driver.h    |   72
 drivers/net/sfc/rx.c            |   59
 drivers/net/sfc/selftest.c      |  719
 drivers/net/sfc/selftest.h      |   50
 drivers/net/sfc/sfe4001.c       |   28
 drivers/net/sfc/tenxpress.c     |   93
 drivers/net/sfc/tx.c            |  669
 drivers/net/sfc/workarounds.h   |    2
 drivers/net/sfc/xfp_phy.c       |   38
 23 files changed, 2300 insertions(+), 193 deletions(-)
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 0f023447eafd..1d2daeec7ac1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,5 @@
 sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \
-	 i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
-	 tenxpress.o boards.o sfe4001.o
+	 i2c-direct.o selftest.o ethtool.o xfp_phy.o \
+	 mdio_10g.o tenxpress.o boards.o sfe4001.o
 
 obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2806201644cc..2c79d27404e0 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -483,7 +483,7 @@ typedef union efx_oword {
 #endif
 
 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do {	   \
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {			   \
+	if (falcon_rev(efx) >= FALCON_REV_B0) {			   \
 		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
 	} else { \
 		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
 	} while (0)
 
 #define EFX_QWORD_FIELD_VER(efx, qword, field)	\
-	(FALCON_REV(efx) >= FALCON_REV_B0 ?	\
+	(falcon_rev(efx) >= FALCON_REV_B0 ?	\
	 EFX_QWORD_FIELD((qword), field##_B0) :	\
	 EFX_QWORD_FIELD((qword), field##_A1))
 
@@ -501,8 +501,5 @@ typedef union efx_oword {
 #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
 #define EFX_DMA_TYPE_WIDTH(width) \
	(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
-#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
-			  ~((u64) 0) : ~((u32) 0))
-#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
 
 #endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index eecaa6d58584..7fc0328dc055 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
 	struct efx_blinker *bl = &efx->board_info.blinker;
 	efx->board_info.set_fault_led(efx, bl->state);
 	bl->state = !bl->state;
-	if (bl->resubmit) {
-		bl->timer.expires = jiffies + BLINK_INTERVAL;
-		add_timer(&bl->timer);
-	}
+	if (bl->resubmit)
+		mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
 }
 
 static void board_blink(struct efx_nic *efx, int blink)
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
 		blinker->state = 0;
 		setup_timer(&blinker->timer, blink_led_timer,
			    (unsigned long)efx);
-		blinker->timer.expires = jiffies + BLINK_INTERVAL;
-		add_timer(&blinker->timer);
+		mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
 	} else {
 		blinker->resubmit = 0;
 		if (blinker->timer.function)
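
Note: mod_timer() folds the "set expiry, then arm" pair into a single call and, unlike add_timer(), is also safe on a timer that is already pending. A minimal sketch of the rearming pattern used above (hypothetical names; setup_timer()/mod_timer() are the real APIs of this kernel era):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	static void demo_timer_fn(unsigned long data)
	{
		/* Re-arm from the handler: one call updates the
		 * expiry and queues the timer again. */
		mod_timer(&demo_timer, jiffies + HZ / 2);
	}

	static void demo_start(void)
	{
		setup_timer(&demo_timer, demo_timer_fn, 0);
		mod_timer(&demo_timer, jiffies + HZ / 2);
	}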
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index f56341d428e1..695764dc2e64 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -22,5 +22,7 @@ enum efx_board_type {
 extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
 extern int sfe4001_poweron(struct efx_nic *efx);
 extern void sfe4001_poweroff(struct efx_nic *efx);
+/* Are we putting the PHY into flash config mode */
+extern unsigned int sfe4001_phy_flash_cfg;
 
 #endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 59edcf793c19..449760642e31 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
  */
 static inline void efx_channel_processed(struct efx_channel *channel)
 {
-	/* Write to EVQ_RPTR_REG. If a new event arrived in a race
-	 * with finishing processing, a new interrupt will be raised.
-	 */
+	/* The interrupt handler for this channel may set work_pending
+	 * as soon as we acknowledge the events we've seen.  Make sure
+	 * it's cleared before then. */
 	channel->work_pending = 0;
-	smp_wmb(); /* Ensure channel updated before any new interrupt. */
+	smp_wmb();
+
 	falcon_eventq_read_ack(channel);
 }
 
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	(void) efx_process_channel(channel, efx->type->evq_size);
+	efx_process_channel(channel, efx->type->evq_size);
 
 	/* Ack the eventq. This may cause an interrupt to be generated
 	 * when they are reenabled */
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
  *
  *************************************************************************/
 
-/* Setup per-NIC RX buffer parameters.
- * Calculate the rx buffer allocation parameters required to support
- * the current MTU, including padding for header alignment and overruns.
- */
-static void efx_calc_rx_buffer_params(struct efx_nic *efx)
-{
-	unsigned int order, len;
-
-	len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
-	       EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
-	       efx->type->rx_buffer_padding);
-
-	/* Calculate page-order */
-	for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
-		;
-
-	efx->rx_buffer_len = len;
-	efx->rx_buffer_order = order;
-}
-
 static int efx_probe_channel(struct efx_channel *channel)
 {
 	struct efx_tx_queue *tx_queue;
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
 	struct efx_channel *channel;
 	int rc = 0;
 
-	efx_calc_rx_buffer_params(efx);
+	/* Calculate the rx buffer allocation parameters required to
+	 * support the current MTU, including padding for header
+	 * alignment and overruns.
+	 */
+	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+			      efx->type->rx_buffer_padding);
+	efx->rx_buffer_order = get_order(efx->rx_buffer_len);
 
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
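
Note: the hand-rolled page-order loop is replaced by the kernel's get_order() helper, which returns the smallest order n such that (PAGE_SIZE << n) >= size. Illustrative values only, assuming 4 KiB pages:

	/* get_order(1)    == 0   (one page)
	 * get_order(4096) == 0
	 * get_order(4097) == 1   (two pages)
	 * get_order(9018) == 2   (a 9000-byte jumbo frame plus
	 *                         padding needs four pages)
	 */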
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
 
+	/* The interrupt handler for this channel may set work_pending
+	 * as soon as we enable it.  Make sure it's cleared before
+	 * then.  Similarly, make sure it sees the enabled flag set. */
 	channel->work_pending = 0;
 	channel->enabled = 1;
-	smp_wmb(); /* ensure channel updated before first interrupt */
+	smp_wmb();
 
 	napi_enable(&channel->napi_str);
 
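
Note: the smp_wmb() orders the two flag stores against the subsequent enabling of the channel; the interrupt handler is conceptually the reading side of the barrier pair. A schematic sketch, not the driver's actual handler:

	/* enable path:                  interrupt handler (conceptually):
	 *   channel->work_pending = 0;    if (channel->enabled)
	 *   channel->enabled = 1;             channel->work_pending = 1;
	 *   smp_wmb();
	 *   <enable interrupt/NAPI>
	 * The barrier ensures the handler cannot observe the channel
	 * enabled while work_pending is still stale. */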
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
 	mutex_unlock(&efx->mac_lock);
 
 	/* Serialise against efx_set_multicast_list() */
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
 	efx->membase = ioremap_nocache(efx->membase_phys,
				       efx->type->mem_map_size);
 	if (!efx->membase) {
-		EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
-			efx->type->mem_bar, efx->membase_phys,
+		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
+			efx->type->mem_bar,
+			(unsigned long long)efx->membase_phys,
 			efx->type->mem_map_size);
 		rc = -ENOMEM;
 		goto fail4;
 	}
-	EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
-		efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
-		efx->membase);
+	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
+		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+		efx->type->mem_map_size, efx->membase);
 
 	return 0;
 
  fail4:
 	release_mem_region(efx->membase_phys, efx->type->mem_map_size);
  fail3:
-	efx->membase_phys = 0UL;
+	efx->membase_phys = 0;
  fail2:
 	pci_disable_device(efx->pci_dev);
  fail1:
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
 
 	if (efx->membase_phys) {
 		pci_release_region(efx->pci_dev, efx->type->mem_bar);
-		efx->membase_phys = 0UL;
+		efx->membase_phys = 0;
 	}
 
 	pci_disable_device(efx->pci_dev);
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
 		return;
 	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
 		return;
-	if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
 		return;
 
 	/* Mark the port as enabled so port reconfigurations can start, then
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
 	cancel_delayed_work_sync(&efx->monitor_work);
 
 	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx) {
+	efx_for_each_rx_queue(rx_queue, efx)
 		cancel_delayed_work_sync(&rx_queue->work);
-	}
 
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->reconfigure_work);
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
 	falcon_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			synchronize_irq(channel->irq);
+	}
 
 	/* Stop all NAPI processing and synchronous rx refills */
 	efx_for_each_channel(channel, efx)
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	efx_stop_queue(efx);
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
 	return 0;
 }
 
-/* Context: process, dev_base_lock held, non-blocking. */
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
 	struct efx_nic *efx = net_dev->priv;
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct net_device_stats *stats = &net_dev->stats;
 
+	/* Update stats if possible, but do not wait if another thread
+	 * is updating them (or resetting the NIC); slightly stale
+	 * stats are acceptable.
+	 */
 	if (!spin_trylock(&efx->stats_lock))
 		return stats;
 	if (efx->state == STATE_RUNNING) {
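
Note: the new comment describes a non-blocking reader; a condensed sketch of the same idea (hypothetical helper name around the real spin_trylock()):

	/* Return the cached counters rather than blocking if a stats
	 * refresh or NIC reset already holds the lock. */
	if (!spin_trylock(&efx->stats_lock))
		return stats;		/* previous snapshot */
	refresh_mac_stats(efx);		/* hypothetical refresh step */
	spin_unlock(&efx->stats_lock);
	return stats;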
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
 {
-	struct net_device *net_dev = (struct net_device *)ptr;
+	struct net_device *net_dev = ptr;
 
 	if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
 		struct efx_nic *efx = net_dev->priv;
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 	efx_for_each_tx_queue(tx_queue, efx)
 		efx_release_tx_buffers(tx_queue);
 
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 		unregister_netdev(efx->net_dev);
 	}
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
 	if (method == RESET_TYPE_DISABLE) {
 		/* Reinitialise the device anyway so the driver unload sequence
 		 * can talk to the external SRAM */
-		(void) falcon_init_nic(efx);
+		falcon_init_nic(efx);
 		rc = -EIO;
 		goto fail4;
 	}
@@ -1873,6 +1869,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 		tx_queue->queue = i;
 		tx_queue->buffer = NULL;
 		tx_queue->channel = &efx->channel[0]; /* for safety */
+		tx_queue->tso_headers_free = NULL;
 	}
 	for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
 		rx_queue = &efx->rx_queue[i];
@@ -2071,7 +2068,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	net_dev = alloc_etherdev(sizeof(*efx));
 	if (!net_dev)
 		return -ENOMEM;
-	net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
+	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
+			      NETIF_F_HIGHDMA | NETIF_F_TSO);
 	if (lro)
 		net_dev->features |= NETIF_F_LRO;
 	efx = net_dev->priv;
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 43663a4619da..c53290d08e2b 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -10,6 +10,55 @@
 #ifndef EFX_ENUM_H
 #define EFX_ENUM_H
 
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_XGMII: loopback within MAC at XGMII level
+ * @LOOPBACK_XGXS: loopback within MAC at XGXS level
+ * @LOOPBACK_XAUI: loopback within MAC at XAUI level
+ * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level
+ * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
+ */
+/* Please keep in order and up-to-date w.r.t the following two #defines */
+enum efx_loopback_mode {
+	LOOPBACK_NONE = 0,
+	LOOPBACK_MAC = 1,
+	LOOPBACK_XGMII = 2,
+	LOOPBACK_XGXS = 3,
+	LOOPBACK_XAUI = 4,
+	LOOPBACK_PHY = 5,
+	LOOPBACK_PHYXS = 6,
+	LOOPBACK_PCS = 7,
+	LOOPBACK_PMAPMD = 8,
+	LOOPBACK_NETWORK = 9,
+	LOOPBACK_MAX
+};
+
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+extern const char *efx_loopback_mode_names[];
+#define LOOPBACK_MODE_NAME(mode) \
+	STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
+#define LOOPBACK_MODE(efx) \
+	LOOPBACK_MODE_NAME(efx->loopback_mode)
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII) | \
+				(1 << LOOPBACK_XGXS) |	\
+				(1 << LOOPBACK_XAUI))
+
+#define LOOPBACK_MASK(_efx) \
+	(1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx) \
+	((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0)
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask) \
+	(((LOOPBACK_MASK(_from) & (_mask)) && \
+	  ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
+
 /*****************************************************************************/
 
 /**
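
Note: a worked example of how the new mask macros compose; the values follow directly from the enum above. With loopback_mode == LOOPBACK_XAUI:

	/* LOOPBACK_MASK(efx)     == 1 << LOOPBACK_XAUI == 0x010
	 * LOOPBACKS_10G_INTERNAL == (1 << 2) | (1 << 3) | (1 << 4)
	 *                        == 0x01c
	 * LOOPBACK_INTERNAL(efx) == 1  (XAUI is MAC-internal)
	 * With mode LOOPBACK_PMAPMD instead:
	 *   LOOPBACK_MASK(efx)   == 0x100, LOOPBACK_INTERNAL(efx) == 0
	 */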
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index ad541badbd98..e2c75d101610 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -12,12 +12,26 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include "net_driver.h"
+#include "selftest.h"
 #include "efx.h"
 #include "ethtool.h"
 #include "falcon.h"
 #include "gmii.h"
 #include "mac.h"
 
+const char *efx_loopback_mode_names[] = {
+	[LOOPBACK_NONE]		= "NONE",
+	[LOOPBACK_MAC]		= "MAC",
+	[LOOPBACK_XGMII]	= "XGMII",
+	[LOOPBACK_XGXS]		= "XGXS",
+	[LOOPBACK_XAUI]		= "XAUI",
+	[LOOPBACK_PHY]		= "PHY",
+	[LOOPBACK_PHYXS]	= "PHY(XS)",
+	[LOOPBACK_PCS]		= "PHY(PCS)",
+	[LOOPBACK_PMAPMD]	= "PHY(PMAPMD)",
+	[LOOPBACK_NETWORK]	= "NETWORK",
+};
+
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
 
 struct ethtool_string {
@@ -217,23 +231,179 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
 	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
 }
 
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index: Index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ * @test: Pointer to test result (used only if data != %NULL)
+ * @unit_format: Unit name format (e.g. "channel\%d")
+ * @unit_id: Unit id (e.g. 0 for "channel0")
+ * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
+ * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index,
+			  struct ethtool_string *strings, u64 *data,
+			  int *test, const char *unit_format, int unit_id,
+			  const char *test_format, const char *test_id)
+{
+	struct ethtool_string unit_str, test_str;
+
+	/* Fill data value, if applicable */
+	if (data)
+		data[test_index] = *test;
+
+	/* Fill string, if applicable */
+	if (strings) {
+		snprintf(unit_str.name, sizeof(unit_str.name),
+			 unit_format, unit_id);
+		snprintf(test_str.name, sizeof(test_str.name),
+			 test_format, test_id);
+		snprintf(strings[test_index].name,
+			 sizeof(strings[test_index].name),
+			 "%-9s%-17s", unit_str.name, test_str.name);
+	}
+}
+
+#define EFX_PORT_NAME "port%d", 0
+#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
+#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_LOOPBACK_NAME(_mode, _counter) \
+	"loopback.%s." _counter, LOOPBACK_MODE_NAME(mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx: Efx NIC
+ * @lb_tests: Efx loopback self-test results structure
+ * @mode: Loopback test mode
+ * @test_index: Starting index of the test
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+				  struct efx_loopback_self_tests *lb_tests,
+				  enum efx_loopback_mode mode,
+				  unsigned int test_index,
+				  struct ethtool_string *strings, u64 *data)
+{
+	struct efx_tx_queue *tx_queue;
+
+	efx_for_each_tx_queue(tx_queue, efx) {
+		efx_fill_test(test_index++, strings, data,
+			      &lb_tests->tx_sent[tx_queue->queue],
+			      EFX_TX_QUEUE_NAME(tx_queue),
+			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
+		efx_fill_test(test_index++, strings, data,
+			      &lb_tests->tx_done[tx_queue->queue],
+			      EFX_TX_QUEUE_NAME(tx_queue),
+			      EFX_LOOPBACK_NAME(mode, "tx_done"));
+	}
+	efx_fill_test(test_index++, strings, data,
+		      &lb_tests->rx_good,
+		      EFX_PORT_NAME,
+		      EFX_LOOPBACK_NAME(mode, "rx_good"));
+	efx_fill_test(test_index++, strings, data,
+		      &lb_tests->rx_bad,
+		      EFX_PORT_NAME,
+		      EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+	return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx: Efx NIC
+ * @tests: Efx self-test results structure, or %NULL
+ * @strings: Ethtool strings, or %NULL
+ * @data: Ethtool test results, or %NULL
+ */
+static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+				       struct efx_self_tests *tests,
+				       struct ethtool_string *strings,
+				       u64 *data)
+{
+	struct efx_channel *channel;
+	unsigned int n = 0;
+	enum efx_loopback_mode mode;
+
+	/* Interrupt */
+	efx_fill_test(n++, strings, data, &tests->interrupt,
+		      "core", 0, "interrupt", NULL);
+
+	/* Event queues */
+	efx_for_each_channel(channel, efx) {
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_dma[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.dma", NULL);
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_int[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.int", NULL);
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_poll[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.poll", NULL);
+	}
+
+	/* PHY presence */
+	efx_fill_test(n++, strings, data, &tests->phy_ok,
+		      EFX_PORT_NAME, "phy_ok", NULL);
+
+	/* Loopback tests */
+	efx_fill_test(n++, strings, data, &tests->loopback_speed,
+		      EFX_PORT_NAME, "loopback.speed", NULL);
+	efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
+		      EFX_PORT_NAME, "loopback.full_duplex", NULL);
+	for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) {
+		if (!(efx->loopback_modes & (1 << mode)))
+			continue;
+		n = efx_fill_loopback_test(efx,
+					   &tests->loopback[mode], mode, n,
+					   strings, data);
+	}
+
+	return n;
+}
+
 static int efx_ethtool_get_stats_count(struct net_device *net_dev)
 {
 	return EFX_ETHTOOL_NUM_STATS;
 }
 
+static int efx_ethtool_self_test_count(struct net_device *net_dev)
+{
+	struct efx_nic *efx = net_dev->priv;
+
+	return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+}
+
 static void efx_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
 {
+	struct efx_nic *efx = net_dev->priv;
 	struct ethtool_string *ethtool_strings =
		(struct ethtool_string *)strings;
 	int i;
 
-	if (string_set == ETH_SS_STATS)
+	switch (string_set) {
+	case ETH_SS_STATS:
 		for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
 			strncpy(ethtool_strings[i].name,
				efx_ethtool_stats[i].name,
				sizeof(ethtool_strings[i].name));
+		break;
+	case ETH_SS_TEST:
+		efx_ethtool_fill_self_tests(efx, NULL,
+					    ethtool_strings, NULL);
+		break;
+	default:
+		/* No other string sets */
+		break;
+	}
 }
 
 static void efx_ethtool_get_stats(struct net_device *net_dev,
@@ -272,6 +442,22 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 	}
 }
 
+static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
+{
+	int rc;
+
+	/* Our TSO requires TX checksumming, so force TX checksumming
+	 * on when TSO is enabled.
+	 */
+	if (enable) {
+		rc = efx_ethtool_set_tx_csum(net_dev, 1);
+		if (rc)
+			return rc;
+	}
+
+	return ethtool_op_set_tso(net_dev, enable);
+}
+
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx = net_dev->priv;
@@ -283,6 +469,15 @@ static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
 
 	efx_flush_queues(efx);
 
+	/* Our TSO requires TX checksumming, so disable TSO when
+	 * checksumming is disabled
+	 */
+	if (!enable) {
+		rc = efx_ethtool_set_tso(net_dev, 0);
+		if (rc)
+			return rc;
+	}
+
 	return 0;
 }
 
@@ -305,6 +500,64 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
 	return efx->rx_checksum_enabled;
 }
 
+static void efx_ethtool_self_test(struct net_device *net_dev,
+				  struct ethtool_test *test, u64 *data)
+{
+	struct efx_nic *efx = net_dev->priv;
+	struct efx_self_tests efx_tests;
+	int offline, already_up;
+	int rc;
+
+	ASSERT_RTNL();
+	if (efx->state != STATE_RUNNING) {
+		rc = -EIO;
+		goto fail1;
+	}
+
+	/* We need rx buffers and interrupts. */
+	already_up = (efx->net_dev->flags & IFF_UP);
+	if (!already_up) {
+		rc = dev_open(efx->net_dev);
+		if (rc) {
+			EFX_ERR(efx, "failed opening device.\n");
+			goto fail2;
+		}
+	}
+
+	memset(&efx_tests, 0, sizeof(efx_tests));
+	offline = (test->flags & ETH_TEST_FL_OFFLINE);
+
+	/* Perform online self tests first */
+	rc = efx_online_test(efx, &efx_tests);
+	if (rc)
+		goto out;
+
+	/* Perform offline tests only if online tests passed */
+	if (offline) {
+		/* Stop the kernel from sending packets during the test. */
+		efx_stop_queue(efx);
+		rc = efx_flush_queues(efx);
+		if (!rc)
+			rc = efx_offline_test(efx, &efx_tests,
+					      efx->loopback_modes);
+		efx_wake_queue(efx);
+	}
+
+ out:
+	if (!already_up)
+		dev_close(efx->net_dev);
+
+	EFX_LOG(efx, "%s all %sline self-tests\n",
+		rc == 0 ? "passed" : "failed", offline ? "off" : "on");
+
+ fail2:
+ fail1:
+	/* Fill ethtool results structures */
+	efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+	if (rc)
+		test->flags |= ETH_TEST_FL_FAILED;
+}
+
 /* Restart autonegotiation */
 static int efx_ethtool_nway_reset(struct net_device *net_dev)
 {
@@ -451,8 +704,12 @@ struct ethtool_ops efx_ethtool_ops = {
	.set_tx_csum		= efx_ethtool_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
+	.get_tso		= ethtool_op_get_tso,
+	.set_tso		= efx_ethtool_set_tso,
	.get_flags		= ethtool_op_get_flags,
	.set_flags		= ethtool_op_set_flags,
+	.self_test_count	= efx_ethtool_self_test_count,
+	.self_test		= efx_ethtool_self_test,
	.get_strings		= efx_ethtool_get_strings,
	.phys_id		= efx_ethtool_phys_id,
	.get_stats_count	= efx_ethtool_get_stats_count,
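
Note: one walker, efx_ethtool_fill_self_tests(), serves as counter (all-%NULL arguments), string filler and result filler, so .self_test_count, .get_strings and .self_test can never disagree about the test list. A stripped-down sketch of that pattern (hypothetical function name; ethtool_string and snprintf() as in the file above):

	static int fill_tests(struct ethtool_string *strings, u64 *data)
	{
		int n = 0;

		/* Each test: write the name and/or result only if the
		 * caller supplied a buffer; always advance the count. */
		if (strings)
			snprintf(strings[n].name, sizeof(strings[n].name),
				 "core     interrupt");
		if (data)
			data[n] = 1;	/* e.g. pass */
		n++;

		return n;	/* entry count, whichever mode we ran in */
	}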
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 46db549ce580..790db89db345 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 **************************************************************************
 */
 
-/* DMA address mask (up to 46-bit, avoiding compiler warnings)
- *
- * Note that it is possible to have a platform with 64-bit longs and
- * 32-bit DMA addresses, or vice versa.  EFX_DMA_MASK takes care of the
- * platform DMA mask.
- */
-#if BITS_PER_LONG == 64
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
-#else
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
-#endif
+/* DMA address mask */
+#define FALCON_DMA_MASK DMA_BIT_MASK(46)
 
 /* TX DMA length mask (13-bit) */
 #define FALCON_TX_DMA_MASK (4096 - 1)
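
Note: DMA_BIT_MASK(n) from <linux/dma-mapping.h> builds the low-n-bit mask directly, which is why both the BITS_PER_LONG special-casing here and the EFX_DMA_MASK()/EFX_DMA_MAX_MASK helpers removed from bitfield.h above become unnecessary. For illustration:

	/* DMA_BIT_MASK(46) == (1ULL << 46) - 1
	 *                  == 0x00003fffffffffffULL,
	 * the same constant the removed #if/#else produced on both
	 * 32-bit and 64-bit builds. */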
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 #define PCI_EXP_LNKSTA_LNK_WID_LBN	4
 
 #define FALCON_IS_DUAL_FUNC(efx)		\
-	(FALCON_REV(efx) < FALCON_REV_B0)
+	(falcon_rev(efx) < FALCON_REV_B0)
 
 /**************************************************************************
  *
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
			      TX_DESCQ_TYPE, 0,
			      TX_NON_IP_DROP_DIS_B0, 1);
 
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);
 
-	if (FALCON_REV(efx) < FALCON_REV_B0) {
+	if (falcon_rev(efx) < FALCON_REV_B0) {
		efx_oword_t reg;
 
		BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	int rc;
-	int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+	int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	int iscsi_digest_en = is_b0;
 
	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -742,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
			continue;
		break;
	}
-	if (rc)
+	if (rc) {
		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
+		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+	}
 
	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
@@ -822,10 +815,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
 
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +877,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
						  RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-	rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
 
@@ -1065,7 +1058,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))
		is_phy_event = 1;
 
-	if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
		is_phy_event = 1;
 
@@ -1129,6 +1122,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
	case RX_RECOVERY_EV_DECODE:
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
+		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
@@ -1404,7 +1398,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 {
	struct falcon_nic_data *nic_data = efx->nic_data;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;
	static int n_int_errors;
@@ -1450,8 +1444,8 @@ out:
  */
 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 {
-	struct efx_nic *efx = (struct efx_nic *)dev_id;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	struct efx_nic *efx = dev_id;
+	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
@@ -1488,8 +1482,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 
 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
-	struct efx_nic *efx = (struct efx_nic *)dev_id;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	struct efx_nic *efx = dev_id;
+	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	int queues;
@@ -1541,9 +1535,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
  */
 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
 {
-	struct efx_channel *channel = (struct efx_channel *)dev_id;
+	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
 
	efx->last_irq_cpu = raw_smp_processor_id();
@@ -1571,7 +1565,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
	unsigned long offset;
	efx_dword_t dword;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
		return;
 
	for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1594,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
 
	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
-		if (FALCON_REV(efx) >= FALCON_REV_B0)
+		if (falcon_rev(efx) >= FALCON_REV_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;
@@ -1635,12 +1629,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
	efx_oword_t reg;
 
	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
+	}
 
	/* ACK legacy interrupt */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
		falcon_read(efx, &reg, INT_ISR0_B0);
	else
		falcon_irq_ack_a1(efx);
@@ -1731,7 +1726,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
	efx_oword_t temp;
	int count;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if ((falcon_rev(efx) < FALCON_REV_B0) ||
+	    (efx->loopback_mode != LOOPBACK_NONE))
		return;
 
	falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
@@ -1783,7 +1779,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
	efx_oword_t temp;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
		return;
 
	/* Isolate the MAC -> RX */
@@ -1821,7 +1817,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
			     MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
				    !efx->link_up);
	}
@@ -1839,7 +1835,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 
	/* Unisolate the MAC -> RX */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
	falcon_write(efx, &reg, RX_CFG_REG_KER);
 }
@@ -1854,7 +1850,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
		return 0;
 
	/* Statistics fetch will fail if the MAC is in TX drain */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
		efx_oword_t temp;
		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1938,7 +1934,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
 static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
			      int addr, int value)
 {
-	struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+	struct efx_nic *efx = net_dev->priv;
	unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
	efx_oword_t reg;
 
@@ -2006,7 +2002,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
  * could be read, -1 will be returned. */
 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
 {
-	struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+	struct efx_nic *efx = net_dev->priv;
	unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
	efx_oword_t reg;
	int value = -1;
@@ -2091,6 +2087,8 @@ static int falcon_probe_phy(struct efx_nic *efx)
			efx->phy_type);
		return -1;
	}
+
+	efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
	return 0;
 }
 
@@ -2109,7 +2107,7 @@ int falcon_probe_port(struct efx_nic *efx)
	falcon_init_mdio(&efx->mii);
 
	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
		efx->flow_control = EFX_FC_RX | EFX_FC_TX;
	else
		efx->flow_control = EFX_FC_RX;
@@ -2369,7 +2367,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
		return -ENODEV;
	}
 
-	switch (FALCON_REV(efx)) {
+	switch (falcon_rev(efx)) {
	case FALCON_REV_A0:
	case 0xff:
		EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2395,7 +2393,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
		break;
 
	default:
-		EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
		return -ENODEV;
	}
 
@@ -2415,7 +2413,7 @@ int falcon_probe_nic(struct efx_nic *efx)
 
	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
-	efx->nic_data = (void *) nic_data;
+	efx->nic_data = nic_data;
 
	/* Determine number of ports etc. */
	rc = falcon_probe_nic_variant(efx);
@@ -2468,14 +2466,12 @@ int falcon_probe_nic(struct efx_nic *efx)
  fail5:
	falcon_free_buffer(efx, &efx->irq_status);
  fail4:
-	/* fall-thru */
  fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
  fail2:
-	/* fall-thru */
  fail1:
	kfree(efx->nic_data);
	return rc;
@@ -2487,13 +2483,10 @@ int falcon_probe_nic(struct efx_nic *efx)
  */
 int falcon_init_nic(struct efx_nic *efx)
 {
-	struct falcon_nic_data *data;
	efx_oword_t temp;
	unsigned thresh;
	int rc;
 
-	data = (struct falcon_nic_data *)efx->nic_data;
-
	/* Set up the address region register. This is only needed
	 * for the B0 FPGA, but since we are just pushing in the
	 * reset defaults this may as well be unconditional. */
@@ -2560,7 +2553,7 @@ int falcon_init_nic(struct efx_nic *efx)
 
	/* Set number of RSS queues for receive path. */
	falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
	else
		EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2598,7 +2591,7 @@ int falcon_init_nic(struct efx_nic *efx)
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
	/* Squash TX of packets of 16 bytes or less */
-	if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
	falcon_write(efx, &temp, TX_CFG2_REG_KER);
 
@@ -2615,7 +2608,7 @@ int falcon_init_nic(struct efx_nic *efx)
	if (EFX_WORKAROUND_7575(efx))
		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
					(3 * 4096) / 32);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
 
	/* RX FIFO flow control thresholds */
@@ -2631,7 +2624,7 @@ int falcon_init_nic(struct efx_nic *efx)
	falcon_write(efx, &temp, RX_CFG_REG_KER);
 
	/* Set destination of both TX and RX Flush events */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
		falcon_write(efx, &temp, DP_CTRL_REG);
	}
@@ -2645,7 +2638,7 @@ void falcon_remove_nic(struct efx_nic *efx)
 
	falcon_free_buffer(efx, &efx->irq_status);
 
-	(void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+	falcon_reset_hw(efx, RESET_TYPE_ALL);
 
	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h index 6117403b0c03..492f9bc28840 100644 --- a/drivers/net/sfc/falcon.h +++ b/drivers/net/sfc/falcon.h | |||
@@ -23,7 +23,10 @@ enum falcon_revision { | |||
23 | FALCON_REV_B0 = 2, | 23 | FALCON_REV_B0 = 2, |
24 | }; | 24 | }; |
25 | 25 | ||
26 | #define FALCON_REV(efx) ((efx)->pci_dev->revision) | 26 | static inline int falcon_rev(struct efx_nic *efx) |
27 | { | ||
28 | return efx->pci_dev->revision; | ||
29 | } | ||
27 | 30 | ||
28 | extern struct efx_nic_type falcon_a_nic_type; | 31 | extern struct efx_nic_type falcon_a_nic_type; |
29 | extern struct efx_nic_type falcon_b_nic_type; | 32 | extern struct efx_nic_type falcon_b_nic_type; |
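The hunk above swaps the FALCON_REV() macro for a static inline falcon_rev(). The inline form gives the compiler a real prototype, so a call with anything other than a struct efx_nic * fails to compile instead of expanding into a nonsense field access. A minimal user-space sketch of the difference, with the surrounding struct scaffolding invented for illustration:

    #include <stdio.h>

    struct pci_dev { unsigned char revision; };
    struct efx_nic { struct pci_dev *pci_dev; };

    /* Macro version: no type checking; any argument with a ->pci_dev
     * member compiles, and mistakes surface as obscure errors at the
     * expansion site. */
    #define FALCON_REV(efx) ((efx)->pci_dev->revision)

    /* Inline version: the argument must really be a struct efx_nic *,
     * and the call reads (and debugs) like a normal function. */
    static inline int falcon_rev(struct efx_nic *efx)
    {
        return efx->pci_dev->revision;
    }

    int main(void)
    {
        struct pci_dev pdev = { .revision = 2 };
        struct efx_nic nic = { .pci_dev = &pdev };

        printf("macro: %d, inline: %d\n", FALCON_REV(&nic), falcon_rev(&nic));
        return 0;
    }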
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h index 0485a63eaff6..6d003114eeab 100644 --- a/drivers/net/sfc/falcon_hwdefs.h +++ b/drivers/net/sfc/falcon_hwdefs.h | |||
@@ -636,6 +636,14 @@ | |||
636 | #define XX_HIDRVA_WIDTH 1 | 636 | #define XX_HIDRVA_WIDTH 1 |
637 | #define XX_LODRVA_LBN 8 | 637 | #define XX_LODRVA_LBN 8 |
638 | #define XX_LODRVA_WIDTH 1 | 638 | #define XX_LODRVA_WIDTH 1 |
639 | #define XX_LPBKD_LBN 3 | ||
640 | #define XX_LPBKD_WIDTH 1 | ||
641 | #define XX_LPBKC_LBN 2 | ||
642 | #define XX_LPBKC_WIDTH 1 | ||
643 | #define XX_LPBKB_LBN 1 | ||
644 | #define XX_LPBKB_WIDTH 1 | ||
645 | #define XX_LPBKA_LBN 0 | ||
646 | #define XX_LPBKA_WIDTH 1 | ||
639 | 647 | ||
640 | #define XX_TXDRV_CTL_REG_MAC 0x12 | 648 | #define XX_TXDRV_CTL_REG_MAC 0x12 |
641 | #define XX_DEQD_LBN 28 | 649 | #define XX_DEQD_LBN 28 |
@@ -656,8 +664,14 @@ | |||
656 | #define XX_DTXA_WIDTH 4 | 664 | #define XX_DTXA_WIDTH 4 |
657 | 665 | ||
658 | /* XAUI XGXS core status register */ | 666 | /* XAUI XGXS core status register */ |
659 | #define XX_FORCE_SIG_DECODE_FORCED 0xff | ||
660 | #define XX_CORE_STAT_REG_MAC 0x16 | 667 | #define XX_CORE_STAT_REG_MAC 0x16 |
668 | #define XX_FORCE_SIG_LBN 24 | ||
669 | #define XX_FORCE_SIG_WIDTH 8 | ||
670 | #define XX_FORCE_SIG_DECODE_FORCED 0xff | ||
671 | #define XX_XGXS_LB_EN_LBN 23 | ||
672 | #define XX_XGXS_LB_EN_WIDTH 1 | ||
673 | #define XX_XGMII_LB_EN_LBN 22 | ||
674 | #define XX_XGMII_LB_EN_WIDTH 1 | ||
661 | #define XX_ALIGN_DONE_LBN 20 | 675 | #define XX_ALIGN_DONE_LBN 20 |
662 | #define XX_ALIGN_DONE_WIDTH 1 | 676 | #define XX_ALIGN_DONE_WIDTH 1 |
663 | #define XX_SYNC_STAT_LBN 16 | 677 | #define XX_SYNC_STAT_LBN 16 |
@@ -1111,7 +1125,7 @@ struct falcon_nvconfig_board_v2 { | |||
1111 | u8 port1_phy_type; | 1125 | u8 port1_phy_type; |
1112 | __le16 asic_sub_revision; | 1126 | __le16 asic_sub_revision; |
1113 | __le16 board_revision; | 1127 | __le16 board_revision; |
1114 | } __attribute__ ((packed)); | 1128 | } __packed; |
1115 | 1129 | ||
1116 | #define NVCONFIG_BASE 0x300 | 1130 | #define NVCONFIG_BASE 0x300 |
1117 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | 1131 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C |
@@ -1130,6 +1144,6 @@ struct falcon_nvconfig { | |||
1130 | __le16 board_struct_ver; | 1144 | __le16 board_struct_ver; |
1131 | __le16 board_checksum; | 1145 | __le16 board_checksum; |
1132 | struct falcon_nvconfig_board_v2 board_v2; | 1146 | struct falcon_nvconfig_board_v2 board_v2; |
1133 | } __attribute__ ((packed)); | 1147 | } __packed; |
1134 | 1148 | ||
1135 | #endif /* EFX_FALCON_HWDEFS_H */ | 1149 | #endif /* EFX_FALCON_HWDEFS_H */ |
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h index ea08184ddfa9..6670cdfc41ab 100644 --- a/drivers/net/sfc/falcon_io.h +++ b/drivers/net/sfc/falcon_io.h | |||
@@ -56,14 +56,27 @@ | |||
56 | #define FALCON_USE_QWORD_IO 1 | 56 | #define FALCON_USE_QWORD_IO 1 |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #define _falcon_writeq(efx, value, reg) \ | 59 | #ifdef FALCON_USE_QWORD_IO |
60 | __raw_writeq((__force u64) (value), (efx)->membase + (reg)) | 60 | static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, |
61 | #define _falcon_writel(efx, value, reg) \ | 61 | unsigned int reg) |
62 | __raw_writel((__force u32) (value), (efx)->membase + (reg)) | 62 | { |
63 | #define _falcon_readq(efx, reg) \ | 63 | __raw_writeq((__force u64)value, efx->membase + reg); |
64 | ((__force __le64) __raw_readq((efx)->membase + (reg))) | 64 | } |
65 | #define _falcon_readl(efx, reg) \ | 65 | static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) |
66 | ((__force __le32) __raw_readl((efx)->membase + (reg))) | 66 | { |
67 | return (__force __le64)__raw_readq(efx->membase + reg); | ||
68 | } | ||
69 | #endif | ||
70 | |||
71 | static inline void _falcon_writel(struct efx_nic *efx, __le32 value, | ||
72 | unsigned int reg) | ||
73 | { | ||
74 | __raw_writel((__force u32)value, efx->membase + reg); | ||
75 | } | ||
76 | static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) | ||
77 | { | ||
78 | return (__force __le32)__raw_readl(efx->membase + reg); | ||
79 | } | ||
67 | 80 | ||
68 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ | 81 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ |
69 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, | 82 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, |
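The falcon_io.h change applies the same macro-to-inline conversion to the raw MMIO helpers, so the __le32/__le64 annotations are checked by sparse at every call site rather than hidden behind pasted __force casts. There is no __raw_writel() outside the kernel, but the byte-order discipline the helpers enforce can be sketched in user space with the glibc/BSD <endian.h> conversions (the demo_* names and the fake BAR are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <endian.h>

    /* Stand-in for a BAR mapping: device registers are little-endian,
     * so values are converted exactly once, at the accessor boundary. */
    static uint8_t fake_bar[64];

    static inline void demo_writel(uint32_t value, unsigned int reg)
    {
        uint32_t le = htole32(value);    /* cpu -> device byte order */
        memcpy(fake_bar + reg, &le, sizeof(le));
    }

    static inline uint32_t demo_readl(unsigned int reg)
    {
        uint32_t le;
        memcpy(&le, fake_bar + reg, sizeof(le));
        return le32toh(le);              /* device -> cpu byte order */
    }

    int main(void)
    {
        demo_writel(0x12345678u, 0x10);
        printf("read back: 0x%08x\n", demo_readl(0x10));
        return 0;
    }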
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index aa7521b24a5d..55c0d9760be8 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c | |||
@@ -32,7 +32,7 @@ | |||
32 | (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) | 32 | (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) |
33 | 33 | ||
34 | void falcon_xmac_writel(struct efx_nic *efx, | 34 | void falcon_xmac_writel(struct efx_nic *efx, |
35 | efx_dword_t *value, unsigned int mac_reg) | 35 | efx_dword_t *value, unsigned int mac_reg) |
36 | { | 36 | { |
37 | efx_oword_t temp; | 37 | efx_oword_t temp; |
38 | 38 | ||
@@ -69,6 +69,10 @@ static int falcon_reset_xmac(struct efx_nic *efx) | |||
69 | udelay(10); | 69 | udelay(10); |
70 | } | 70 | } |
71 | 71 | ||
72 | /* This often fails when DSP is disabled, ignore it */ | ||
73 | if (sfe4001_phy_flash_cfg != 0) | ||
74 | return 0; | ||
75 | |||
72 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); | 76 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); |
73 | return -ETIMEDOUT; | 77 | return -ETIMEDOUT; |
74 | } | 78 | } |
@@ -217,13 +221,13 @@ static int falcon_xgmii_status(struct efx_nic *efx) | |||
217 | { | 221 | { |
218 | efx_dword_t reg; | 222 | efx_dword_t reg; |
219 | 223 | ||
220 | if (FALCON_REV(efx) < FALCON_REV_B0) | 224 | if (falcon_rev(efx) < FALCON_REV_B0) |
221 | return 1; | 225 | return 1; |
222 | 226 | ||
223 | /* The ISR latches, so clear it and re-read */ | 227 | /* The ISR latches, so clear it and re-read */ |
224 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); | 228 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); |
225 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); | 229 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); |
226 | 230 | ||
227 | if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || | 231 | if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || |
228 | EFX_DWORD_FIELD(reg, XM_RMTFLT)) { | 232 | EFX_DWORD_FIELD(reg, XM_RMTFLT)) { |
229 | EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); | 233 | EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); |
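The double falcon_xmac_readl() of XM_MGT_INT_REG_MAC_B0 above is the usual idiom for a latched, clear-on-read status register: the first read returns and discards whatever latched since the last poll, and the second read reflects the current fault state. A stubbed user-space sketch of the idiom (the fake register stands in for real hardware):

    #include <stdio.h>

    /* Fake clear-on-read latch: a real device would set bits on faults. */
    static unsigned int fake_isr = 0x3;   /* pretend two stale faults latched */

    static unsigned int read_isr_clear_on_read(void)
    {
        unsigned int v = fake_isr;
        fake_isr = 0;                     /* reading clears the latch */
        return v;
    }

    int main(void)
    {
        /* First read flushes stale latched bits... */
        (void)read_isr_clear_on_read();
        /* ...second read reflects the current fault state only. */
        unsigned int now = read_isr_clear_on_read();

        printf("current faults: 0x%x\n", now);
        return 0;
    }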
@@ -237,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable) | |||
237 | { | 241 | { |
238 | efx_dword_t reg; | 242 | efx_dword_t reg; |
239 | 243 | ||
240 | if (FALCON_REV(efx) < FALCON_REV_B0) | 244 | if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) |
241 | return; | 245 | return; |
242 | 246 | ||
243 | /* Flush the ISR */ | 247 | /* Flush the ISR */ |
@@ -284,6 +288,9 @@ int falcon_xaui_link_ok(struct efx_nic *efx) | |||
284 | efx_dword_t reg; | 288 | efx_dword_t reg; |
285 | int align_done, sync_status, link_ok = 0; | 289 | int align_done, sync_status, link_ok = 0; |
286 | 290 | ||
291 | if (LOOPBACK_INTERNAL(efx)) | ||
292 | return 1; | ||
293 | |||
287 | /* Read link status */ | 294 | /* Read link status */ |
288 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); | 295 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); |
289 | 296 | ||
@@ -374,6 +381,61 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx) | |||
374 | falcon_xmac_writel(efx, ®, XM_ADR_HI_REG_MAC); | 381 | falcon_xmac_writel(efx, ®, XM_ADR_HI_REG_MAC); |
375 | } | 382 | } |
376 | 383 | ||
384 | static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) | ||
385 | { | ||
386 | efx_dword_t reg; | ||
387 | int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; | ||
388 | int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; | ||
389 | int xgmii_loopback = | ||
390 | (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0; | ||
391 | |||
392 | /* XGXS block is flaky and will need to be reset if moving | ||
393 | * into or out of XGMII, XGXS or XAUI loopbacks. */ | ||
394 | if (EFX_WORKAROUND_5147(efx)) { | ||
395 | int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; | ||
396 | int reset_xgxs; | ||
397 | |||
398 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); | ||
399 | old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); | ||
400 | old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); | ||
401 | |||
402 | falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); | ||
403 | old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); | ||
404 | |||
405 | /* The PHY driver may have turned XAUI off */ | ||
406 | reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || | ||
407 | (xaui_loopback != old_xaui_loopback) || | ||
408 | (xgmii_loopback != old_xgmii_loopback)); | ||
409 | if (reset_xgxs) { | ||
410 | falcon_xmac_readl(efx, ®, XX_PWR_RST_REG_MAC); | ||
411 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); | ||
412 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); | ||
413 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
414 | udelay(1); | ||
415 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0); | ||
416 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0); | ||
417 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
418 | udelay(1); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); | ||
423 | EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, | ||
424 | (xgxs_loopback || xaui_loopback) ? | ||
425 | XX_FORCE_SIG_DECODE_FORCED : 0); | ||
426 | EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); | ||
427 | EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); | ||
428 | falcon_xmac_writel(efx, ®, XX_CORE_STAT_REG_MAC); | ||
429 | |||
430 | falcon_xmac_readl(efx, ®, XX_SD_CTL_REG_MAC); | ||
431 | EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); | ||
432 | EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); | ||
433 | EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); | ||
434 | EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); | ||
435 | falcon_xmac_writel(efx, ®, XX_SD_CTL_REG_MAC); | ||
436 | } | ||
437 | |||
438 | |||
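falcon_reconfigure_xgxs_core() above strobes the XGXS TX/RX reset bits: assert both, write, wait, deassert both, write, wait. That write-1/delay/write-0/delay pulse is the standard way to reset a block whose reset bits do not self-clear. A stubbed sketch of the pulse, with invented bit positions and a fake register in place of XX_PWR_RST_REG_MAC:

    #include <stdio.h>
    #include <unistd.h>

    #define RSTXGXSTX_EN (1u << 1)   /* illustrative bit positions */
    #define RSTXGXSRX_EN (1u << 0)

    static unsigned int fake_pwr_rst; /* stand-in for the reset register */

    static unsigned int reg_read(void)      { return fake_pwr_rst; }
    static void reg_write(unsigned int v)   { fake_pwr_rst = v; }

    static void pulse_xgxs_reset(void)
    {
        unsigned int v = reg_read();

        /* Assert both reset bits and give the block time to latch them */
        reg_write(v | RSTXGXSTX_EN | RSTXGXSRX_EN);
        usleep(1);

        /* Deassert and wait again before touching the block */
        reg_write(v & ~(RSTXGXSTX_EN | RSTXGXSRX_EN));
        usleep(1);
    }

    int main(void)
    {
        pulse_xgxs_reset();
        printf("reset register now 0x%x\n", fake_pwr_rst);
        return 0;
    }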
377 | /* Try to bring the Falcon side of the Falcon-PHY XAUI link back up if it | 439 | /* Try to bring the Falcon side of the Falcon-PHY XAUI link back up if it |
378 | * fails to come up on its own. Bash it until it comes back up */ | 440 | * fails to come up on its own. Bash it until it comes back up */ |
379 | static int falcon_check_xaui_link_up(struct efx_nic *efx) | 441 | static int falcon_check_xaui_link_up(struct efx_nic *efx) |
@@ -382,7 +444,8 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) | |||
382 | tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; | 444 | tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; |
383 | max_tries = tries; | 445 | max_tries = tries; |
384 | 446 | ||
385 | if (efx->phy_type == PHY_TYPE_NONE) | 447 | if ((efx->loopback_mode == LOOPBACK_NETWORK) || |
448 | (efx->phy_type == PHY_TYPE_NONE)) | ||
386 | return 0; | 449 | return 0; |
387 | 450 | ||
388 | while (tries) { | 451 | while (tries) { |
@@ -391,12 +454,12 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) | |||
391 | 454 | ||
392 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", | 455 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", |
393 | __func__, tries); | 456 | __func__, tries); |
394 | (void) falcon_reset_xaui(efx); | 457 | falcon_reset_xaui(efx); |
395 | udelay(200); | 458 | udelay(200); |
396 | tries--; | 459 | tries--; |
397 | } | 460 | } |
398 | 461 | ||
399 | EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n", | 462 | EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n", |
400 | max_tries); | 463 | max_tries); |
401 | return 0; | 464 | return 0; |
402 | } | 465 | } |
@@ -408,8 +471,13 @@ void falcon_reconfigure_xmac(struct efx_nic *efx) | |||
408 | falcon_mask_status_intr(efx, 0); | 471 | falcon_mask_status_intr(efx, 0); |
409 | 472 | ||
410 | falcon_deconfigure_mac_wrapper(efx); | 473 | falcon_deconfigure_mac_wrapper(efx); |
474 | |||
475 | efx->tx_disabled = LOOPBACK_INTERNAL(efx); | ||
411 | efx->phy_op->reconfigure(efx); | 476 | efx->phy_op->reconfigure(efx); |
477 | |||
478 | falcon_reconfigure_xgxs_core(efx); | ||
412 | falcon_reconfigure_xmac_core(efx); | 479 | falcon_reconfigure_xmac_core(efx); |
480 | |||
413 | falcon_reconfigure_mac_wrapper(efx); | 481 | falcon_reconfigure_mac_wrapper(efx); |
414 | 482 | ||
415 | /* Ensure XAUI link is up */ | 483 | /* Ensure XAUI link is up */ |
@@ -491,18 +559,20 @@ void falcon_update_stats_xmac(struct efx_nic *efx) | |||
491 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes); | 559 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes); |
492 | } | 560 | } |
493 | 561 | ||
494 | #define EFX_XAUI_RETRAIN_MAX 8 | ||
495 | |||
496 | int falcon_check_xmac(struct efx_nic *efx) | 562 | int falcon_check_xmac(struct efx_nic *efx) |
497 | { | 563 | { |
498 | unsigned xaui_link_ok; | 564 | unsigned xaui_link_ok; |
499 | int rc; | 565 | int rc; |
500 | 566 | ||
567 | if ((efx->loopback_mode == LOOPBACK_NETWORK) || | ||
568 | (efx->phy_type == PHY_TYPE_NONE)) | ||
569 | return 0; | ||
570 | |||
501 | falcon_mask_status_intr(efx, 0); | 571 | falcon_mask_status_intr(efx, 0); |
502 | xaui_link_ok = falcon_xaui_link_ok(efx); | 572 | xaui_link_ok = falcon_xaui_link_ok(efx); |
503 | 573 | ||
504 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) | 574 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) |
505 | (void) falcon_reset_xaui(efx); | 575 | falcon_reset_xaui(efx); |
506 | 576 | ||
507 | /* Call the PHY check_hw routine */ | 577 | /* Call the PHY check_hw routine */ |
508 | rc = efx->phy_op->check_hw(efx); | 578 | rc = efx->phy_op->check_hw(efx); |
@@ -569,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) | |||
569 | reset = ((flow_control & EFX_FC_TX) && | 639 | reset = ((flow_control & EFX_FC_TX) && |
570 | !(efx->flow_control & EFX_FC_TX)); | 640 | !(efx->flow_control & EFX_FC_TX)); |
571 | if (EFX_WORKAROUND_11482(efx) && reset) { | 641 | if (EFX_WORKAROUND_11482(efx) && reset) { |
572 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 642 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
573 | /* Recover by resetting the EM block */ | 643 | /* Recover by resetting the EM block */ |
574 | if (efx->link_up) | 644 | if (efx->link_up) |
575 | falcon_drain_tx_fifo(efx); | 645 | falcon_drain_tx_fifo(efx); |
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index dc06bb0aa575..c4f540e93b79 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c | |||
@@ -44,6 +44,9 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd, | |||
44 | int status; | 44 | int status; |
45 | int phy_id = efx->mii.phy_id; | 45 | int phy_id = efx->mii.phy_id; |
46 | 46 | ||
47 | if (LOOPBACK_INTERNAL(efx)) | ||
48 | return 0; | ||
49 | |||
47 | /* Read MMD STATUS2 to check it is responding. */ | 50 | /* Read MMD STATUS2 to check it is responding. */ |
48 | status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); | 51 | status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); |
49 | if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & | 52 | if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & |
@@ -164,6 +167,22 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | |||
164 | int mmd = 0; | 167 | int mmd = 0; |
165 | int good; | 168 | int good; |
166 | 169 | ||
170 | /* If the port is in loopback, then we should only consider a subset | ||
171 | * of MMDs */ | ||
172 | if (LOOPBACK_INTERNAL(efx)) | ||
173 | return 1; | ||
174 | else if (efx->loopback_mode == LOOPBACK_NETWORK) | ||
175 | return 0; | ||
176 | else if (efx->loopback_mode == LOOPBACK_PHYXS) | ||
177 | mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | | ||
178 | MDIO_MMDREG_DEVS0_PCS | | ||
179 | MDIO_MMDREG_DEVS0_PMAPMD); | ||
180 | else if (efx->loopback_mode == LOOPBACK_PCS) | ||
181 | mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS | | ||
182 | MDIO_MMDREG_DEVS0_PMAPMD); | ||
183 | else if (efx->loopback_mode == LOOPBACK_PMAPMD) | ||
184 | mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD; | ||
185 | |||
167 | while (mmd_mask) { | 186 | while (mmd_mask) { |
168 | if (mmd_mask & 1) { | 187 | if (mmd_mask & 1) { |
169 | /* Double reads because link state is latched, and a | 188 | /* Double reads because link state is latched, and a |
@@ -182,6 +201,65 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | |||
182 | return ok; | 201 | return ok; |
183 | } | 202 | } |
184 | 203 | ||
204 | void mdio_clause45_transmit_disable(struct efx_nic *efx) | ||
205 | { | ||
206 | int phy_id = efx->mii.phy_id; | ||
207 | int ctrl1, ctrl2; | ||
208 | |||
209 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | ||
210 | MDIO_MMDREG_TXDIS); | ||
211 | if (efx->tx_disabled) | ||
212 | ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); | ||
213 | else | ||
214 | ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); | ||
215 | if (ctrl1 != ctrl2) | ||
216 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, | ||
217 | MDIO_MMDREG_TXDIS, ctrl2); | ||
218 | } | ||
219 | |||
220 | void mdio_clause45_phy_reconfigure(struct efx_nic *efx) | ||
221 | { | ||
222 | int phy_id = efx->mii.phy_id; | ||
223 | int ctrl1, ctrl2; | ||
224 | |||
225 | /* Handle (with debouncing) PMA/PMD loopback */ | ||
226 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | ||
227 | MDIO_MMDREG_CTRL1); | ||
228 | |||
229 | if (efx->loopback_mode == LOOPBACK_PMAPMD) | ||
230 | ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); | ||
231 | else | ||
232 | ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); | ||
233 | |||
234 | if (ctrl1 != ctrl2) | ||
235 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, | ||
236 | MDIO_MMDREG_CTRL1, ctrl2); | ||
237 | |||
238 | /* Handle (with debouncing) PCS loopback */ | ||
239 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS, | ||
240 | MDIO_MMDREG_CTRL1); | ||
241 | if (efx->loopback_mode == LOOPBACK_PCS) | ||
242 | ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
243 | else | ||
244 | ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
245 | |||
246 | if (ctrl1 != ctrl2) | ||
247 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS, | ||
248 | MDIO_MMDREG_CTRL1, ctrl2); | ||
249 | |||
250 | /* Handle (with debouncing) PHYXS network loopback */ | ||
251 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, | ||
252 | MDIO_MMDREG_CTRL1); | ||
253 | if (efx->loopback_mode == LOOPBACK_NETWORK) | ||
254 | ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
255 | else | ||
256 | ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); | ||
257 | |||
258 | if (ctrl1 != ctrl2) | ||
259 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, | ||
260 | MDIO_MMDREG_CTRL1, ctrl2); | ||
261 | } | ||
262 | |||
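mdio_clause45_phy_reconfigure() above repeats one "debouncing" idiom three times: read the control register, compute the desired value, and write back only if it changed. MDIO writes are slow and some PHYs react to any control write, so skipping no-op writes is worthwhile. A distilled user-space sketch against a fake register file (register and bit numbers are illustrative only):

    #include <stdio.h>

    static int fake_regs[32] = { [1] = 0x2040 }; /* fake CTRL1 contents */
    static int mdio_writes;                      /* count real bus writes */

    static int mdio_read(int reg)            { return fake_regs[reg]; }
    static void mdio_write(int reg, int val) { fake_regs[reg] = val; mdio_writes++; }

    /* Set or clear one bit, touching the bus only when the value changes. */
    static void mdio_set_flag(int reg, int bit, int enable)
    {
        int old = mdio_read(reg);
        int want = enable ? (old | (1 << bit)) : (old & ~(1 << bit));

        if (want != old)
            mdio_write(reg, want);
    }

    int main(void)
    {
        mdio_set_flag(1, 14, 1);  /* enable loopback: one write */
        mdio_set_flag(1, 14, 1);  /* already set: no write */
        printf("bus writes: %d\n", mdio_writes);
        return 0;
    }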
185 | /** | 263 | /** |
186 | * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. | 264 | * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. |
187 | * @efx: Efx NIC | 265 | * @efx: Efx NIC |
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 2214b6d820a7..cb99f3f4491c 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h | |||
@@ -44,11 +44,16 @@ | |||
44 | #define MDIO_MMDREG_DEVS1 (6) | 44 | #define MDIO_MMDREG_DEVS1 (6) |
45 | #define MDIO_MMDREG_CTRL2 (7) | 45 | #define MDIO_MMDREG_CTRL2 (7) |
46 | #define MDIO_MMDREG_STAT2 (8) | 46 | #define MDIO_MMDREG_STAT2 (8) |
47 | #define MDIO_MMDREG_TXDIS (9) | ||
47 | 48 | ||
48 | /* Bits in MMDREG_CTRL1 */ | 49 | /* Bits in MMDREG_CTRL1 */ |
49 | /* Reset */ | 50 | /* Reset */ |
50 | #define MDIO_MMDREG_CTRL1_RESET_LBN (15) | 51 | #define MDIO_MMDREG_CTRL1_RESET_LBN (15) |
51 | #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) | 52 | #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) |
53 | /* Loopback */ | ||
54 | /* Loopback bit for WIS, PCS, PHY XS and DTE XS */ | ||
55 | #define MDIO_MMDREG_CTRL1_LBACK_LBN (14) | ||
56 | #define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1) | ||
52 | 57 | ||
53 | /* Bits in MMDREG_STAT1 */ | 58 | /* Bits in MMDREG_STAT1 */ |
54 | #define MDIO_MMDREG_STAT1_FAULT_LBN (7) | 59 | #define MDIO_MMDREG_STAT1_FAULT_LBN (7) |
@@ -56,6 +61,9 @@ | |||
56 | /* Link state */ | 61 | /* Link state */ |
57 | #define MDIO_MMDREG_STAT1_LINK_LBN (2) | 62 | #define MDIO_MMDREG_STAT1_LINK_LBN (2) |
58 | #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) | 63 | #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) |
64 | /* Low power ability */ | ||
65 | #define MDIO_MMDREG_STAT1_LPABLE_LBN (1) | ||
66 | #define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1) | ||
59 | 67 | ||
60 | /* Bits in ID reg */ | 68 | /* Bits in ID reg */ |
61 | #define MDIO_ID_REV(_id32) (_id32 & 0xf) | 69 | #define MDIO_ID_REV(_id32) (_id32 & 0xf) |
@@ -76,6 +84,14 @@ | |||
76 | #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) | 84 | #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) |
77 | #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) | 85 | #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) |
78 | 86 | ||
87 | /* Bits in MMDREG_TXDIS */ | ||
88 | #define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0) | ||
89 | #define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1) | ||
90 | |||
91 | /* MMD-specific bits, ordered by MMD, then register */ | ||
92 | #define MDIO_PMAPMD_CTRL1_LBACK_LBN (0) | ||
93 | #define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1) | ||
94 | |||
79 | /* PMA type (4 bits) */ | 95 | /* PMA type (4 bits) */ |
80 | #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) | 96 | #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) |
81 | #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) | 97 | #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) |
@@ -95,7 +111,7 @@ | |||
95 | #define MDIO_PMAPMD_CTRL2_10_BT (0xf) | 111 | #define MDIO_PMAPMD_CTRL2_10_BT (0xf) |
96 | #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) | 112 | #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) |
97 | 113 | ||
98 | /* /\* PHY XGXS lane state *\/ */ | 114 | /* PHY XGXS lane state */ |
99 | #define MDIO_PHYXS_LANE_STATE (0x18) | 115 | #define MDIO_PHYXS_LANE_STATE (0x18) |
100 | #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) | 116 | #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) |
101 | 117 | ||
@@ -217,6 +233,12 @@ int mdio_clause45_check_mmds(struct efx_nic *efx, | |||
217 | extern int mdio_clause45_links_ok(struct efx_nic *efx, | 233 | extern int mdio_clause45_links_ok(struct efx_nic *efx, |
218 | unsigned int mmd_mask); | 234 | unsigned int mmd_mask); |
219 | 235 | ||
236 | /* Generic transmit disable support through PMAPMD */ | ||
237 | extern void mdio_clause45_transmit_disable(struct efx_nic *efx); | ||
238 | |||
239 | /* Generic part of reconfigure: set/clear loopback bits */ | ||
240 | extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx); | ||
241 | |||
220 | /* Read (some of) the PHY settings over MDIO */ | 242 | /* Read (some of) the PHY settings over MDIO */ |
221 | extern void mdio_clause45_get_settings(struct efx_nic *efx, | 243 | extern void mdio_clause45_get_settings(struct efx_nic *efx, |
222 | struct ethtool_cmd *ecmd); | 244 | struct ethtool_cmd *ecmd); |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index c505482c2520..5e20e7551dae 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
@@ -42,7 +42,7 @@ | |||
42 | #ifndef EFX_DRIVER_NAME | 42 | #ifndef EFX_DRIVER_NAME |
43 | #define EFX_DRIVER_NAME "sfc" | 43 | #define EFX_DRIVER_NAME "sfc" |
44 | #endif | 44 | #endif |
45 | #define EFX_DRIVER_VERSION "2.2.0136" | 45 | #define EFX_DRIVER_VERSION "2.2" |
46 | 46 | ||
47 | #ifdef EFX_ENABLE_DEBUG | 47 | #ifdef EFX_ENABLE_DEBUG |
48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | 48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) |
@@ -52,28 +52,19 @@ | |||
52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | 52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #define NET_DEV_REGISTERED(efx) \ | ||
56 | ((efx)->net_dev->reg_state == NETREG_REGISTERED) | ||
57 | |||
58 | /* Include net device name in log messages if it has been registered. | ||
59 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
60 | * are harmless. | ||
61 | */ | ||
62 | #define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") | ||
63 | |||
64 | /* Un-rate-limited logging */ | 55 | /* Un-rate-limited logging */ |
65 | #define EFX_ERR(efx, fmt, args...) \ | 56 | #define EFX_ERR(efx, fmt, args...) \ |
66 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) | 57 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) |
67 | 58 | ||
68 | #define EFX_INFO(efx, fmt, args...) \ | 59 | #define EFX_INFO(efx, fmt, args...) \ |
69 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) | 60 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) |
70 | 61 | ||
71 | #ifdef EFX_ENABLE_DEBUG | 62 | #ifdef EFX_ENABLE_DEBUG |
72 | #define EFX_LOG(efx, fmt, args...) \ | 63 | #define EFX_LOG(efx, fmt, args...) \ |
73 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 64 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
74 | #else | 65 | #else |
75 | #define EFX_LOG(efx, fmt, args...) \ | 66 | #define EFX_LOG(efx, fmt, args...) \ |
76 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 67 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
77 | #endif | 68 | #endif |
78 | 69 | ||
79 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) | 70 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) |
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) | |||
90 | #define EFX_LOG_RL(efx, fmt, args...) \ | 81 | #define EFX_LOG_RL(efx, fmt, args...) \ |
91 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) | 82 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) |
92 | 83 | ||
93 | /* Kernel headers may redefine inline anyway */ | ||
94 | #ifndef inline | ||
95 | #define inline inline __attribute__ ((always_inline)) | ||
96 | #endif | ||
97 | |||
98 | /************************************************************************** | 84 | /************************************************************************** |
99 | * | 85 | * |
100 | * Efx data structures | 86 | * Efx data structures |
@@ -134,6 +120,8 @@ struct efx_special_buffer { | |||
134 | * Set only on the final fragment of a packet; %NULL for all other | 120 | * Set only on the final fragment of a packet; %NULL for all other |
135 | * fragments. When this fragment completes, then we can free this | 121 | * fragments. When this fragment completes, then we can free this |
136 | * skb. | 122 | * skb. |
123 | * @tsoh: The associated TSO header structure, or %NULL if this | ||
124 | * buffer is not a TSO header. | ||
137 | * @dma_addr: DMA address of the fragment. | 125 | * @dma_addr: DMA address of the fragment. |
138 | * @len: Length of this fragment. | 126 | * @len: Length of this fragment. |
139 | * This field is zero when the queue slot is empty. | 127 | * This field is zero when the queue slot is empty. |
@@ -144,6 +132,7 @@ struct efx_special_buffer { | |||
144 | */ | 132 | */ |
145 | struct efx_tx_buffer { | 133 | struct efx_tx_buffer { |
146 | const struct sk_buff *skb; | 134 | const struct sk_buff *skb; |
135 | struct efx_tso_header *tsoh; | ||
147 | dma_addr_t dma_addr; | 136 | dma_addr_t dma_addr; |
148 | unsigned short len; | 137 | unsigned short len; |
149 | unsigned char continuation; | 138 | unsigned char continuation; |
@@ -187,6 +176,13 @@ struct efx_tx_buffer { | |||
187 | * variable indicates that the queue is full. This is to | 176 | * variable indicates that the queue is full. This is to |
188 | * avoid cache-line ping-pong between the xmit path and the | 177 | * avoid cache-line ping-pong between the xmit path and the |
189 | * completion path. | 178 | * completion path. |
179 | * @tso_headers_free: A list of TSO headers allocated for this TX queue | ||
180 | * that are not in use, and so available for new TSO sends. The list | ||
181 | * is protected by the TX queue lock. | ||
182 | * @tso_bursts: Number of times TSO xmit invoked by kernel | ||
183 | * @tso_long_headers: Number of packets with headers too long for standard | ||
184 | * blocks | ||
185 | * @tso_packets: Number of packets via the TSO xmit path | ||
190 | */ | 186 | */ |
191 | struct efx_tx_queue { | 187 | struct efx_tx_queue { |
192 | /* Members which don't change on the fast path */ | 188 | /* Members which don't change on the fast path */ |
@@ -206,6 +202,10 @@ struct efx_tx_queue { | |||
206 | unsigned int insert_count ____cacheline_aligned_in_smp; | 202 | unsigned int insert_count ____cacheline_aligned_in_smp; |
207 | unsigned int write_count; | 203 | unsigned int write_count; |
208 | unsigned int old_read_count; | 204 | unsigned int old_read_count; |
205 | struct efx_tso_header *tso_headers_free; | ||
206 | unsigned int tso_bursts; | ||
207 | unsigned int tso_long_headers; | ||
208 | unsigned int tso_packets; | ||
209 | }; | 209 | }; |
210 | 210 | ||
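The @tso_headers_free kernel-doc added above describes a per-queue free list of preallocated TSO header blocks: a header is popped for each TSO send and pushed back on completion, so steady-state TSO sends avoid the allocator. A minimal user-space sketch of that push/pop pattern (the struct layout here is invented; the real struct efx_tso_header is defined in tx.c):

    #include <stdio.h>
    #include <stdlib.h>

    struct tso_header {
        struct tso_header *next;  /* free-list linkage */
        /* header buffer + DMA address would follow in the real driver */
    };

    static struct tso_header *tso_headers_free;

    static struct tso_header *tso_header_get(void)
    {
        struct tso_header *tsoh = tso_headers_free;

        if (tsoh)
            tso_headers_free = tsoh->next;   /* fast path: pop */
        else
            tsoh = calloc(1, sizeof(*tsoh)); /* slow path: allocate */
        return tsoh;
    }

    static void tso_header_put(struct tso_header *tsoh)
    {
        tsoh->next = tso_headers_free;       /* push back for reuse */
        tso_headers_free = tsoh;
    }

    int main(void)
    {
        struct tso_header *a = tso_header_get();

        tso_header_put(a);
        printf("recycled: %s\n", tso_header_get() == a ? "yes" : "no");
        free(a);
        return 0;
    }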
211 | /** | 211 | /** |
@@ -434,6 +434,9 @@ struct efx_board { | |||
434 | struct efx_blinker blinker; | 434 | struct efx_blinker blinker; |
435 | }; | 435 | }; |
436 | 436 | ||
437 | #define STRING_TABLE_LOOKUP(val, member) \ | ||
438 | member ## _names[val] | ||
439 | |||
437 | enum efx_int_mode { | 440 | enum efx_int_mode { |
438 | /* Be careful if altering to correct macro below */ | 441 | /* Be careful if altering to correct macro below */ |
439 | EFX_INT_MODE_MSIX = 0, | 442 | EFX_INT_MODE_MSIX = 0, |
@@ -506,6 +509,7 @@ enum efx_fc_type { | |||
506 | * @check_hw: Check hardware | 509 | * @check_hw: Check hardware |
507 | * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) | 510 | * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) |
508 | * @mmds: MMD presence mask | 511 | * @mmds: MMD presence mask |
512 | * @loopbacks: Supported loopback modes mask | ||
509 | */ | 513 | */ |
510 | struct efx_phy_operations { | 514 | struct efx_phy_operations { |
511 | int (*init) (struct efx_nic *efx); | 515 | int (*init) (struct efx_nic *efx); |
@@ -515,6 +519,7 @@ struct efx_phy_operations { | |||
515 | int (*check_hw) (struct efx_nic *efx); | 519 | int (*check_hw) (struct efx_nic *efx); |
516 | void (*reset_xaui) (struct efx_nic *efx); | 520 | void (*reset_xaui) (struct efx_nic *efx); |
517 | int mmds; | 521 | int mmds; |
522 | unsigned loopbacks; | ||
518 | }; | 523 | }; |
519 | 524 | ||
520 | /* | 525 | /* |
@@ -653,7 +658,6 @@ union efx_multicast_hash { | |||
653 | * @phy_op: PHY interface | 658 | * @phy_op: PHY interface |
654 | * @phy_data: PHY private data (including PHY-specific stats) | 659 | * @phy_data: PHY private data (including PHY-specific stats) |
655 | * @mii: PHY interface | 660 | * @mii: PHY interface |
656 | * @phy_powered: PHY power state | ||
657 | * @tx_disabled: PHY transmitter turned off | 661 | * @tx_disabled: PHY transmitter turned off |
658 | * @link_up: Link status | 662 | * @link_up: Link status |
659 | * @link_options: Link options (MII/GMII format) | 663 | * @link_options: Link options (MII/GMII format) |
@@ -662,6 +666,9 @@ union efx_multicast_hash { | |||
662 | * @multicast_hash: Multicast hash table | 666 | * @multicast_hash: Multicast hash table |
663 | * @flow_control: Flow control flags - separate RX/TX so can't use link_options | 667 | * @flow_control: Flow control flags - separate RX/TX so can't use link_options |
664 | * @reconfigure_work: work item for dealing with PHY events | 668 | * @reconfigure_work: work item for dealing with PHY events |
669 | * @loopback_mode: Loopback status | ||
670 | * @loopback_modes: Supported loopback mode bitmask | ||
671 | * @loopback_selftest: Offline self-test private state | ||
665 | * | 672 | * |
666 | * The @priv field of the corresponding &struct net_device points to | 673 | * The @priv field of the corresponding &struct net_device points to |
667 | * this. | 674 | * this. |
@@ -674,7 +681,7 @@ struct efx_nic { | |||
674 | struct workqueue_struct *workqueue; | 681 | struct workqueue_struct *workqueue; |
675 | struct work_struct reset_work; | 682 | struct work_struct reset_work; |
676 | struct delayed_work monitor_work; | 683 | struct delayed_work monitor_work; |
677 | unsigned long membase_phys; | 684 | resource_size_t membase_phys; |
678 | void __iomem *membase; | 685 | void __iomem *membase; |
679 | spinlock_t biu_lock; | 686 | spinlock_t biu_lock; |
680 | enum efx_int_mode interrupt_mode; | 687 | enum efx_int_mode interrupt_mode; |
@@ -698,7 +705,7 @@ struct efx_nic { | |||
698 | 705 | ||
699 | unsigned n_rx_nodesc_drop_cnt; | 706 | unsigned n_rx_nodesc_drop_cnt; |
700 | 707 | ||
701 | void *nic_data; | 708 | struct falcon_nic_data *nic_data; |
702 | 709 | ||
703 | struct mutex mac_lock; | 710 | struct mutex mac_lock; |
704 | int port_enabled; | 711 | int port_enabled; |
@@ -721,6 +728,7 @@ struct efx_nic { | |||
721 | struct efx_phy_operations *phy_op; | 728 | struct efx_phy_operations *phy_op; |
722 | void *phy_data; | 729 | void *phy_data; |
723 | struct mii_if_info mii; | 730 | struct mii_if_info mii; |
731 | unsigned tx_disabled; | ||
724 | 732 | ||
725 | int link_up; | 733 | int link_up; |
726 | unsigned int link_options; | 734 | unsigned int link_options; |
@@ -732,8 +740,26 @@ struct efx_nic { | |||
732 | struct work_struct reconfigure_work; | 740 | struct work_struct reconfigure_work; |
733 | 741 | ||
734 | atomic_t rx_reset; | 742 | atomic_t rx_reset; |
743 | enum efx_loopback_mode loopback_mode; | ||
744 | unsigned int loopback_modes; | ||
745 | |||
746 | void *loopback_selftest; | ||
735 | }; | 747 | }; |
736 | 748 | ||
749 | static inline int efx_dev_registered(struct efx_nic *efx) | ||
750 | { | ||
751 | return efx->net_dev->reg_state == NETREG_REGISTERED; | ||
752 | } | ||
753 | |||
754 | /* Net device name, for inclusion in log messages if it has been registered. | ||
755 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
756 | * are harmless. | ||
757 | */ | ||
758 | static inline const char *efx_dev_name(struct efx_nic *efx) | ||
759 | { | ||
760 | return efx_dev_registered(efx) ? efx->name : ""; | ||
761 | } | ||
762 | |||
737 | /** | 763 | /** |
738 | * struct efx_nic_type - Efx device type definition | 764 | * struct efx_nic_type - Efx device type definition |
739 | * @mem_bar: Memory BAR number | 765 | * @mem_bar: Memory BAR number |
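efx_dev_name() above deliberately reads the name cached in efx->name rather than following efx->net_dev->name, so a log message racing with (un)registration sees either a stable snapshot or an empty string, never a pointer mid-teardown. A toy sketch of that snapshot-and-flag pattern (all names here are invented for illustration):

    #include <stdio.h>
    #include <string.h>

    struct fake_nic {
        int registered;
        char name[16];   /* snapshot taken at registration time */
    };

    static const char *dev_name(const struct fake_nic *nic)
    {
        /* Unregistered devices log with an empty name instead of
         * chasing state that may be mid-teardown. */
        return nic->registered ? nic->name : "";
    }

    int main(void)
    {
        struct fake_nic nic = { 0 };

        printf("before: '%s'\n", dev_name(&nic));
        strcpy(nic.name, "eth0");
        nic.registered = 1;
        printf("after:  '%s'\n", dev_name(&nic));
        return 0;
    }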
@@ -769,7 +795,7 @@ struct efx_nic_type { | |||
769 | unsigned int txd_ring_mask; | 795 | unsigned int txd_ring_mask; |
770 | unsigned int rxd_ring_mask; | 796 | unsigned int rxd_ring_mask; |
771 | unsigned int evq_size; | 797 | unsigned int evq_size; |
772 | dma_addr_t max_dma_mask; | 798 | u64 max_dma_mask; |
773 | unsigned int tx_dma_mask; | 799 | unsigned int tx_dma_mask; |
774 | unsigned bug5391_mask; | 800 | unsigned bug5391_mask; |
775 | 801 | ||
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 551299b462ae..601b001437c0 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include "rx.h" | 19 | #include "rx.h" |
20 | #include "efx.h" | 20 | #include "efx.h" |
21 | #include "falcon.h" | 21 | #include "falcon.h" |
22 | #include "selftest.h" | ||
22 | #include "workarounds.h" | 23 | #include "workarounds.h" |
23 | 24 | ||
24 | /* Number of RX descriptors pushed at once. */ | 25 | /* Number of RX descriptors pushed at once. */ |
@@ -85,14 +86,17 @@ static unsigned int rx_refill_limit = 95; | |||
85 | */ | 86 | */ |
86 | #define EFX_RXD_HEAD_ROOM 2 | 87 | #define EFX_RXD_HEAD_ROOM 2 |
87 | 88 | ||
88 | /* Macros for zero-order pages (potentially) containing multiple RX buffers */ | 89 | static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) |
89 | #define RX_DATA_OFFSET(_data) \ | 90 | { |
90 | (((unsigned long) (_data)) & (PAGE_SIZE-1)) | 91 | /* Offset is always within one page, so we don't need to consider |
91 | #define RX_BUF_OFFSET(_rx_buf) \ | 92 | * the page order. |
92 | RX_DATA_OFFSET((_rx_buf)->data) | 93 | */ |
93 | 94 | return (__force unsigned long) buf->data & (PAGE_SIZE - 1); | |
94 | #define RX_PAGE_SIZE(_efx) \ | 95 | } |
95 | (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) | 96 | static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) |
97 | { | ||
98 | return PAGE_SIZE << efx->rx_buffer_order; | ||
99 | } | ||
96 | 100 | ||
97 | 101 | ||
98 | /************************************************************************** | 102 | /************************************************************************** |
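The two inline helpers replacing the RX page macros above reduce to simple arithmetic: a buffer's offset within its (possibly high-order) page is the low PAGE_SIZE-1 address bits, and the mapped size of an order-n allocation is PAGE_SIZE shifted left by n. The same arithmetic in a standalone form, with PAGE_SIZE fixed at 4096 for the demo:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096u

    /* Offset within one page: mask with PAGE_SIZE - 1. Valid because a
     * single RX buffer never straddles a page boundary. */
    static unsigned int rx_buf_offset(uintptr_t data)
    {
        return data & (DEMO_PAGE_SIZE - 1);
    }

    /* Bytes covered by an order-n page allocation: PAGE_SIZE << n. */
    static unsigned int rx_buf_size(unsigned int order)
    {
        return DEMO_PAGE_SIZE << order;
    }

    int main(void)
    {
        uintptr_t addr = 0x12345678;

        printf("offset of %#lx: %u\n", (unsigned long)addr,
               rx_buf_offset(addr));
        printf("order-0: %u bytes, order-2: %u bytes\n",
               rx_buf_size(0), rx_buf_size(2));
        return 0;
    }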
@@ -105,7 +109,7 @@ static unsigned int rx_refill_limit = 95; | |||
105 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, | 109 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, |
106 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) | 110 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) |
107 | { | 111 | { |
108 | struct efx_channel *channel = (struct efx_channel *)priv; | 112 | struct efx_channel *channel = priv; |
109 | struct iphdr *iph; | 113 | struct iphdr *iph; |
110 | struct tcphdr *th; | 114 | struct tcphdr *th; |
111 | 115 | ||
@@ -130,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, | |||
130 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, | 134 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, |
131 | void *priv) | 135 | void *priv) |
132 | { | 136 | { |
133 | struct efx_channel *channel = (struct efx_channel *)priv; | 137 | struct efx_channel *channel = priv; |
134 | struct ethhdr *eh; | 138 | struct ethhdr *eh; |
135 | struct iphdr *iph; | 139 | struct iphdr *iph; |
136 | 140 | ||
137 | /* We support EtherII and VLAN encapsulated IPv4 */ | 141 | /* We support EtherII and VLAN encapsulated IPv4 */ |
138 | eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); | 142 | eh = page_address(frag->page) + frag->page_offset; |
139 | *mac_hdr = eh; | 143 | *mac_hdr = eh; |
140 | 144 | ||
141 | if (eh->h_proto == htons(ETH_P_IP)) { | 145 | if (eh->h_proto == htons(ETH_P_IP)) { |
@@ -268,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
268 | return -ENOMEM; | 272 | return -ENOMEM; |
269 | 273 | ||
270 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, | 274 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, |
271 | 0, RX_PAGE_SIZE(efx), | 275 | 0, efx_rx_buf_size(efx), |
272 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
273 | 277 | ||
274 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { |
@@ -279,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
279 | 283 | ||
280 | rx_queue->buf_page = rx_buf->page; | 284 | rx_queue->buf_page = rx_buf->page; |
281 | rx_queue->buf_dma_addr = dma_addr; | 285 | rx_queue->buf_dma_addr = dma_addr; |
282 | rx_queue->buf_data = ((char *) page_address(rx_buf->page) + | 286 | rx_queue->buf_data = (page_address(rx_buf->page) + |
283 | EFX_PAGE_IP_ALIGN); | 287 | EFX_PAGE_IP_ALIGN); |
284 | } | 288 | } |
285 | 289 | ||
286 | offset = RX_DATA_OFFSET(rx_queue->buf_data); | ||
287 | rx_buf->len = bytes; | 290 | rx_buf->len = bytes; |
288 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
289 | rx_buf->data = rx_queue->buf_data; | 291 | rx_buf->data = rx_queue->buf_data; |
292 | offset = efx_rx_buf_offset(rx_buf); | ||
293 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
290 | 294 | ||
291 | /* Try to pack multiple buffers per page */ | 295 | /* Try to pack multiple buffers per page */ |
292 | if (efx->rx_buffer_order == 0) { | 296 | if (efx->rx_buffer_order == 0) { |
@@ -294,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
294 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); | 298 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); |
295 | offset += ((bytes + 0x1ff) & ~0x1ff); | 299 | offset += ((bytes + 0x1ff) & ~0x1ff); |
296 | 300 | ||
297 | space = RX_PAGE_SIZE(efx) - offset; | 301 | space = efx_rx_buf_size(efx) - offset; |
298 | if (space >= bytes) { | 302 | if (space >= bytes) { |
299 | /* Refs dropped on kernel releasing each skb */ | 303 | /* Refs dropped on kernel releasing each skb */ |
300 | get_page(rx_queue->buf_page); | 304 | get_page(rx_queue->buf_page); |
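The packing logic above rounds each RX buffer up to a 512-byte stride, (bytes + 0x1ff) & ~0x1ff, and keeps carving buffers from the same page while another whole buffer still fits. A quick standalone check of how many buffers of a plausible size pack into a 4096-byte page (the 1518-byte figure is just an example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int page = 4096, bytes = 1518; /* example buffer size */
        unsigned int stride = (bytes + 0x1ff) & ~0x1ffu; /* round to 512 */
        unsigned int offset = 0, count = 0;

        /* Mirror the driver's loop: advance by the rounded stride while
         * a whole buffer still fits in the remaining space. */
        while (page - offset >= bytes) {
            count++;
            offset += stride;
        }
        printf("stride %u -> %u buffers per page\n", stride, count);
        return 0;
    }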
@@ -343,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
343 | EFX_BUG_ON_PARANOID(rx_buf->skb); | 347 | EFX_BUG_ON_PARANOID(rx_buf->skb); |
344 | if (rx_buf->unmap_addr) { | 348 | if (rx_buf->unmap_addr) { |
345 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, | 349 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, |
346 | RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); | 350 | efx_rx_buf_size(efx), |
351 | PCI_DMA_FROMDEVICE); | ||
347 | rx_buf->unmap_addr = 0; | 352 | rx_buf->unmap_addr = 0; |
348 | } | 353 | } |
349 | } else if (likely(rx_buf->skb)) { | 354 | } else if (likely(rx_buf->skb)) { |
@@ -399,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
399 | return 0; | 404 | return 0; |
400 | 405 | ||
401 | /* Record minimum fill level */ | 406 | /* Record minimum fill level */ |
402 | if (unlikely(fill_level < rx_queue->min_fill)) | 407 | if (unlikely(fill_level < rx_queue->min_fill)) { |
403 | if (fill_level) | 408 | if (fill_level) |
404 | rx_queue->min_fill = fill_level; | 409 | rx_queue->min_fill = fill_level; |
410 | } | ||
405 | 411 | ||
406 | /* Acquire RX add lock. If this lock is contended, then a fast | 412 | /* Acquire RX add lock. If this lock is contended, then a fast |
407 | * fill must already be in progress (e.g. in the refill | 413 | * fill must already be in progress (e.g. in the refill |
@@ -551,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel, | |||
551 | struct skb_frag_struct frags; | 557 | struct skb_frag_struct frags; |
552 | 558 | ||
553 | frags.page = rx_buf->page; | 559 | frags.page = rx_buf->page; |
554 | frags.page_offset = RX_BUF_OFFSET(rx_buf); | 560 | frags.page_offset = efx_rx_buf_offset(rx_buf); |
555 | frags.size = rx_buf->len; | 561 | frags.size = rx_buf->len; |
556 | 562 | ||
557 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, | 563 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, |
@@ -596,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, | |||
596 | if (unlikely(rx_buf->len > hdr_len)) { | 602 | if (unlikely(rx_buf->len > hdr_len)) { |
597 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; | 603 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; |
598 | frag->page = rx_buf->page; | 604 | frag->page = rx_buf->page; |
599 | frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; | 605 | frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; |
600 | frag->size = skb->len - hdr_len; | 606 | frag->size = skb->len - hdr_len; |
601 | skb_shinfo(skb)->nr_frags = 1; | 607 | skb_shinfo(skb)->nr_frags = 1; |
602 | skb->data_len = frag->size; | 608 | skb->data_len = frag->size; |
@@ -683,6 +689,15 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
683 | struct sk_buff *skb; | 689 | struct sk_buff *skb; |
684 | int lro = efx->net_dev->features & NETIF_F_LRO; | 690 | int lro = efx->net_dev->features & NETIF_F_LRO; |
685 | 691 | ||
692 | /* If we're in a loopback test, pass the packet directly to the | ||
693 | * loopback layer, and free the rx_buf here | ||
694 | */ | ||
695 | if (unlikely(efx->loopback_selftest)) { | ||
696 | efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); | ||
697 | efx_free_rx_buffer(efx, rx_buf); | ||
698 | goto done; | ||
699 | } | ||
700 | |||
686 | if (rx_buf->skb) { | 701 | if (rx_buf->skb) { |
687 | prefetch(skb_shinfo(rx_buf->skb)); | 702 | prefetch(skb_shinfo(rx_buf->skb)); |
688 | 703 | ||
@@ -736,7 +751,6 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
736 | /* Update allocation strategy method */ | 751 | /* Update allocation strategy method */ |
737 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | 752 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; |
738 | 753 | ||
739 | /* fall-thru */ | ||
740 | done: | 754 | done: |
741 | efx->net_dev->last_rx = jiffies; | 755 | efx->net_dev->last_rx = jiffies; |
742 | } | 756 | } |
@@ -842,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
842 | /* For a page that is part-way through splitting into RX buffers */ | 856 | /* For a page that is part-way through splitting into RX buffers */ |
843 | if (rx_queue->buf_page != NULL) { | 857 | if (rx_queue->buf_page != NULL) { |
844 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, | 858 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, |
845 | RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); | 859 | efx_rx_buf_size(rx_queue->efx), |
860 | PCI_DMA_FROMDEVICE); | ||
846 | __free_pages(rx_queue->buf_page, | 861 | __free_pages(rx_queue->buf_page, |
847 | rx_queue->efx->rx_buffer_order); | 862 | rx_queue->efx->rx_buffer_order); |
848 | rx_queue->buf_page = NULL; | 863 | rx_queue->buf_page = NULL; |
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c new file mode 100644 index 000000000000..3b2de9fe7f27 --- /dev/null +++ b/drivers/net/sfc/selftest.c | |||
@@ -0,0 +1,719 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #include <linux/netdevice.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/ethtool.h> | ||
17 | #include <linux/ip.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <linux/udp.h> | ||
20 | #include <linux/rtnetlink.h> | ||
21 | #include <asm/io.h> | ||
22 | #include "net_driver.h" | ||
23 | #include "ethtool.h" | ||
24 | #include "efx.h" | ||
25 | #include "falcon.h" | ||
26 | #include "selftest.h" | ||
27 | #include "boards.h" | ||
28 | #include "workarounds.h" | ||
29 | #include "mac.h" | ||
30 | |||
31 | /* | ||
32 | * Loopback test packet structure | ||
33 | * | ||
34 | * The self-test should stress every RSS vector, and unfortunately | ||
35 | * Falcon only performs RSS on TCP/UDP packets. | ||
36 | */ | ||
37 | struct efx_loopback_payload { | ||
38 | struct ethhdr header; | ||
39 | struct iphdr ip; | ||
40 | struct udphdr udp; | ||
41 | __be16 iteration; | ||
42 | const char msg[64]; | ||
43 | } __attribute__ ((packed)); | ||
44 | |||
45 | /* Loopback test source MAC address */ | ||
46 | static const unsigned char payload_source[ETH_ALEN] = { | ||
47 | 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, | ||
48 | }; | ||
49 | |||
50 | static const char *payload_msg = | ||
51 | "Hello world! This is an Efx loopback test in progress!"; | ||
52 | |||
53 | /** | ||
54 | * efx_selftest_state - persistent state during a selftest | ||
55 | * @flush: Drop all packets in efx_loopback_rx_packet | ||
56 | * @packet_count: Number of packets being used in this test | ||
57 | * @skbs: An array of skbs transmitted | ||
58 | * @rx_good: RX good packet count | ||
59 | * @rx_bad: RX bad packet count | ||
60 | * @payload: Payload used in tests | ||
61 | */ | ||
62 | struct efx_selftest_state { | ||
63 | int flush; | ||
64 | int packet_count; | ||
65 | struct sk_buff **skbs; | ||
66 | atomic_t rx_good; | ||
67 | atomic_t rx_bad; | ||
68 | struct efx_loopback_payload payload; | ||
69 | }; | ||
70 | |||
71 | /************************************************************************** | ||
72 | * | ||
73 | * Configurable values | ||
74 | * | ||
75 | **************************************************************************/ | ||
76 | |||
77 | /* Level of loopback testing | ||
78 | * | ||
79 | * The maximum packet burst length is 16**(n-1), i.e. | ||
80 | * | ||
81 | * - Level 0 : no packets | ||
82 | * - Level 1 : 1 packet | ||
83 | * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) | ||
84 | * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packets, 1 * 256 packets) | ||
85 | * | ||
86 | */ | ||
87 | static unsigned int loopback_test_level = 3; | ||
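loopback_test_level above controls how many geometrically growing bursts each loopback mode sends; the totals quoted in the comment are sums of powers of 16 (16^0 + 16^1 + ...). A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* Level n sends bursts of 16^0, 16^1, ..., 16^(n-1) packets. */
        for (unsigned int level = 0; level <= 3; level++) {
            unsigned int total = 0, burst = 1;

            for (unsigned int i = 0; i < level; i++) {
                total += burst;
                burst *= 16;
            }
            printf("level %u: %u packets\n", level, total);
        }
        return 0;
    }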
88 | |||
89 | /************************************************************************** | ||
90 | * | ||
91 | * Interrupt and event queue testing | ||
92 | * | ||
93 | **************************************************************************/ | ||
94 | |||
95 | /* Test generation and receipt of interrupts */ | ||
96 | static int efx_test_interrupts(struct efx_nic *efx, | ||
97 | struct efx_self_tests *tests) | ||
98 | { | ||
99 | struct efx_channel *channel; | ||
100 | |||
101 | EFX_LOG(efx, "testing interrupts\n"); | ||
102 | tests->interrupt = -1; | ||
103 | |||
104 | /* Reset interrupt flag */ | ||
105 | efx->last_irq_cpu = -1; | ||
106 | smp_wmb(); | ||
107 | |||
108 | /* ACK each interrupting event queue. Receiving an interrupt due to | ||
109 | * traffic before a test event is raised is considered a pass */ | ||
110 | efx_for_each_channel_with_interrupt(channel, efx) { | ||
111 | if (channel->work_pending) | ||
112 | efx_process_channel_now(channel); | ||
113 | if (efx->last_irq_cpu >= 0) | ||
114 | goto success; | ||
115 | } | ||
116 | |||
117 | falcon_generate_interrupt(efx); | ||
118 | |||
119 | /* Wait for arrival of test interrupt. */ | ||
120 | EFX_LOG(efx, "waiting for test interrupt\n"); | ||
121 | schedule_timeout_uninterruptible(HZ / 10); | ||
122 | if (efx->last_irq_cpu >= 0) | ||
123 | goto success; | ||
124 | |||
125 | EFX_ERR(efx, "timed out waiting for interrupt\n"); | ||
126 | return -ETIMEDOUT; | ||
127 | |||
128 | success: | ||
129 | EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", | ||
130 | efx->interrupt_mode, efx->last_irq_cpu); | ||
131 | tests->interrupt = 1; | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /* Test generation and receipt of non-interrupting events */ | ||
136 | static int efx_test_eventq(struct efx_channel *channel, | ||
137 | struct efx_self_tests *tests) | ||
138 | { | ||
139 | unsigned int magic; | ||
140 | |||
141 | /* Channel specific code, limited to 20 bits */ | ||
142 | magic = (0x00010150 + channel->channel); | ||
143 | EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", | ||
144 | channel->channel, magic); | ||
145 | |||
146 | tests->eventq_dma[channel->channel] = -1; | ||
147 | tests->eventq_int[channel->channel] = 1; /* fake pass */ | ||
148 | tests->eventq_poll[channel->channel] = 1; /* fake pass */ | ||
149 | |||
150 | /* Reset flag and zero magic word */ | ||
151 | channel->efx->last_irq_cpu = -1; | ||
152 | channel->eventq_magic = 0; | ||
153 | smp_wmb(); | ||
154 | |||
155 | falcon_generate_test_event(channel, magic); | ||
156 | udelay(1); | ||
157 | |||
158 | efx_process_channel_now(channel); | ||
159 | if (channel->eventq_magic != magic) { | ||
160 | EFX_ERR(channel->efx, "channel %d failed to see test event\n", | ||
161 | channel->channel); | ||
162 | return -ETIMEDOUT; | ||
163 | } else { | ||
164 | tests->eventq_dma[channel->channel] = 1; | ||
165 | } | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | /* Test generation and receipt of interrupting events */ | ||
171 | static int efx_test_eventq_irq(struct efx_channel *channel, | ||
172 | struct efx_self_tests *tests) | ||
173 | { | ||
174 | unsigned int magic, count; | ||
175 | |||
176 | /* Channel specific code, limited to 20 bits */ | ||
177 | magic = (0x00010150 + channel->channel); | ||
178 | EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", | ||
179 | channel->channel, magic); | ||
180 | |||
181 | tests->eventq_dma[channel->channel] = -1; | ||
182 | tests->eventq_int[channel->channel] = -1; | ||
183 | tests->eventq_poll[channel->channel] = -1; | ||
184 | |||
185 | /* Reset flag and zero magic word */ | ||
186 | channel->efx->last_irq_cpu = -1; | ||
187 | channel->eventq_magic = 0; | ||
188 | smp_wmb(); | ||
189 | |||
190 | falcon_generate_test_event(channel, magic); | ||
191 | |||
192 | /* Wait for arrival of interrupt */ | ||
193 | count = 0; | ||
194 | do { | ||
195 | schedule_timeout_uninterruptible(HZ / 100); | ||
196 | |||
197 | if (channel->work_pending) | ||
198 | efx_process_channel_now(channel); | ||
199 | |||
200 | if (channel->eventq_magic == magic) | ||
201 | goto eventq_ok; | ||
202 | } while (++count < 2); | ||
203 | |||
204 | EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", | ||
205 | channel->channel); | ||
206 | |||
207 | /* See if interrupt arrived */ | ||
208 | if (channel->efx->last_irq_cpu >= 0) { | ||
209 | EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " | ||
210 | "during event queue test\n", channel->channel, | ||
211 | channel->efx->last_irq_cpu); | ||
212 | tests->eventq_int[channel->channel] = 1; | ||
213 | } | ||
214 | |||
215 | /* Check to see if event was received even if interrupt wasn't */ | ||
216 | efx_process_channel_now(channel); | ||
217 | if (channel->eventq_magic == magic) { | ||
218 | EFX_ERR(channel->efx, "channel %d event was generated, but " | ||
219 | "failed to trigger an interrupt\n", channel->channel); | ||
220 | tests->eventq_dma[channel->channel] = 1; | ||
221 | } | ||
222 | |||
223 | return -ETIMEDOUT; | ||
224 | eventq_ok: | ||
225 | EFX_LOG(channel->efx, "channel %d event queue passed\n", | ||
226 | channel->channel); | ||
227 | tests->eventq_dma[channel->channel] = 1; | ||
228 | tests->eventq_int[channel->channel] = 1; | ||
229 | tests->eventq_poll[channel->channel] = 1; | ||
230 | return 0; | ||
231 | } | ||
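
The three per-channel results written above decompose a failure: eventq_dma
records whether the magic event reached the host at all, eventq_int whether
the interrupt fired, and eventq_poll whether a synchronous poll finally
recovered the event. A minimal sketch of turning the triple into a
diagnosis (the helper below is hypothetical, not part of the driver):

    /* Hypothetical helper: classify an event-queue self-test result.
     * Each flag follows the convention above: 1 pass, -1 fail. */
    static const char *eventq_diagnosis(int dma, int intr, int poll)
    {
            if (dma == 1 && intr == 1 && poll == 1)
                    return "pass";
            if (dma == 1 && intr == -1)
                    return "event delivered, but no interrupt seen";
            return "event never reached the host (event queue DMA fault)";
    }
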
232 | |||
233 | /************************************************************************** | ||
234 | * | ||
235 | * PHY testing | ||
236 | * | ||
237 | **************************************************************************/ | ||
238 | |||
239 | /* Check PHY presence by reading the PHY ID registers */ | ||
240 | static int efx_test_phy(struct efx_nic *efx, | ||
241 | struct efx_self_tests *tests) | ||
242 | { | ||
243 | u16 physid1, physid2; | ||
244 | struct mii_if_info *mii = &efx->mii; | ||
245 | struct net_device *net_dev = efx->net_dev; | ||
246 | |||
247 | if (efx->phy_type == PHY_TYPE_NONE) | ||
248 | return 0; | ||
249 | |||
250 | EFX_LOG(efx, "testing PHY presence\n"); | ||
251 | tests->phy_ok = -1; | ||
252 | |||
253 | physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); | ||
254 | physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); | ||
255 | |||
256 | if ((physid1 != 0x0000) && (physid1 != 0xffff) && | ||
257 | (physid2 != 0x0000) && (physid2 != 0xffff)) { | ||
258 | EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n", | ||
259 | mii->phy_id, physid1, physid2); | ||
260 | tests->phy_ok = 1; | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id); | ||
265 | return -ENODEV; | ||
266 | } | ||
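
The 0x0000/0xffff screen above works because MII_PHYSID1 and MII_PHYSID2
together hold the 32-bit PHY identifier; an absent or unpowered PHY
typically floats the MDIO bus (reads as all-ones) or returns zeros. A
sketch of decoding the identifier, assuming the conventional clause-22
layout (the helper and field positions are illustrative, not from this
driver):

    /* Decode the 32-bit PHY identifier per IEEE 802.3 clause 22:
     * PHYSID2 bits 9:4 carry the model number, bits 3:0 the revision. */
    static void decode_phy_id(u16 physid1, u16 physid2)
    {
            u32 id = ((u32)physid1 << 16) | physid2;
            unsigned int model = (physid2 >> 4) & 0x3f;
            unsigned int rev = physid2 & 0xf;

            pr_info("PHY id %#010x model %#x rev %u\n", id, model, rev);
    }
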
267 | |||
268 | /************************************************************************** | ||
269 | * | ||
270 | * Loopback testing | ||
271 | * NB Only one loopback test can be executing concurrently. | ||
272 | * | ||
273 | **************************************************************************/ | ||
274 | |||
275 | /* Loopback test RX callback | ||
276 | * This is called for each received packet during loopback testing. | ||
277 | */ | ||
278 | void efx_loopback_rx_packet(struct efx_nic *efx, | ||
279 | const char *buf_ptr, int pkt_len) | ||
280 | { | ||
281 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
282 | struct efx_loopback_payload *received; | ||
283 | struct efx_loopback_payload *payload; | ||
284 | |||
285 | BUG_ON(!buf_ptr); | ||
286 | |||
287 | /* If we are just flushing, then drop the packet */ | ||
288 | if ((state == NULL) || state->flush) | ||
289 | return; | ||
290 | |||
291 | payload = &state->payload; | ||
292 | |||
293 | received = (struct efx_loopback_payload *) buf_ptr; | ||
294 | received->ip.saddr = payload->ip.saddr; | ||
295 | received->ip.check = payload->ip.check; | ||
296 | |||
297 | /* Check that header exists */ | ||
298 | if (pkt_len < sizeof(received->header)) { | ||
299 | EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " | ||
300 | "test\n", pkt_len, LOOPBACK_MODE(efx)); | ||
301 | goto err; | ||
302 | } | ||
303 | |||
304 | /* Check that the Ethernet header matches */ | ||
305 | if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { | ||
306 | EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", | ||
307 | LOOPBACK_MODE(efx)); | ||
308 | goto err; | ||
309 | } | ||
310 | |||
311 | /* Check packet length */ | ||
312 | if (pkt_len != sizeof(*payload)) { | ||
313 | EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " | ||
314 | "%s loopback test\n", pkt_len, (int)sizeof(*payload), | ||
315 | LOOPBACK_MODE(efx)); | ||
316 | goto err; | ||
317 | } | ||
318 | |||
319 | /* Check that IP header matches */ | ||
320 | if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { | ||
321 | EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", | ||
322 | LOOPBACK_MODE(efx)); | ||
323 | goto err; | ||
324 | } | ||
325 | |||
326 | /* Check that msg and padding match */ | ||
327 | if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { | ||
328 | EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", | ||
329 | LOOPBACK_MODE(efx)); | ||
330 | goto err; | ||
331 | } | ||
332 | |||
333 | /* Check that iteration matches */ | ||
334 | if (received->iteration != payload->iteration) { | ||
335 | EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " | ||
336 | "%s loopback test\n", ntohs(received->iteration), | ||
337 | ntohs(payload->iteration), LOOPBACK_MODE(efx)); | ||
338 | goto err; | ||
339 | } | ||
340 | |||
341 | /* Increase correct RX count */ | ||
342 | EFX_TRACE(efx, "got loopback RX in %s loopback test\n", | ||
343 | LOOPBACK_MODE(efx)); | ||
344 | |||
345 | atomic_inc(&state->rx_good); | ||
346 | return; | ||
347 | |||
348 | err: | ||
349 | #ifdef EFX_ENABLE_DEBUG | ||
350 | if (atomic_read(&state->rx_bad) == 0) { | ||
351 | EFX_ERR(efx, "received packet:\n"); | ||
352 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | ||
353 | buf_ptr, pkt_len, 0); | ||
354 | EFX_ERR(efx, "expected packet:\n"); | ||
355 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | ||
356 | &state->payload, sizeof(state->payload), 0); | ||
357 | } | ||
358 | #endif | ||
359 | atomic_inc(&state->rx_bad); | ||
360 | } | ||
361 | |||
362 | /* Initialise an efx_selftest_state for a new iteration */ | ||
363 | static void efx_iterate_state(struct efx_nic *efx) | ||
364 | { | ||
365 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
366 | struct net_device *net_dev = efx->net_dev; | ||
367 | struct efx_loopback_payload *payload = &state->payload; | ||
368 | |||
369 | /* Initialise the layer II header */ | ||
370 | memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); | ||
371 | memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); | ||
372 | payload->header.h_proto = htons(ETH_P_IP); | ||
373 | |||
374 | /* saddr set later and used as incrementing count */ | ||
375 | payload->ip.daddr = htonl(INADDR_LOOPBACK); | ||
376 | payload->ip.ihl = 5; | ||
377 | payload->ip.check = htons(0xdead); | ||
378 | payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); | ||
379 | payload->ip.version = IPVERSION; | ||
380 | payload->ip.protocol = IPPROTO_UDP; | ||
381 | |||
382 | /* Initialise udp header */ | ||
383 | payload->udp.source = 0; | ||
384 | payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - | ||
385 | sizeof(struct iphdr)); | ||
386 | payload->udp.check = 0; /* checksum ignored */ | ||
387 | |||
388 | /* Fill out payload */ | ||
389 | payload->iteration = htons(ntohs(payload->iteration) + 1); | ||
390 | memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); | ||
391 | |||
392 | /* Fill out remaining state members */ | ||
393 | atomic_set(&state->rx_good, 0); | ||
394 | atomic_set(&state->rx_bad, 0); | ||
395 | smp_wmb(); | ||
396 | } | ||
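
For reference, the frame built here is self-describing; the layout implied
by the field accesses above is roughly the following (a reconstruction for
illustration only; the real definition lives elsewhere in selftest.c and
may differ in detail):

    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include <linux/udp.h>

    /* Assumed shape of the loopback test frame: Ethernet + IPv4 + UDP
     * headers, a per-run iteration tag and a fixed message, sent as one
     * contiguous buffer. */
    struct efx_loopback_payload {
            struct ethhdr header;
            struct iphdr ip;
            struct udphdr udp;
            __be16 iteration;
            char msg[64];
    } __attribute__ ((packed));
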
397 | |||
398 | static int efx_tx_loopback(struct efx_tx_queue *tx_queue) | ||
399 | { | ||
400 | struct efx_nic *efx = tx_queue->efx; | ||
401 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
402 | struct efx_loopback_payload *payload; | ||
403 | struct sk_buff *skb; | ||
404 | int i, rc; | ||
405 | |||
406 | /* Transmit N copies of buffer */ | ||
407 | for (i = 0; i < state->packet_count; i++) { | ||
408 | /* Allocate an skb, holding an extra reference for | ||
409 | * transmit completion counting */ | ||
410 | skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); | ||
411 | if (!skb) | ||
412 | return -ENOMEM; | ||
413 | state->skbs[i] = skb; | ||
414 | skb_get(skb); | ||
415 | |||
416 | /* Copy the payload in, incrementing the source address to | ||
417 | * exercise the RSS vectors */ | ||
418 | payload = ((struct efx_loopback_payload *) | ||
419 | skb_put(skb, sizeof(state->payload))); | ||
420 | memcpy(payload, &state->payload, sizeof(state->payload)); | ||
421 | payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); | ||
422 | |||
423 | /* Ensure everything we've written is visible to the | ||
424 | * interrupt handler. */ | ||
425 | smp_wmb(); | ||
426 | |||
427 | if (efx_dev_registered(efx)) | ||
428 | netif_tx_lock_bh(efx->net_dev); | ||
429 | rc = efx_xmit(efx, tx_queue, skb); | ||
430 | if (efx_dev_registered(efx)) | ||
431 | netif_tx_unlock_bh(efx->net_dev); | ||
432 | |||
433 | if (rc != NETDEV_TX_OK) { | ||
434 | EFX_ERR(efx, "TX queue %d could not transmit packet %d " | ||
435 | "of %d in %s loopback test\n", tx_queue->queue, | ||
436 | i + 1, state->packet_count, LOOPBACK_MODE(efx)); | ||
437 | |||
438 | /* Defer cleaning up the other skbs for the caller */ | ||
439 | kfree_skb(skb); | ||
440 | return -EPIPE; | ||
441 | } | ||
442 | } | ||
443 | |||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | ||
448 | struct efx_loopback_self_tests *lb_tests) | ||
449 | { | ||
450 | struct efx_nic *efx = tx_queue->efx; | ||
451 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
452 | struct sk_buff *skb; | ||
453 | int tx_done = 0, rx_good, rx_bad; | ||
454 | int i, rc = 0; | ||
455 | |||
456 | if (efx_dev_registered(efx)) | ||
457 | netif_tx_lock_bh(efx->net_dev); | ||
458 | |||
459 | /* Count the number of TX completions, and decrement the refcnt. Any | ||
460 | * skbs not already completed will be freed when the queue is flushed */ | ||
461 | for (i = 0; i < state->packet_count; i++) { | ||
462 | skb = state->skbs[i]; | ||
463 | if (skb && !skb_shared(skb)) | ||
464 | ++tx_done; | ||
465 | dev_kfree_skb_any(skb); | ||
466 | } | ||
467 | |||
468 | if (efx_dev_registered(efx)) | ||
469 | netif_tx_unlock_bh(efx->net_dev); | ||
470 | |||
471 | /* Check TX completion and received packet counts */ | ||
472 | rx_good = atomic_read(&state->rx_good); | ||
473 | rx_bad = atomic_read(&state->rx_bad); | ||
474 | if (tx_done != state->packet_count) { | ||
475 | /* Don't free the skbs; they will be picked up on TX | ||
476 | * overflow or channel teardown. | ||
477 | */ | ||
478 | EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " | ||
479 | "TX completion events in %s loopback test\n", | ||
480 | tx_queue->queue, tx_done, state->packet_count, | ||
481 | LOOPBACK_MODE(efx)); | ||
482 | rc = -ETIMEDOUT; | ||
483 | /* Fall through so we also report the RX errors */ | ||
484 | } | ||
485 | |||
486 | /* We may always be up to a flush away from our desired packet total */ | ||
487 | if (rx_good != state->packet_count) { | ||
488 | EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " | ||
489 | "received packets in %s loopback test\n", | ||
490 | tx_queue->queue, rx_good, state->packet_count, | ||
491 | LOOPBACK_MODE(efx)); | ||
492 | rc = -ETIMEDOUT; | ||
493 | /* Fall through */ | ||
494 | } | ||
495 | |||
496 | /* Update loopback test structure */ | ||
497 | lb_tests->tx_sent[tx_queue->queue] += state->packet_count; | ||
498 | lb_tests->tx_done[tx_queue->queue] += tx_done; | ||
499 | lb_tests->rx_good += rx_good; | ||
500 | lb_tests->rx_bad += rx_bad; | ||
501 | |||
502 | return rc; | ||
503 | } | ||
504 | |||
505 | static int | ||
506 | efx_test_loopback(struct efx_tx_queue *tx_queue, | ||
507 | struct efx_loopback_self_tests *lb_tests) | ||
508 | { | ||
509 | struct efx_nic *efx = tx_queue->efx; | ||
510 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
511 | struct efx_channel *channel; | ||
512 | int i, rc = 0; | ||
513 | |||
514 | for (i = 0; i < loopback_test_level; i++) { | ||
515 | /* Determine how many packets to send */ | ||
516 | state->packet_count = (efx->type->txd_ring_mask + 1) / 3; | ||
517 | state->packet_count = min(1 << (i << 2), state->packet_count); | ||
518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | ||
519 | state->packet_count, GFP_KERNEL); | ||
520 | if (!state->skbs) | ||
521 | return -ENOMEM; | ||
522 | state->flush = 0; | ||
523 | |||
524 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " | ||
525 | "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | ||
526 | state->packet_count); | ||
527 | |||
528 | efx_iterate_state(efx); | ||
529 | rc = efx_tx_loopback(tx_queue); | ||
530 | |||
531 | /* NAPI polling is not enabled, so process channels synchronously */ | ||
532 | schedule_timeout_uninterruptible(HZ / 50); | ||
533 | efx_for_each_channel_with_interrupt(channel, efx) { | ||
534 | if (channel->work_pending) | ||
535 | efx_process_channel_now(channel); | ||
536 | } | ||
537 | |||
538 | rc |= efx_rx_loopback(tx_queue, lb_tests); | ||
539 | kfree(state->skbs); | ||
540 | |||
541 | if (rc) { | ||
542 | /* Wait a while to ensure there are no packets | ||
543 | * floating around after a failure. */ | ||
544 | schedule_timeout_uninterruptible(HZ / 10); | ||
545 | return rc; | ||
546 | } | ||
547 | } | ||
548 | |||
549 | EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " | ||
550 | "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | ||
551 | state->packet_count); | ||
552 | |||
553 | return rc; | ||
554 | } | ||
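
The burst size above grows as 1 << (i << 2), i.e. 1, 16, 256, ... packets
per test level, capped at a third of the TX ring so a whole burst fits
comfortably in the queue. Worked numbers, assuming a 512-entry descriptor
ring (txd_ring_mask = 511):

    /* Illustrative burst sizing for a 512-entry TX ring
     * (cap = 512 / 3 = 170 packets):
     *
     *   level 0: min(1 << 0, 170) = 1 packet
     *   level 1: min(1 << 4, 170) = 16 packets
     *   level 2: min(1 << 8, 170) = 170 packets (capped)
     */
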
555 | |||
556 | static int efx_test_loopbacks(struct efx_nic *efx, | ||
557 | struct efx_self_tests *tests, | ||
558 | unsigned int loopback_modes) | ||
559 | { | ||
560 | struct efx_selftest_state *state = efx->loopback_selftest; | ||
561 | struct ethtool_cmd ecmd, ecmd_loopback; | ||
562 | struct efx_tx_queue *tx_queue; | ||
563 | enum efx_loopback_mode old_mode, mode; | ||
564 | int count, rc = 0, link_up; | ||
565 | |||
566 | rc = efx_ethtool_get_settings(efx->net_dev, &ecmd); | ||
567 | if (rc) { | ||
568 | EFX_ERR(efx, "could not get GMII settings\n"); | ||
569 | return rc; | ||
570 | } | ||
571 | old_mode = efx->loopback_mode; | ||
572 | |||
573 | /* Disable autonegotiation for the purposes of loopback */ | ||
574 | memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback)); | ||
575 | if (ecmd_loopback.autoneg == AUTONEG_ENABLE) { | ||
576 | ecmd_loopback.autoneg = AUTONEG_DISABLE; | ||
577 | ecmd_loopback.duplex = DUPLEX_FULL; | ||
578 | ecmd_loopback.speed = SPEED_10000; | ||
579 | } | ||
580 | |||
581 | rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); | ||
582 | if (rc) { | ||
583 | EFX_ERR(efx, "could not disable autonegotiation\n"); | ||
584 | goto out; | ||
585 | } | ||
586 | tests->loopback_speed = ecmd_loopback.speed; | ||
587 | tests->loopback_full_duplex = ecmd_loopback.duplex; | ||
588 | |||
589 | /* Test all supported loopback modes */ | ||
590 | for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { | ||
591 | if (!(loopback_modes & (1 << mode))) | ||
592 | continue; | ||
593 | |||
594 | /* Move the port into the specified loopback mode. */ | ||
595 | state->flush = 1; | ||
596 | efx->loopback_mode = mode; | ||
597 | efx_reconfigure_port(efx); | ||
598 | |||
599 | /* Wait for the PHY to signal the link is up */ | ||
600 | count = 0; | ||
601 | do { | ||
602 | struct efx_channel *channel = &efx->channel[0]; | ||
603 | |||
604 | falcon_check_xmac(efx); | ||
605 | schedule_timeout_uninterruptible(HZ / 10); | ||
606 | if (channel->work_pending) | ||
607 | efx_process_channel_now(channel); | ||
608 | /* Wait for PHY events to be processed */ | ||
609 | flush_workqueue(efx->workqueue); | ||
610 | rmb(); | ||
611 | |||
612 | /* efx->link_up can be 1 even if the XAUI link is down, | ||
613 | * (bug5762). Usually, it's not worth bothering with the | ||
614 | * difference, but for selftests, we need that extra | ||
615 | * guarantee that the link is really, really, up. | ||
616 | */ | ||
617 | link_up = efx->link_up; | ||
618 | if (!falcon_xaui_link_ok(efx)) | ||
619 | link_up = 0; | ||
620 | |||
621 | } while ((++count < 20) && !link_up); | ||
622 | |||
623 | /* The link should now be up. If it isn't, there is no point | ||
624 | * in attempting a loopback test */ | ||
625 | if (!link_up) { | ||
626 | EFX_ERR(efx, "loopback %s never came up\n", | ||
627 | LOOPBACK_MODE(efx)); | ||
628 | rc = -EIO; | ||
629 | goto out; | ||
630 | } | ||
631 | |||
632 | EFX_LOG(efx, "link came up in %s loopback in %d iterations\n", | ||
633 | LOOPBACK_MODE(efx), count); | ||
634 | |||
635 | /* Test every TX queue */ | ||
636 | efx_for_each_tx_queue(tx_queue, efx) { | ||
637 | rc |= efx_test_loopback(tx_queue, | ||
638 | &tests->loopback[mode]); | ||
639 | if (rc) | ||
640 | goto out; | ||
641 | } | ||
642 | } | ||
643 | |||
644 | out: | ||
645 | /* Take out of loopback and restore PHY settings */ | ||
646 | state->flush = 1; | ||
647 | efx->loopback_mode = old_mode; | ||
648 | efx_ethtool_set_settings(efx->net_dev, &ecmd); | ||
649 | |||
650 | return rc; | ||
651 | } | ||
652 | |||
653 | /************************************************************************** | ||
654 | * | ||
655 | * Entry points | ||
656 | * | ||
657 | *************************************************************************/ | ||
658 | |||
659 | /* Online (i.e. non-disruptive) testing | ||
660 | * This checks interrupt generation, event delivery and PHY presence. */ | ||
661 | int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) | ||
662 | { | ||
663 | struct efx_channel *channel; | ||
664 | int rc = 0; | ||
665 | |||
666 | EFX_LOG(efx, "performing online self-tests\n"); | ||
667 | |||
668 | rc |= efx_test_interrupts(efx, tests); | ||
669 | efx_for_each_channel(channel, efx) { | ||
670 | if (channel->has_interrupt) | ||
671 | rc |= efx_test_eventq_irq(channel, tests); | ||
672 | else | ||
673 | rc |= efx_test_eventq(channel, tests); | ||
674 | } | ||
675 | rc |= efx_test_phy(efx, tests); | ||
676 | |||
677 | if (rc) | ||
678 | EFX_ERR(efx, "failed online self-tests\n"); | ||
679 | |||
680 | return rc; | ||
681 | } | ||
682 | |||
683 | /* Offline (i.e. disruptive) testing | ||
684 | * This checks MAC and PHY loopback on the specified port. */ | ||
685 | int efx_offline_test(struct efx_nic *efx, | ||
686 | struct efx_self_tests *tests, unsigned int loopback_modes) | ||
687 | { | ||
688 | struct efx_selftest_state *state; | ||
689 | int rc = 0; | ||
690 | |||
691 | EFX_LOG(efx, "performing offline self-tests\n"); | ||
692 | |||
693 | /* Create a selftest_state structure to hold state for the test */ | ||
694 | state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
695 | if (state == NULL) { | ||
696 | rc = -ENOMEM; | ||
697 | goto out; | ||
698 | } | ||
699 | |||
700 | /* Set the port loopback_selftest member. From this point on | ||
701 | * all received packets will be dropped. Mark the state as | ||
702 | * "flushing" so all inflight packets are dropped */ | ||
703 | BUG_ON(efx->loopback_selftest); | ||
704 | state->flush = 1; | ||
705 | efx->loopback_selftest = state; | ||
706 | |||
707 | rc = efx_test_loopbacks(efx, tests, loopback_modes); | ||
708 | |||
709 | efx->loopback_selftest = NULL; | ||
710 | wmb(); | ||
711 | kfree(state); | ||
712 | |||
713 | out: | ||
714 | if (rc) | ||
715 | EFX_ERR(efx, "failed offline self-tests\n"); | ||
716 | |||
717 | return rc; | ||
718 | } | ||
719 | |||
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h new file mode 100644 index 000000000000..f6999c2b622d --- /dev/null +++ b/drivers/net/sfc/selftest.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /**************************************************************************** | ||
2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation, incorporated herein by reference. | ||
9 | */ | ||
10 | |||
11 | #ifndef EFX_SELFTEST_H | ||
12 | #define EFX_SELFTEST_H | ||
13 | |||
14 | #include "net_driver.h" | ||
15 | |||
16 | /* | ||
17 | * Self tests | ||
18 | */ | ||
19 | |||
20 | struct efx_loopback_self_tests { | ||
21 | int tx_sent[EFX_MAX_TX_QUEUES]; | ||
22 | int tx_done[EFX_MAX_TX_QUEUES]; | ||
23 | int rx_good; | ||
24 | int rx_bad; | ||
25 | }; | ||
26 | |||
27 | /* Efx self test results | ||
28 | * For fields which are not counters, 1 indicates success and -1 | ||
29 | * indicates failure. | ||
30 | */ | ||
31 | struct efx_self_tests { | ||
32 | int interrupt; | ||
33 | int eventq_dma[EFX_MAX_CHANNELS]; | ||
34 | int eventq_int[EFX_MAX_CHANNELS]; | ||
35 | int eventq_poll[EFX_MAX_CHANNELS]; | ||
36 | int phy_ok; | ||
37 | int loopback_speed; | ||
38 | int loopback_full_duplex; | ||
39 | struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; | ||
40 | }; | ||
41 | |||
42 | extern void efx_loopback_rx_packet(struct efx_nic *efx, | ||
43 | const char *buf_ptr, int pkt_len); | ||
44 | extern int efx_online_test(struct efx_nic *efx, | ||
45 | struct efx_self_tests *tests); | ||
46 | extern int efx_offline_test(struct efx_nic *efx, | ||
47 | struct efx_self_tests *tests, | ||
48 | unsigned int loopback_modes); | ||
49 | |||
50 | #endif /* EFX_SELFTEST_H */ | ||
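
A sketch of how a caller might drive these entry points, for instance from
an ethtool self-test hook (the wrapper below is illustrative; the real
dispatch lives in ethtool.c and differs in detail):

    #include <linux/string.h>
    #include "selftest.h"

    /* Hypothetical dispatcher: always run the non-disruptive tests, and
     * the loopback tests only when offline testing was requested. */
    static int efx_run_selftests(struct efx_nic *efx,
                                 unsigned int loopback_modes, int offline)
    {
            struct efx_self_tests tests;
            int rc;

            memset(&tests, 0, sizeof(tests));
            rc = efx_online_test(efx, &tests);
            if (!rc && offline)
                    rc = efx_offline_test(efx, &tests, loopback_modes);
            return rc;
    }
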
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 11fa9fb8f48b..66a0d1442aba 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c | |||
@@ -116,20 +116,29 @@ void sfe4001_poweroff(struct efx_nic *efx) | |||
116 | 116 | ||
117 | /* Turn off all power rails */ | 117 | /* Turn off all power rails */ |
118 | out = 0xff; | 118 | out = 0xff; |
119 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 119 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
120 | 120 | ||
121 | /* Disable port 1 outputs on IO expander */ | 121 | /* Disable port 1 outputs on IO expander */ |
122 | cfg = 0xff; | 122 | cfg = 0xff; |
123 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); | 123 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); |
124 | 124 | ||
125 | /* Disable port 0 outputs on IO expander */ | 125 | /* Disable port 0 outputs on IO expander */ |
126 | cfg = 0xff; | 126 | cfg = 0xff; |
127 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); | 127 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); |
128 | 128 | ||
129 | /* Clear any over-temperature alert */ | 129 | /* Clear any over-temperature alert */ |
130 | (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); | 130 | efx_i2c_read(i2c, MAX6647, RSL, &in, 1); |
131 | } | 131 | } |
132 | 132 | ||
133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected | ||
134 | * to the FLASH_CFG_1 input on the DSP. We must keep it high at | ||
135 | * power-up to allow writing the flash (done through MDIO from userland). | ||
136 | */ | ||
137 | unsigned int sfe4001_phy_flash_cfg; | ||
138 | module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); | ||
139 | MODULE_PARM_DESC(phy_flash_cfg, | ||
140 | "Force PHY to enter flash configuration mode"); | ||
141 | |||
133 | /* This board uses an I2C expander to provide power to the PHY, which needs to | 142
134 | * be turned on before the PHY can be used. | 143 | * be turned on before the PHY can be used. |
135 | * Context: Process context, rtnl lock held | 144 | * Context: Process context, rtnl lock held |
@@ -203,6 +212,8 @@ int sfe4001_poweron(struct efx_nic *efx) | |||
203 | out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | | 212 | out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | |
204 | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | | 213 | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | |
205 | (1 << P0_X_TRST_LBN)); | 214 | (1 << P0_X_TRST_LBN)); |
215 | if (sfe4001_phy_flash_cfg) | ||
216 | out |= 1 << P0_EN_3V3X_LBN; | ||
206 | 217 | ||
207 | rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 218 | rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
208 | if (rc) | 219 | if (rc) |
@@ -226,6 +237,9 @@ int sfe4001_poweron(struct efx_nic *efx) | |||
226 | if (in & (1 << P1_AFE_PWD_LBN)) | 237 | if (in & (1 << P1_AFE_PWD_LBN)) |
227 | goto done; | 238 | goto done; |
228 | 239 | ||
240 | /* DSP doesn't look powered in flash config mode */ | ||
241 | if (sfe4001_phy_flash_cfg) | ||
242 | goto done; | ||
229 | } while (++count < 20); | 243 | } while (++count < 20); |
230 | 244 | ||
231 | EFX_INFO(efx, "timed out waiting for power\n"); | 245 | EFX_INFO(efx, "timed out waiting for power\n"); |
@@ -239,14 +253,14 @@ done: | |||
239 | fail3: | 253 | fail3: |
240 | /* Turn off all power rails */ | 254 | /* Turn off all power rails */ |
241 | out = 0xff; | 255 | out = 0xff; |
242 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 256 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
243 | /* Disable port 1 outputs on IO expander */ | 257 | /* Disable port 1 outputs on IO expander */ |
244 | out = 0xff; | 258 | out = 0xff; |
245 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); | 259 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); |
246 | fail2: | 260 | fail2: |
247 | /* Disable port 0 outputs on IO expander */ | 261 | /* Disable port 0 outputs on IO expander */ |
248 | out = 0xff; | 262 | out = 0xff; |
249 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); | 263 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); |
250 | fail1: | 264 | fail1: |
251 | return rc; | 265 | return rc; |
252 | } | 266 | } |
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index a2e9f79e47b1..c0146061c326 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -24,6 +24,11 @@ | |||
24 | MDIO_MMDREG_DEVS0_PCS | \ | 24 | MDIO_MMDREG_DEVS0_PCS | \ |
25 | MDIO_MMDREG_DEVS0_PHYXS) | 25 | MDIO_MMDREG_DEVS0_PHYXS) |
26 | 26 | ||
27 | #define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ | ||
28 | (1 << LOOPBACK_PCS) | \ | ||
29 | (1 << LOOPBACK_PMAPMD) | \ | ||
30 | (1 << LOOPBACK_NETWORK)) | ||
31 | |||
27 | /* We complain if we fail to see the link partner as 10G capable this many | 32 | /* We complain if we fail to see the link partner as 10G capable this many |
28 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) | 33 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) |
29 | */ | 34 | */ |
@@ -72,6 +77,10 @@ | |||
72 | #define PMA_PMD_BIST_RXD_LBN (1) | 77 | #define PMA_PMD_BIST_RXD_LBN (1) |
73 | #define PMA_PMD_BIST_AFE_LBN (0) | 78 | #define PMA_PMD_BIST_AFE_LBN (0) |
74 | 79 | ||
80 | /* Special Software reset register */ | ||
81 | #define PMA_PMD_EXT_CTRL_REG 49152 | ||
82 | #define PMA_PMD_EXT_SSR_LBN 15 | ||
83 | |||
75 | #define BIST_MAX_DELAY (1000) | 84 | #define BIST_MAX_DELAY (1000) |
76 | #define BIST_POLL_DELAY (10) | 85 | #define BIST_POLL_DELAY (10) |
77 | 86 | ||
@@ -86,6 +95,11 @@ | |||
86 | #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ | 95 | #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ |
87 | #define CLK312_EN_LBN 3 | 96 | #define CLK312_EN_LBN 3 |
88 | 97 | ||
98 | /* PHYXS registers */ | ||
99 | #define PHYXS_TEST1 (49162) | ||
100 | #define LOOPBACK_NEAR_LBN (8) | ||
101 | #define LOOPBACK_NEAR_WIDTH (1) | ||
102 | |||
89 | /* Boot status register */ | 103 | /* Boot status register */ |
90 | #define PCS_BOOT_STATUS_REG (0xd000) | 104 | #define PCS_BOOT_STATUS_REG (0xd000) |
91 | #define PCS_BOOT_FATAL_ERR_LBN (0) | 105 | #define PCS_BOOT_FATAL_ERR_LBN (0) |
@@ -106,7 +120,9 @@ MODULE_PARM_DESC(crc_error_reset_threshold, | |||
106 | 120 | ||
107 | struct tenxpress_phy_data { | 121 | struct tenxpress_phy_data { |
108 | enum tenxpress_state state; | 122 | enum tenxpress_state state; |
123 | enum efx_loopback_mode loopback_mode; | ||
109 | atomic_t bad_crc_count; | 124 | atomic_t bad_crc_count; |
125 | int tx_disabled; | ||
110 | int bad_lp_tries; | 126 | int bad_lp_tries; |
111 | }; | 127 | }; |
112 | 128 | ||
@@ -195,14 +211,18 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
195 | int rc = 0; | 211 | int rc = 0; |
196 | 212 | ||
197 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | 213 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
214 | if (!phy_data) | ||
215 | return -ENOMEM; | ||
198 | efx->phy_data = phy_data; | 216 | efx->phy_data = phy_data; |
199 | 217 | ||
200 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); | 218 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); |
201 | 219 | ||
202 | rc = mdio_clause45_wait_reset_mmds(efx, | 220 | if (!sfe4001_phy_flash_cfg) { |
203 | TENXPRESS_REQUIRED_DEVS); | 221 | rc = mdio_clause45_wait_reset_mmds(efx, |
204 | if (rc < 0) | 222 | TENXPRESS_REQUIRED_DEVS); |
205 | goto fail; | 223 | if (rc < 0) |
224 | goto fail; | ||
225 | } | ||
206 | 226 | ||
207 | rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); | 227 | rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); |
208 | if (rc < 0) | 228 | if (rc < 0) |
@@ -225,6 +245,35 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
225 | return rc; | 245 | return rc; |
226 | } | 246 | } |
227 | 247 | ||
248 | static int tenxpress_special_reset(struct efx_nic *efx) | ||
249 | { | ||
250 | int rc, reg; | ||
251 | |||
252 | EFX_TRACE(efx, "%s\n", __func__); | ||
253 | |||
254 | /* Initiate reset */ | ||
255 | reg = mdio_clause45_read(efx, efx->mii.phy_id, | ||
256 | MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG); | ||
257 | reg |= (1 << PMA_PMD_EXT_SSR_LBN); | ||
258 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
259 | PMA_PMD_EXT_CTRL_REG, reg); | ||
260 | |||
261 | msleep(200); | ||
262 | |||
263 | /* Wait for the blocks to come out of reset */ | ||
264 | rc = mdio_clause45_wait_reset_mmds(efx, | ||
265 | TENXPRESS_REQUIRED_DEVS); | ||
266 | if (rc < 0) | ||
267 | return rc; | ||
268 | |||
269 | /* Try to reconfigure the device */ | ||
270 | rc = tenxpress_init(efx); | ||
271 | if (rc < 0) | ||
272 | return rc; | ||
273 | |||
274 | return 0; | ||
275 | } | ||
276 | |||
228 | static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) | 277 | static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) |
229 | { | 278 | { |
230 | struct tenxpress_phy_data *pd = efx->phy_data; | 279 | struct tenxpress_phy_data *pd = efx->phy_data; |
@@ -299,11 +348,46 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp) | |||
299 | return ok; | 348 | return ok; |
300 | } | 349 | } |
301 | 350 | ||
351 | static void tenxpress_phyxs_loopback(struct efx_nic *efx) | ||
352 | { | ||
353 | int phy_id = efx->mii.phy_id; | ||
354 | int ctrl1, ctrl2; | ||
355 | |||
356 | ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, | ||
357 | PHYXS_TEST1); | ||
358 | if (efx->loopback_mode == LOOPBACK_PHYXS) | ||
359 | ctrl2 |= (1 << LOOPBACK_NEAR_LBN); | ||
360 | else | ||
361 | ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN); | ||
362 | if (ctrl1 != ctrl2) | ||
363 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, | ||
364 | PHYXS_TEST1, ctrl2); | ||
365 | } | ||
366 | |||
302 | static void tenxpress_phy_reconfigure(struct efx_nic *efx) | 367 | static void tenxpress_phy_reconfigure(struct efx_nic *efx) |
303 | { | 368 | { |
369 | struct tenxpress_phy_data *phy_data = efx->phy_data; | ||
370 | int loop_change = LOOPBACK_OUT_OF(phy_data, efx, | ||
371 | TENXPRESS_LOOPBACKS); | ||
372 | |||
304 | if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) | 373 | if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) |
305 | return; | 374 | return; |
306 | 375 | ||
376 | /* When coming out of transmit disable, coming out of low power | ||
377 | * mode, or moving out of any PHY internal loopback mode, | ||
378 | * perform a special software reset */ | ||
379 | if ((phy_data->tx_disabled && !efx->tx_disabled) || | ||
380 | loop_change) { | ||
381 | tenxpress_special_reset(efx); | ||
382 | falcon_reset_xaui(efx); | ||
383 | } | ||
384 | |||
385 | mdio_clause45_transmit_disable(efx); | ||
386 | mdio_clause45_phy_reconfigure(efx); | ||
387 | tenxpress_phyxs_loopback(efx); | ||
388 | |||
389 | phy_data->tx_disabled = efx->tx_disabled; | ||
390 | phy_data->loopback_mode = efx->loopback_mode; | ||
307 | efx->link_up = tenxpress_link_ok(efx, 0); | 391 | efx->link_up = tenxpress_link_ok(efx, 0); |
308 | efx->link_options = GM_LPA_10000FULL; | 392 | efx->link_options = GM_LPA_10000FULL; |
309 | } | 393 | } |
@@ -431,4 +515,5 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = { | |||
431 | .clear_interrupt = tenxpress_phy_clear_interrupt, | 515 | .clear_interrupt = tenxpress_phy_clear_interrupt, |
432 | .reset_xaui = tenxpress_reset_xaui, | 516 | .reset_xaui = tenxpress_reset_xaui, |
433 | .mmds = TENXPRESS_REQUIRED_DEVS, | 517 | .mmds = TENXPRESS_REQUIRED_DEVS, |
518 | .loopbacks = TENXPRESS_LOOPBACKS, | ||
434 | }; | 519 | }; |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index fbb866b2185e..5cdd082ab8f6 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
@@ -82,6 +82,46 @@ static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, | |||
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | /** | ||
86 | * struct efx_tso_header - a DMA mapped buffer for packet headers | ||
87 | * @next: Linked list of free headers. | ||
88 | * The list is protected by the TX queue lock. | ||
89 | * @unmap_len: Length to unmap for an oversize buffer, or 0. | ||
90 | * @dma_addr: The DMA address of the header below. | ||
91 | * | ||
92 | * This controls the memory used for a TSO header. Use TSOH_BUFFER() | ||
93 | * to find the packet header data. Use TSOH_SIZE() to calculate the | ||
94 | * total size required for a given packet header length. TSO headers | ||
95 | * in the free list are exactly %TSOH_STD_SIZE bytes in size. | ||
96 | */ | ||
97 | struct efx_tso_header { | ||
98 | union { | ||
99 | struct efx_tso_header *next; | ||
100 | size_t unmap_len; | ||
101 | }; | ||
102 | dma_addr_t dma_addr; | ||
103 | }; | ||
104 | |||
105 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | ||
106 | const struct sk_buff *skb); | ||
107 | static void efx_fini_tso(struct efx_tx_queue *tx_queue); | ||
108 | static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, | ||
109 | struct efx_tso_header *tsoh); | ||
110 | |||
111 | static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, | ||
112 | struct efx_tx_buffer *buffer) | ||
113 | { | ||
114 | if (buffer->tsoh) { | ||
115 | if (likely(!buffer->tsoh->unmap_len)) { | ||
116 | buffer->tsoh->next = tx_queue->tso_headers_free; | ||
117 | tx_queue->tso_headers_free = buffer->tsoh; | ||
118 | } else { | ||
119 | efx_tsoh_heap_free(tx_queue, buffer->tsoh); | ||
120 | } | ||
121 | buffer->tsoh = NULL; | ||
122 | } | ||
123 | } | ||
124 | |||
85 | 125 | ||
86 | /* | 126 | /* |
87 | * Add a socket buffer to a TX queue | 127 | * Add a socket buffer to a TX queue |
@@ -114,6 +154,9 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
114 | 154 | ||
115 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | 155 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); |
116 | 156 | ||
157 | if (skb_shinfo((struct sk_buff *)skb)->gso_size) | ||
158 | return efx_enqueue_skb_tso(tx_queue, skb); | ||
159 | |||
117 | /* Get size of the initial fragment */ | 160 | /* Get size of the initial fragment */ |
118 | len = skb_headlen(skb); | 161 | len = skb_headlen(skb); |
119 | 162 | ||
@@ -166,6 +209,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
166 | insert_ptr = (tx_queue->insert_count & | 209 | insert_ptr = (tx_queue->insert_count & |
167 | efx->type->txd_ring_mask); | 210 | efx->type->txd_ring_mask); |
168 | buffer = &tx_queue->buffer[insert_ptr]; | 211 | buffer = &tx_queue->buffer[insert_ptr]; |
212 | efx_tsoh_free(tx_queue, buffer); | ||
213 | EFX_BUG_ON_PARANOID(buffer->tsoh); | ||
169 | EFX_BUG_ON_PARANOID(buffer->skb); | 214 | EFX_BUG_ON_PARANOID(buffer->skb); |
170 | EFX_BUG_ON_PARANOID(buffer->len); | 215 | EFX_BUG_ON_PARANOID(buffer->len); |
171 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); | 216 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); |
@@ -342,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
342 | if (unlikely(tx_queue->stopped)) { | 387 | if (unlikely(tx_queue->stopped)) { |
343 | fill_level = tx_queue->insert_count - tx_queue->read_count; | 388 | fill_level = tx_queue->insert_count - tx_queue->read_count; |
344 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { | 389 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { |
345 | EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); | 390 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); |
346 | 391 | ||
347 | /* Do this under netif_tx_lock(), to avoid racing | 392 | /* Do this under netif_tx_lock(), to avoid racing |
348 | * with efx_xmit(). */ | 393 | * with efx_xmit(). */ |
@@ -432,6 +477,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) | |||
432 | 477 | ||
433 | efx_release_tx_buffers(tx_queue); | 478 | efx_release_tx_buffers(tx_queue); |
434 | 479 | ||
480 | /* Free up TSO header cache */ | ||
481 | efx_fini_tso(tx_queue); | ||
482 | |||
435 | /* Release queue's stop on port, if any */ | 483 | /* Release queue's stop on port, if any */ |
436 | if (tx_queue->stopped) { | 484 | if (tx_queue->stopped) { |
437 | tx_queue->stopped = 0; | 485 | tx_queue->stopped = 0; |
@@ -450,3 +498,622 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | |||
450 | } | 498 | } |
451 | 499 | ||
452 | 500 | ||
501 | /* Efx TCP segmentation acceleration. | ||
502 | * | ||
503 | * Why? Because by doing it here in the driver we can go significantly | ||
504 | * faster than the kernel's generic software segmentation (GSO). | ||
505 | * | ||
506 | * Requires TX checksum offload support. | ||
507 | */ | ||
508 | |||
509 | /* Number of bytes inserted at the start of a TSO header buffer, | ||
510 | * similar to NET_IP_ALIGN. | ||
511 | */ | ||
512 | #if defined(__i386__) || defined(__x86_64__) | ||
513 | #define TSOH_OFFSET 0 | ||
514 | #else | ||
515 | #define TSOH_OFFSET NET_IP_ALIGN | ||
516 | #endif | ||
517 | |||
518 | #define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) | ||
519 | |||
520 | /* Total size of struct efx_tso_header, buffer and padding */ | ||
521 | #define TSOH_SIZE(hdr_len) \ | ||
522 | (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) | ||
523 | |||
524 | /* Size of blocks on free list. Larger blocks must be allocated from | ||
525 | * the heap. | ||
526 | */ | ||
527 | #define TSOH_STD_SIZE 128 | ||
528 | |||
529 | #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) | ||
530 | #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) | ||
531 | #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) | ||
532 | #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) | ||
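
As a worked example of the sizing above: a standard Ethernet + IPv4 + TCP
header is 14 + 20 + 20 = 54 bytes, so such headers come from the free
list, and only headers longer than about 112 bytes fall back to the heap
path (numbers below assume a 64-bit x86 build, so TSOH_OFFSET == 0 and
sizeof(struct efx_tso_header) == 16):

    /* Sizing sketch; all values illustrative.
     *   hdr_len       = ETH_HLEN + 20 + 20        = 54 bytes
     *   TSOH_SIZE(54) = 16 + 0 + 54               = 70 <= TSOH_STD_SIZE
     *   per page      = PAGE_SIZE / TSOH_STD_SIZE = 4096 / 128 = 32
     */
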
533 | |||
534 | /** | ||
535 | * struct tso_state - TSO state for an SKB | ||
536 | * @remaining_len: Bytes of data we've yet to segment | ||
537 | * @seqnum: Current sequence number | ||
538 | * @packet_space: Remaining space in current packet | ||
539 | * @ifc: Input fragment cursor. | ||
540 | * Where we are in the current fragment of the incoming SKB. These | ||
541 | * values get updated in place when we split a fragment over | ||
542 | * multiple packets. | ||
543 | * @p: Parameters. | ||
544 | * These values are set once at the start of the TSO send and do | ||
545 | * not get changed as the routine progresses. | ||
546 | * | ||
547 | * The state used during segmentation. It is put into this data structure | ||
548 | * just to make it easy to pass into inline functions. | ||
549 | */ | ||
550 | struct tso_state { | ||
551 | unsigned remaining_len; | ||
552 | unsigned seqnum; | ||
553 | unsigned packet_space; | ||
554 | |||
555 | struct { | ||
556 | /* DMA address of current position */ | ||
557 | dma_addr_t dma_addr; | ||
558 | /* Remaining length */ | ||
559 | unsigned int len; | ||
560 | /* DMA address and length of the whole fragment */ | ||
561 | unsigned int unmap_len; | ||
562 | dma_addr_t unmap_addr; | ||
563 | struct page *page; | ||
564 | unsigned page_off; | ||
565 | } ifc; | ||
566 | |||
567 | struct { | ||
568 | /* The number of bytes of header */ | ||
569 | unsigned int header_length; | ||
570 | |||
571 | /* The number of bytes to put in each outgoing segment. */ | ||
572 | int full_packet_size; | ||
573 | |||
574 | /* Current IPv4 ID, host endian. */ | ||
575 | unsigned ipv4_id; | ||
576 | } p; | ||
577 | }; | ||
578 | |||
579 | |||
580 | /* | ||
581 | * Verify that our various assumptions about sk_buffs and the conditions | ||
582 | * under which TSO will be attempted hold true. | ||
583 | */ | ||
584 | static inline void efx_tso_check_safe(const struct sk_buff *skb) | ||
585 | { | ||
586 | EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); | ||
587 | EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != | ||
588 | skb->protocol); | ||
589 | EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); | ||
590 | EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) | ||
591 | + (tcp_hdr(skb)->doff << 2u)) > | ||
592 | skb_headlen(skb)); | ||
593 | } | ||
594 | |||
595 | |||
596 | /* | ||
597 | * Allocate a page worth of efx_tso_header structures, and string them | ||
598 | * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. | ||
599 | */ | ||
600 | static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) | ||
601 | { | ||
602 | |||
603 | struct pci_dev *pci_dev = tx_queue->efx->pci_dev; | ||
604 | struct efx_tso_header *tsoh; | ||
605 | dma_addr_t dma_addr; | ||
606 | u8 *base_kva, *kva; | ||
607 | |||
608 | base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); | ||
609 | if (base_kva == NULL) { | ||
610 | EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" | ||
611 | " headers\n"); | ||
612 | return -ENOMEM; | ||
613 | } | ||
614 | |||
615 | /* pci_alloc_consistent() allocates pages. */ | ||
616 | EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); | ||
617 | |||
618 | for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { | ||
619 | tsoh = (struct efx_tso_header *)kva; | ||
620 | tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); | ||
621 | tsoh->next = tx_queue->tso_headers_free; | ||
622 | tx_queue->tso_headers_free = tsoh; | ||
623 | } | ||
624 | |||
625 | return 0; | ||
626 | } | ||
627 | |||
628 | |||
629 | /* Free up a TSO header, and all others in the same page. */ | ||
630 | static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, | ||
631 | struct efx_tso_header *tsoh, | ||
632 | struct pci_dev *pci_dev) | ||
633 | { | ||
634 | struct efx_tso_header **p; | ||
635 | unsigned long base_kva; | ||
636 | dma_addr_t base_dma; | ||
637 | |||
638 | base_kva = (unsigned long)tsoh & PAGE_MASK; | ||
639 | base_dma = tsoh->dma_addr & PAGE_MASK; | ||
640 | |||
641 | p = &tx_queue->tso_headers_free; | ||
642 | while (*p != NULL) { | ||
643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) | ||
644 | *p = (*p)->next; | ||
645 | else | ||
646 | p = &(*p)->next; | ||
647 | } | ||
648 | |||
649 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); | ||
650 | } | ||
651 | |||
652 | static struct efx_tso_header * | ||
653 | efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | ||
654 | { | ||
655 | struct efx_tso_header *tsoh; | ||
656 | |||
657 | tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); | ||
658 | if (unlikely(!tsoh)) | ||
659 | return NULL; | ||
660 | |||
661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, | ||
662 | TSOH_BUFFER(tsoh), header_len, | ||
663 | PCI_DMA_TODEVICE); | ||
664 | if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { | ||
665 | kfree(tsoh); | ||
666 | return NULL; | ||
667 | } | ||
668 | |||
669 | tsoh->unmap_len = header_len; | ||
670 | return tsoh; | ||
671 | } | ||
672 | |||
673 | static void | ||
674 | efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) | ||
675 | { | ||
676 | pci_unmap_single(tx_queue->efx->pci_dev, | ||
677 | tsoh->dma_addr, tsoh->unmap_len, | ||
678 | PCI_DMA_TODEVICE); | ||
679 | kfree(tsoh); | ||
680 | } | ||
681 | |||
682 | /** | ||
683 | * efx_tx_queue_insert - push descriptors onto the TX queue | ||
684 | * @tx_queue: Efx TX queue | ||
685 | * @dma_addr: DMA address of fragment | ||
686 | * @len: Length of fragment | ||
687 | * @skb: Only non-NULL at the end of the last segment | ||
688 | * @end_of_packet: True if last fragment in a packet | ||
689 | * @unmap_addr: DMA address of fragment for unmapping | ||
690 | * @unmap_len: Only set this in the last segment of a fragment | ||
691 | * | ||
692 | * Push descriptors onto the TX queue. Return 0 on success or 1 if | ||
693 | * @tx_queue is full. | ||
694 | */ | ||
695 | static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, | ||
696 | dma_addr_t dma_addr, unsigned len, | ||
697 | const struct sk_buff *skb, int end_of_packet, | ||
698 | dma_addr_t unmap_addr, unsigned unmap_len) | ||
699 | { | ||
700 | struct efx_tx_buffer *buffer; | ||
701 | struct efx_nic *efx = tx_queue->efx; | ||
702 | unsigned dma_len, fill_level, insert_ptr, misalign; | ||
703 | int q_space; | ||
704 | |||
705 | EFX_BUG_ON_PARANOID(len <= 0); | ||
706 | |||
707 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; | ||
708 | /* -1 as there is no way to represent all descriptors used */ | ||
709 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | ||
710 | |||
711 | while (1) { | ||
712 | if (unlikely(q_space-- <= 0)) { | ||
713 | /* It might be that completions have happened | ||
714 | * since the xmit path last checked. Update | ||
715 | * the xmit path's copy of read_count. | ||
716 | */ | ||
717 | ++tx_queue->stopped; | ||
718 | /* This memory barrier protects the change of | ||
719 | * stopped from the access of read_count. */ | ||
720 | smp_mb(); | ||
721 | tx_queue->old_read_count = | ||
722 | *(volatile unsigned *)&tx_queue->read_count; | ||
723 | fill_level = (tx_queue->insert_count | ||
724 | - tx_queue->old_read_count); | ||
725 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | ||
726 | if (unlikely(q_space-- <= 0)) | ||
727 | return 1; | ||
728 | smp_mb(); | ||
729 | --tx_queue->stopped; | ||
730 | } | ||
731 | |||
732 | insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; | ||
733 | buffer = &tx_queue->buffer[insert_ptr]; | ||
734 | ++tx_queue->insert_count; | ||
735 | |||
736 | EFX_BUG_ON_PARANOID(tx_queue->insert_count - | ||
737 | tx_queue->read_count > | ||
738 | efx->type->txd_ring_mask); | ||
739 | |||
740 | efx_tsoh_free(tx_queue, buffer); | ||
741 | EFX_BUG_ON_PARANOID(buffer->len); | ||
742 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | ||
743 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
744 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); | ||
745 | EFX_BUG_ON_PARANOID(buffer->tsoh); | ||
746 | |||
747 | buffer->dma_addr = dma_addr; | ||
748 | |||
749 | /* Ensure we do not cross a boundary unsupported by H/W */ | ||
750 | dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1; | ||
751 | |||
752 | misalign = (unsigned)dma_addr & efx->type->bug5391_mask; | ||
753 | if (misalign && dma_len + misalign > 512) | ||
754 | dma_len = 512 - misalign; | ||
755 | |||
756 | /* If there is enough space to send then do so */ | ||
757 | if (dma_len >= len) | ||
758 | break; | ||
759 | |||
760 | buffer->len = dma_len; /* Don't set the other members */ | ||
761 | dma_addr += dma_len; | ||
762 | len -= dma_len; | ||
763 | } | ||
764 | |||
765 | EFX_BUG_ON_PARANOID(!len); | ||
766 | buffer->len = len; | ||
767 | buffer->skb = skb; | ||
768 | buffer->continuation = !end_of_packet; | ||
769 | buffer->unmap_addr = unmap_addr; | ||
770 | buffer->unmap_len = unmap_len; | ||
771 | return 0; | ||
772 | } | ||
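
The dma_len computation inside the loop is the distance from dma_addr to
the next hardware boundary: (~dma_addr & mask) + 1. A sketch of the
arithmetic, assuming a 4 KiB boundary (tx_dma_mask = 0xfff; the real mask
is per NIC type):

    /* Boundary arithmetic sketch; the mask value is assumed. */
    static unsigned int span_to_boundary(dma_addr_t dma_addr)
    {
            /* e.g. dma_addr = 0x12345f00: ~dma_addr & 0xfff = 0x0ff,
             * so at most 0x100 bytes fit before the 4 KiB boundary. */
            return (~dma_addr & 0xfff) + 1;
    }
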
773 | |||
774 | |||
775 | /* | ||
776 | * Put a TSO header into the TX queue. | ||
777 | * | ||
778 | * This is special-cased because we know that it is small enough to fit in | ||
779 | * a single fragment, and we know it doesn't cross a page boundary. It | ||
780 | * also allows us to not worry about end-of-packet etc. | ||
781 | */ | ||
782 | static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, | ||
783 | struct efx_tso_header *tsoh, unsigned len) | ||
784 | { | ||
785 | struct efx_tx_buffer *buffer; | ||
786 | |||
787 | buffer = &tx_queue->buffer[tx_queue->insert_count & | ||
788 | tx_queue->efx->type->txd_ring_mask]; | ||
789 | efx_tsoh_free(tx_queue, buffer); | ||
790 | EFX_BUG_ON_PARANOID(buffer->len); | ||
791 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | ||
792 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
793 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); | ||
794 | EFX_BUG_ON_PARANOID(buffer->tsoh); | ||
795 | buffer->len = len; | ||
796 | buffer->dma_addr = tsoh->dma_addr; | ||
797 | buffer->tsoh = tsoh; | ||
798 | |||
799 | ++tx_queue->insert_count; | ||
800 | } | ||
801 | |||
802 | |||
803 | /* Remove descriptors put into a tx_queue. */ | ||
804 | static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | ||
805 | { | ||
806 | struct efx_tx_buffer *buffer; | ||
807 | |||
808 | /* Work backwards until we hit the original insert pointer value */ | ||
809 | while (tx_queue->insert_count != tx_queue->write_count) { | ||
810 | --tx_queue->insert_count; | ||
811 | buffer = &tx_queue->buffer[tx_queue->insert_count & | ||
812 | tx_queue->efx->type->txd_ring_mask]; | ||
813 | efx_tsoh_free(tx_queue, buffer); | ||
814 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
815 | buffer->len = 0; | ||
816 | buffer->continuation = 1; | ||
817 | if (buffer->unmap_len) { | ||
818 | pci_unmap_page(tx_queue->efx->pci_dev, | ||
819 | buffer->unmap_addr, | ||
820 | buffer->unmap_len, PCI_DMA_TODEVICE); | ||
821 | buffer->unmap_len = 0; | ||
822 | } | ||
823 | } | ||
824 | } | ||
825 | |||
826 | |||
827 | /* Parse the SKB header and initialise state. */ | ||
828 | static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) | ||
829 | { | ||
830 | /* All ethernet/IP/TCP headers combined size is TCP header size | ||
831 | * plus offset of TCP header relative to start of packet. | ||
832 | */ | ||
833 | st->p.header_length = ((tcp_hdr(skb)->doff << 2u) | ||
834 | + PTR_DIFF(tcp_hdr(skb), skb->data)); | ||
835 | st->p.full_packet_size = (st->p.header_length | ||
836 | + skb_shinfo(skb)->gso_size); | ||
837 | |||
838 | st->p.ipv4_id = ntohs(ip_hdr(skb)->id); | ||
839 | st->seqnum = ntohl(tcp_hdr(skb)->seq); | ||
840 | |||
841 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); | ||
842 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); | ||
843 | EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); | ||
844 | |||
845 | st->packet_space = st->p.full_packet_size; | ||
846 | st->remaining_len = skb->len - st->p.header_length; | ||
847 | } | ||
848 | |||
849 | |||
850 | /** | ||
851 | * tso_get_fragment - record fragment details and map for DMA | ||
852 | * @st: TSO state | ||
853 | * @efx: Efx NIC | ||
854 | * @len: Length of fragment | ||
855 | * @page: Page containing the fragment, at offset @page_off | ||
856 | * | ||
857 | * Record fragment details and map for DMA. Return 0 on success, or | ||
858 | * -%ENOMEM if DMA mapping fails. | ||
859 | */ | ||
860 | static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | ||
861 | int len, struct page *page, int page_off) | ||
862 | { | ||
863 | |||
864 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, | ||
865 | len, PCI_DMA_TODEVICE); | ||
866 | if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { | ||
867 | st->ifc.unmap_len = len; | ||
868 | st->ifc.len = len; | ||
869 | st->ifc.dma_addr = st->ifc.unmap_addr; | ||
870 | st->ifc.page = page; | ||
871 | st->ifc.page_off = page_off; | ||
872 | return 0; | ||
873 | } | ||
874 | return -ENOMEM; | ||
875 | } | ||
876 | |||
877 | |||
878 | /** | ||
879 | * tso_fill_packet_with_fragment - form descriptors for the current fragment | ||
880 | * @tx_queue: Efx TX queue | ||
881 | * @skb: Socket buffer | ||
882 | * @st: TSO state | ||
883 | * | ||
884 | * Form descriptors for the current fragment, until we reach the end | ||
885 | * of fragment or end-of-packet. Return 0 on success, 1 if not enough | ||
886 | * space in @tx_queue. | ||
887 | */ | ||
888 | static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, | ||
889 | const struct sk_buff *skb, | ||
890 | struct tso_state *st) | ||
891 | { | ||
892 | |||
893 | int n, end_of_packet, rc; | ||
894 | |||
895 | if (st->ifc.len == 0) | ||
896 | return 0; | ||
897 | if (st->packet_space == 0) | ||
898 | return 0; | ||
899 | |||
900 | EFX_BUG_ON_PARANOID(st->ifc.len <= 0); | ||
901 | EFX_BUG_ON_PARANOID(st->packet_space <= 0); | ||
902 | |||
903 | n = min(st->ifc.len, st->packet_space); | ||
904 | |||
905 | st->packet_space -= n; | ||
906 | st->remaining_len -= n; | ||
907 | st->ifc.len -= n; | ||
908 | st->ifc.page_off += n; | ||
909 | end_of_packet = st->remaining_len == 0 || st->packet_space == 0; | ||
910 | |||
911 | rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, | ||
912 | st->remaining_len ? NULL : skb, | ||
913 | end_of_packet, st->ifc.unmap_addr, | ||
914 | st->ifc.len ? 0 : st->ifc.unmap_len); | ||
915 | |||
916 | st->ifc.dma_addr += n; | ||
917 | |||
918 | return rc; | ||
919 | } | ||
920 | |||
921 | |||
922 | /** | ||
923 | * tso_start_new_packet - generate a new header and prepare for the new packet | ||
924 | * @tx_queue: Efx TX queue | ||
925 | * @skb: Socket buffer | ||
926 | * @st: TSO state | ||
927 | * | ||
928 | * Generate a new header and prepare for the new packet. Return 0 on | ||
929 | * success, or -1 if we failed to allocate a header. | ||
930 | */ | ||
931 | static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, | ||
932 | const struct sk_buff *skb, | ||
933 | struct tso_state *st) | ||
934 | { | ||
935 | struct efx_tso_header *tsoh; | ||
936 | struct iphdr *tsoh_iph; | ||
937 | struct tcphdr *tsoh_th; | ||
938 | unsigned ip_length; | ||
939 | u8 *header; | ||
940 | |||
941 | /* Allocate a DMA-mapped header buffer. */ | ||
942 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { | ||
943 | if (tx_queue->tso_headers_free == NULL) { | ||
944 | if (efx_tsoh_block_alloc(tx_queue)) | ||
945 | return -1; | ||
946 | } | ||
947 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); | ||
948 | tsoh = tx_queue->tso_headers_free; | ||
949 | tx_queue->tso_headers_free = tsoh->next; | ||
950 | tsoh->unmap_len = 0; | ||
951 | } else { | ||
952 | tx_queue->tso_long_headers++; | ||
953 | tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); | ||
954 | if (unlikely(!tsoh)) | ||
955 | return -1; | ||
956 | } | ||
957 | |||
958 | header = TSOH_BUFFER(tsoh); | ||
959 | tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); | ||
960 | tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); | ||
961 | |||
962 | /* Copy and update the headers. */ | ||
963 | memcpy(header, skb->data, st->p.header_length); | ||
964 | |||
965 | tsoh_th->seq = htonl(st->seqnum); | ||
966 | st->seqnum += skb_shinfo(skb)->gso_size; | ||
967 | if (st->remaining_len > skb_shinfo(skb)->gso_size) { | ||
968 | /* This packet will not finish the TSO burst. */ | ||
969 | ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); | ||
970 | tsoh_th->fin = 0; | ||
971 | tsoh_th->psh = 0; | ||
972 | } else { | ||
973 | /* This packet will be the last in the TSO burst. */ | ||
974 | ip_length = (st->p.header_length - ETH_HDR_LEN(skb) | ||
975 | + st->remaining_len); | ||
976 | tsoh_th->fin = tcp_hdr(skb)->fin; | ||
977 | tsoh_th->psh = tcp_hdr(skb)->psh; | ||
978 | } | ||
979 | tsoh_iph->tot_len = htons(ip_length); | ||
980 | |||
981 | /* Linux leaves suitable gaps in the IP ID space for us to fill. */ | ||
982 | tsoh_iph->id = htons(st->p.ipv4_id); | ||
983 | st->p.ipv4_id++; | ||
984 | |||
985 | st->packet_space = skb_shinfo(skb)->gso_size; | ||
986 | ++tx_queue->tso_packets; | ||
987 | |||
988 | /* Form a descriptor for this header. */ | ||
989 | efx_tso_put_header(tx_queue, tsoh, st->p.header_length); | ||
990 | |||
991 | return 0; | ||
992 | } | ||
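
This works because the stack, when it emits a GSO super-packet, has
already advanced its IPv4 ID counter by the number of segments, so the
consecutive IDs written here cannot collide with the flow's next packet;
the sequence number advances in lockstep by gso_size. Worked numbers
(illustrative only):

    /* Example: 4380 payload bytes, gso_size = 1460, initial id = 100,
     * initial seq = 1000:
     *
     *   packet 0: id 100, seq 1000, 1460 bytes
     *   packet 1: id 101, seq 2460, 1460 bytes
     *   packet 2: id 102, seq 3920, 1460 bytes, inherits FIN/PSH
     */
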
993 | |||
994 | |||
995 | /** | ||
996 | * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer | ||
997 | * @tx_queue: Efx TX queue | ||
998 | * @skb: Socket buffer | ||
999 | * | ||
1000 | * Context: You must hold netif_tx_lock() to call this function. | ||
1001 | * | ||
1002 | * Add socket buffer @skb to @tx_queue, doing TSO, or return non-zero | ||
1003 | * if @skb was not enqueued. In all cases @skb is consumed. Return | ||
1004 | * %NETDEV_TX_OK or %NETDEV_TX_BUSY. | ||
1005 | */ | ||
1006 | static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, | ||
1007 | const struct sk_buff *skb) | ||
1008 | { | ||
1009 | int frag_i, rc, rc2 = NETDEV_TX_OK; | ||
1010 | struct tso_state state; | ||
1011 | skb_frag_t *f; | ||
1012 | |||
1013 | /* Verify TSO is safe - these checks should never fail. */ | ||
1014 | efx_tso_check_safe(skb); | ||
1015 | |||
1016 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | ||
1017 | |||
1018 | tso_start(&state, skb); | ||
1019 | |||
1020 | /* Assume that the skb header area contains exactly the headers, | ||
1021 | * and that all payload is in the frag list. | ||
1022 | */ | ||
1023 | if (skb_headlen(skb) == state.p.header_length) { | ||
1024 | /* Grab the first payload fragment. */ | ||
1025 | EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); | ||
1026 | frag_i = 0; | ||
1027 | f = &skb_shinfo(skb)->frags[frag_i]; | ||
1028 | rc = tso_get_fragment(&state, tx_queue->efx, | ||
1029 | f->size, f->page, f->page_offset); | ||
1030 | if (rc) | ||
1031 | goto mem_err; | ||
1032 | } else { | ||
1033 | /* It may look like this code fragment assumes that the | ||
1034 | * skb->data portion does not cross a page boundary, but | ||
1035 | * that is not the case. It is guaranteed to be direct | ||
1036 | * mapped memory, and therefore is physically contiguous, | ||
1037 | * and so DMA will work fine. kmap_atomic() on this region | ||
1038 | * will just return the direct mapping, so that will work | ||
1039 | * too. | ||
1040 | */ | ||
1041 | int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1); | ||
1042 | int hl = state.p.header_length; | ||
1043 | rc = tso_get_fragment(&state, tx_queue->efx, | ||
1044 | skb_headlen(skb) - hl, | ||
1045 | virt_to_page(skb->data), page_off + hl); | ||
1046 | if (rc) | ||
1047 | goto mem_err; | ||
1048 | frag_i = -1; | ||
1049 | } | ||
1050 | |||
1051 | if (tso_start_new_packet(tx_queue, skb, &state) < 0) | ||
1052 | goto mem_err; | ||
1053 | |||
1054 | while (1) { | ||
1055 | rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); | ||
1056 | if (unlikely(rc)) | ||
1057 | goto stop; | ||
1058 | |||
1059 | /* Move on to the next fragment? */ | ||
1060 | if (state.ifc.len == 0) { | ||
1061 | if (++frag_i >= skb_shinfo(skb)->nr_frags) | ||
1062 | /* End of payload reached. */ | ||
1063 | break; | ||
1064 | f = &skb_shinfo(skb)->frags[frag_i]; | ||
1065 | rc = tso_get_fragment(&state, tx_queue->efx, | ||
1066 | f->size, f->page, f->page_offset); | ||
1067 | if (rc) | ||
1068 | goto mem_err; | ||
1069 | } | ||
1070 | |||
1071 | /* Start a new packet? */ | ||
1072 | if (state.packet_space == 0 && | ||
1073 | tso_start_new_packet(tx_queue, skb, &state) < 0) | ||
1074 | goto mem_err; | ||
1075 | } | ||
1076 | |||
1077 | /* Pass off to hardware */ | ||
1078 | falcon_push_buffers(tx_queue); | ||
1079 | |||
1080 | tx_queue->tso_bursts++; | ||
1081 | return NETDEV_TX_OK; | ||
1082 | |||
1083 | mem_err: | ||
1084 | EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" | ||
1085 | " error\n"); | ||
1086 | dev_kfree_skb_any((struct sk_buff *)skb); | ||
1087 | goto unwind; | ||
1088 | |||
1089 | stop: | ||
1090 | rc2 = NETDEV_TX_BUSY; | ||
1091 | |||
1092 | /* Stop the queue if it wasn't stopped before. */ | ||
1093 | if (tx_queue->stopped == 1) | ||
1094 | efx_stop_queue(tx_queue->efx); | ||
1095 | |||
1096 | unwind: | ||
1097 | efx_enqueue_unwind(tx_queue); | ||
1098 | return rc2; | ||
1099 | } | ||
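
The main loop above interleaves two cursors: how much of the current fragment is left, and how much space remains in the current output packet. The control flow can be modelled in a few lines of userspace C; all names and sizes here are illustrative, not the driver's:

#include <stdio.h>

int main(void)
{
	unsigned frags[] = { 3000, 2000, 1200 };   /* payload fragments */
	unsigned nfrags = 3, frag_i = 0;
	unsigned frag_left = frags[0];
	unsigned gso_size = 1448;                  /* MSS */
	unsigned packet_space = gso_size;
	unsigned packets = 1;

	for (;;) {
		/* Fill the current packet from the current fragment */
		unsigned n = frag_left < packet_space ? frag_left
						      : packet_space;
		frag_left -= n;
		packet_space -= n;

		/* Move on to the next fragment? */
		if (frag_left == 0) {
			if (++frag_i >= nfrags)
				break;		/* end of payload */
			frag_left = frags[frag_i];
		}

		/* Start a new packet? */
		if (packet_space == 0) {
			packet_space = gso_size;
			packets++;
		}
	}

	printf("emitted %u packets for %u bytes of payload\n",
	       packets, 3000u + 2000u + 1200u);
	return 0;
}
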
1100 | |||
1101 | |||
1102 | /* | ||
1103 | * Free up all TSO data structures associated with tx_queue. This | ||
1104 | * routine should be called only when the tx_queue is empty and | ||
1105 | * will no longer be used. | ||
1106 | */ | ||
1107 | static void efx_fini_tso(struct efx_tx_queue *tx_queue) | ||
1108 | { | ||
1109 | unsigned i; | ||
1110 | |||
1111 | if (tx_queue->buffer) { | ||
1112 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) | ||
1113 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); | ||
1114 | } | ||
1115 | |||
1116 | while (tx_queue->tso_headers_free != NULL) | ||
1117 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, | ||
1118 | tx_queue->efx->pci_dev); | ||
1119 | } | ||
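
The final while loop is the usual drain-a-singly-linked-free-list pattern: unlink the head, then release it. A minimal standalone model, with free() standing in for the DMA-aware block free:

#include <stdlib.h>

struct node { struct node *next; };

int main(void)
{
	/* Build a three-element list */
	struct node *head = NULL;
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->next = head;
		head = n;
	}

	/* Drain it: unlink each node before freeing it */
	while (head != NULL) {
		struct node *n = head;
		head = n->next;
		free(n);
	}
	return 0;
}
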
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index dca62f190198..35ab19c27f8d 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
@@ -16,7 +16,7 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 |
19 | #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) | 19 | #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) |
20 | 20 | ||
21 | /* XAUI resets if link not detected */ | 21 | /* XAUI resets if link not detected */ |
22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS | 22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS |
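
These workaround macros are plain predicates on the NIC, so a fix can be compiled in unconditionally and gated at runtime on the silicon revision. A standalone model of the pattern (illustrative names and revision values, not the driver's):

#include <stdio.h>

#define REV_A1 1
#define REV_B0 2

struct nic { int rev; };

#define WORKAROUND_ALWAYS(nic)   1
#define WORKAROUND_FALCON_A(nic) ((nic)->rev <= REV_A1)

int main(void)
{
	struct nic a1 = { REV_A1 };
	struct nic b0 = { REV_B0 };

	/* A fix gated on old silicon only fires where it is needed */
	printf("A1 needs Falcon-A fix: %d\n", WORKAROUND_FALCON_A(&a1));
	printf("B0 needs Falcon-A fix: %d\n", WORKAROUND_FALCON_A(&b0));
	printf("always-on fix:         %d\n", WORKAROUND_ALWAYS(&b0));
	return 0;
}
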
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c index 66dd5bf1eaa9..f3684ad28887 100644 --- a/drivers/net/sfc/xfp_phy.c +++ b/drivers/net/sfc/xfp_phy.c | |||
@@ -24,6 +24,10 @@ | |||
24 | MDIO_MMDREG_DEVS0_PMAPMD | \ | 24 | MDIO_MMDREG_DEVS0_PMAPMD | \ |
25 | MDIO_MMDREG_DEVS0_PHYXS) | 25 | MDIO_MMDREG_DEVS0_PHYXS) |
26 | 26 | ||
27 | #define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ | ||
28 | (1 << LOOPBACK_PMAPMD) | \ | ||
29 | (1 << LOOPBACK_NETWORK)) | ||
30 | |||
27 | /****************************************************************************/ | 31 | /****************************************************************************/ |
28 | /* Quake-specific MDIO registers */ | 32 | /* Quake-specific MDIO registers */ |
29 | #define MDIO_QUAKE_LED0_REG (0xD006) | 33 | #define MDIO_QUAKE_LED0_REG (0xD006) |
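
XFP_LOOPBACKS packs the supported loopback modes into a bitmask, one bit per enum value, so a support check is a single shift-and-AND. A standalone sketch of the idiom, with an illustrative enum rather than the driver's LOOPBACK_* numbering:

#include <stdio.h>

enum { LB_PCS, LB_PMAPMD, LB_NETWORK, LB_XGMII };

#define MY_LOOPBACKS ((1 << LB_PCS) | \
		      (1 << LB_PMAPMD) | \
		      (1 << LB_NETWORK))

static int supports(unsigned mask, int mode)
{
	return (mask >> mode) & 1;
}

int main(void)
{
	printf("PCS supported:   %d\n", supports(MY_LOOPBACKS, LB_PCS));
	printf("XGMII supported: %d\n", supports(MY_LOOPBACKS, LB_XGMII));
	return 0;
}
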
@@ -35,6 +39,10 @@ void xfp_set_led(struct efx_nic *p, int led, int mode) | |||
35 | mode); | 39 | mode); |
36 | } | 40 | } |
37 | 41 | ||
42 | struct xfp_phy_data { | ||
43 | int tx_disabled; | ||
44 | }; | ||
45 | |||
38 | #define XFP_MAX_RESET_TIME 500 | 46 | #define XFP_MAX_RESET_TIME 500 |
39 | #define XFP_RESET_WAIT 10 | 47 | #define XFP_RESET_WAIT 10 |
40 | 48 | ||
@@ -72,18 +80,33 @@ static int xfp_reset_phy(struct efx_nic *efx) | |||
72 | 80 | ||
73 | static int xfp_phy_init(struct efx_nic *efx) | 81 | static int xfp_phy_init(struct efx_nic *efx) |
74 | { | 82 | { |
83 | struct xfp_phy_data *phy_data; | ||
75 | u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); | 84 | u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); |
76 | int rc; | 85 | int rc; |
77 | 86 | ||
87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); | ||
88 | if (!phy_data) | ||
89 | return -ENOMEM; | ||
90 | efx->phy_data = phy_data; | ||
91 | |||
78 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" | 92 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" |
79 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), | 93 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), |
80 | MDIO_ID_REV(devid)); | 94 | MDIO_ID_REV(devid)); |
81 | 95 | ||
96 | phy_data->tx_disabled = efx->tx_disabled; | ||
97 | |||
82 | rc = xfp_reset_phy(efx); | 98 | rc = xfp_reset_phy(efx); |
83 | 99 | ||
84 | EFX_INFO(efx, "XFP: PHY init %s.\n", | 100 | EFX_INFO(efx, "XFP: PHY init %s.\n", |
85 | rc ? "failed" : "successful"); | 101 | rc ? "failed" : "successful"); |
102 | if (rc < 0) | ||
103 | goto fail; | ||
86 | 104 | ||
105 | return 0; | ||
106 | |||
107 | fail: | ||
108 | kfree(efx->phy_data); | ||
109 | efx->phy_data = NULL; | ||
87 | return rc; | 110 | return rc; |
88 | } | 111 | } |
89 | 112 | ||
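
xfp_phy_init() above follows the usual allocate-then-goto-fail idiom: the private data block is allocated and published first, and any later failure unwinds it so the caller never sees a dangling efx->phy_data. A standalone model of the pattern, with hypothetical names and calloc() standing in for kzalloc():

#include <stdlib.h>

struct phy { void *priv; };

static int hw_reset(void) { return 0; /* pretend the reset succeeds */ }

static int phy_init(struct phy *p)
{
	p->priv = calloc(1, 64);	/* kzalloc() in the kernel */
	if (!p->priv)
		return -1;		/* -ENOMEM */

	if (hw_reset() < 0)
		goto fail;

	return 0;

fail:
	free(p->priv);
	p->priv = NULL;			/* leave no dangling pointer */
	return -1;
}

int main(void)
{
	struct phy p = { 0 };
	int rc = phy_init(&p);

	free(p.priv);
	return rc ? 1 : 0;
}
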
@@ -110,6 +133,16 @@ static int xfp_phy_check_hw(struct efx_nic *efx) | |||
110 | 133 | ||
111 | static void xfp_phy_reconfigure(struct efx_nic *efx) | 134 | static void xfp_phy_reconfigure(struct efx_nic *efx) |
112 | { | 135 | { |
136 | struct xfp_phy_data *phy_data = efx->phy_data; | ||
137 | |||
138 | /* Reset the PHY when moving from tx off to tx on */ | ||
139 | if (phy_data->tx_disabled && !efx->tx_disabled) | ||
140 | xfp_reset_phy(efx); | ||
141 | |||
142 | mdio_clause45_transmit_disable(efx); | ||
143 | mdio_clause45_phy_reconfigure(efx); | ||
144 | |||
145 | phy_data->tx_disabled = efx->tx_disabled; | ||
113 | efx->link_up = xfp_link_ok(efx); | 146 | efx->link_up = xfp_link_ok(efx); |
114 | efx->link_options = GM_LPA_10000FULL; | 147 | efx->link_options = GM_LPA_10000FULL; |
115 | } | 148 | } |
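
The reconfigure hook detects the transmitter's off-to-on edge by comparing the cached tx_disabled flag against the requested state, resetting the PHY only on that transition and then re-caching the new state. A standalone sketch of the edge-detection pattern (illustrative names only):

#include <stdio.h>

struct phy_state { int tx_disabled; };

static void reconfigure(struct phy_state *cached, int want_tx_disabled)
{
	/* Reset only when the transmitter is being re-enabled */
	if (cached->tx_disabled && !want_tx_disabled)
		printf("resetting PHY\n");

	cached->tx_disabled = want_tx_disabled;
}

int main(void)
{
	struct phy_state s = { .tx_disabled = 1 };

	reconfigure(&s, 1);	/* stays off: no reset */
	reconfigure(&s, 0);	/* off -> on: reset */
	reconfigure(&s, 0);	/* stays on: no reset */
	return 0;
}
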
@@ -119,6 +152,10 @@ static void xfp_phy_fini(struct efx_nic *efx) | |||
119 | { | 152 | { |
120 | /* Clobber the LED if it was blinking */ | 153 | /* Clobber the LED if it was blinking */ |
121 | efx->board_info.blink(efx, 0); | 154 | efx->board_info.blink(efx, 0); |
155 | |||
156 | /* Free the context block */ | ||
157 | kfree(efx->phy_data); | ||
158 | efx->phy_data = NULL; | ||
122 | } | 159 | } |
123 | 160 | ||
124 | struct efx_phy_operations falcon_xfp_phy_ops = { | 161 | struct efx_phy_operations falcon_xfp_phy_ops = { |
@@ -129,4 +166,5 @@ struct efx_phy_operations falcon_xfp_phy_ops = { | |||
129 | .clear_interrupt = xfp_phy_clear_interrupt, | 166 | .clear_interrupt = xfp_phy_clear_interrupt, |
130 | .reset_xaui = efx_port_dummy_op_void, | 167 | .reset_xaui = efx_port_dummy_op_void, |
131 | .mmds = XFP_REQUIRED_DEVS, | 168 | .mmds = XFP_REQUIRED_DEVS, |
169 | .loopbacks = XFP_LOOPBACKS, | ||
132 | }; | 170 | }; |