path: root/drivers/net/sfc
author    Ingo Molnar <mingo@elte.hu>  2008-07-06 08:23:39 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-07-06 08:23:39 -0400
commit    68083e05d72d94f347293d8cc0067050ba904bfa (patch)
tree      842e71365bd90866be7add181661a4039d891564 /drivers/net/sfc
parent    7baac8b91f9871ba8cb09af84de4ae1d86d07812 (diff)
parent    b7279469d66b55119784b8b9529c99c1955fe747 (diff)
Merge commit 'v2.6.26-rc9' into cpus4096
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/bitfield.h        7
-rw-r--r--  drivers/net/sfc/boards.c          9
-rw-r--r--  drivers/net/sfc/efx.c            84
-rw-r--r--  drivers/net/sfc/falcon.c         91
-rw-r--r--  drivers/net/sfc/falcon.h          5
-rw-r--r--  drivers/net/sfc/falcon_hwdefs.h   4
-rw-r--r--  drivers/net/sfc/falcon_io.h      29
-rw-r--r--  drivers/net/sfc/falcon_xmac.c    12
-rw-r--r--  drivers/net/sfc/net_driver.h     44
-rw-r--r--  drivers/net/sfc/rx.c             48
-rw-r--r--  drivers/net/sfc/selftest.c       14
-rw-r--r--  drivers/net/sfc/sfe4001.c        14
-rw-r--r--  drivers/net/sfc/tenxpress.c       4
-rw-r--r--  drivers/net/sfc/tx.c             11
-rw-r--r--  drivers/net/sfc/workarounds.h     2
-rw-r--r--  drivers/net/sfc/xfp_phy.c         4
16 files changed, 197 insertions, 185 deletions
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2806201644cc..2c79d27404e0 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -483,7 +483,7 @@ typedef union efx_oword {
 #endif
 
 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
-	if (FALCON_REV(efx) >= FALCON_REV_B0) { \
+	if (falcon_rev(efx) >= FALCON_REV_B0) { \
 		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
 	} else { \
 		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
 } while (0)
 
 #define EFX_QWORD_FIELD_VER(efx, qword, field) \
-	(FALCON_REV(efx) >= FALCON_REV_B0 ? \
+	(falcon_rev(efx) >= FALCON_REV_B0 ? \
 	 EFX_QWORD_FIELD((qword), field##_B0) : \
 	 EFX_QWORD_FIELD((qword), field##_A1))
 
@@ -501,8 +501,5 @@ typedef union efx_oword {
 #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
 #define EFX_DMA_TYPE_WIDTH(width) \
 	(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
-#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
-			  ~((u64) 0) : ~((u32) 0))
-#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
 
 #endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index eecaa6d58584..7fc0328dc055 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
 	struct efx_blinker *bl = &efx->board_info.blinker;
 	efx->board_info.set_fault_led(efx, bl->state);
 	bl->state = !bl->state;
-	if (bl->resubmit) {
-		bl->timer.expires = jiffies + BLINK_INTERVAL;
-		add_timer(&bl->timer);
-	}
+	if (bl->resubmit)
+		mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
 }
 
 static void board_blink(struct efx_nic *efx, int blink)
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
 		blinker->state = 0;
 		setup_timer(&blinker->timer, blink_led_timer,
 			    (unsigned long)efx);
-		blinker->timer.expires = jiffies + BLINK_INTERVAL;
-		add_timer(&blinker->timer);
+		mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
 	} else {
 		blinker->resubmit = 0;
 		if (blinker->timer.function)
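Both hunks above replace the open-coded expires/add_timer() pair with mod_timer(), which (re)arms a timer in one call and is safe whether or not the timer is already pending. A minimal sketch of the idiom against the 2.6.26-era timer API -- the struct and its fields here are hypothetical, not from this driver:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct blinker {			/* hypothetical */
		struct timer_list timer;
		int keep_running;
	};

	static void blink_fn(unsigned long data)
	{
		struct blinker *b = (struct blinker *)data;

		/* ... toggle the LED ... */
		if (b->keep_running)
			/* one call both sets expires and re-queues */
			mod_timer(&b->timer, jiffies + HZ / 2);
	}

	static void blinker_start(struct blinker *b)
	{
		setup_timer(&b->timer, blink_fn, (unsigned long)b);
		mod_timer(&b->timer, jiffies + HZ / 2);
	}
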
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 418f2e53a95b..449760642e31 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
  */
 static inline void efx_channel_processed(struct efx_channel *channel)
 {
-	/* Write to EVQ_RPTR_REG. If a new event arrived in a race
-	 * with finishing processing, a new interrupt will be raised.
-	 */
+	/* The interrupt handler for this channel may set work_pending
+	 * as soon as we acknowledge the events we've seen.  Make sure
+	 * it's cleared before then. */
 	channel->work_pending = 0;
-	smp_wmb(); /* Ensure channel updated before any new interrupt. */
+	smp_wmb();
+
 	falcon_eventq_read_ack(channel);
 }
 
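The rewritten comment spells out what the smp_wmb() orders: work_pending must be observed clear before falcon_eventq_read_ack() can provoke a new interrupt that sets it again. This is the usual write-barrier publish pattern; a generic sketch, not code from this driver:

	/* Producer: make the payload visible before raising the flag. */
	shared_data = compute();
	smp_wmb();			/* order: data before flag */
	flag = 1;

	/* Consumer (e.g. an interrupt handler): flag first, then payload. */
	if (flag) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		consume(shared_data);
	}
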
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 	napi_disable(&channel->napi_str);
 
 	/* Poll the channel */
-	(void) efx_process_channel(channel, efx->type->evq_size);
+	efx_process_channel(channel, efx->type->evq_size);
 
 	/* Ack the eventq. This may cause an interrupt to be generated
 	 * when they are reenabled */
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
  *
  *************************************************************************/
 
-/* Setup per-NIC RX buffer parameters.
- * Calculate the rx buffer allocation parameters required to support
- * the current MTU, including padding for header alignment and overruns.
- */
-static void efx_calc_rx_buffer_params(struct efx_nic *efx)
-{
-	unsigned int order, len;
-
-	len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
-	       EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
-	       efx->type->rx_buffer_padding);
-
-	/* Calculate page-order */
-	for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
-		;
-
-	efx->rx_buffer_len = len;
-	efx->rx_buffer_order = order;
-}
-
 static int efx_probe_channel(struct efx_channel *channel)
 {
 	struct efx_tx_queue *tx_queue;
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
 	struct efx_channel *channel;
 	int rc = 0;
 
-	efx_calc_rx_buffer_params(efx);
+	/* Calculate the rx buffer allocation parameters required to
+	 * support the current MTU, including padding for header
+	 * alignment and overruns.
+	 */
+	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+			      efx->type->rx_buffer_padding);
+	efx->rx_buffer_order = get_order(efx->rx_buffer_len);
 
 	/* Initialise the channels */
 	efx_for_each_channel(channel, efx) {
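The hunk above also swaps the hand-rolled page-order loop (deleted earlier in this file) for get_order(), which returns the smallest order such that PAGE_SIZE << order covers the length. A user-space approximation of that computation, assuming a 4096-byte page:

	#include <stdio.h>

	#define PAGE_SIZE 4096u

	static unsigned int order_for(unsigned int len)
	{
		unsigned int order = 0;

		while ((PAGE_SIZE << order) < len)
			order++;		/* same result as get_order(len) */
		return order;
	}

	int main(void)
	{
		printf("%u\n", order_for(1500));	/* 0: one 4K page */
		printf("%u\n", order_for(9018));	/* 2: 16K covers a jumbo frame */
		return 0;
	}
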
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
 
+	/* The interrupt handler for this channel may set work_pending
+	 * as soon as we enable it.  Make sure it's cleared before
+	 * then.  Similarly, make sure it sees the enabled flag set. */
 	channel->work_pending = 0;
 	channel->enabled = 1;
-	smp_wmb(); /* ensure channel updated before first interrupt */
+	smp_wmb();
 
 	napi_enable(&channel->napi_str);
 
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
 	mutex_unlock(&efx->mac_lock);
 
 	/* Serialise against efx_set_multicast_list() */
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
 	efx->membase = ioremap_nocache(efx->membase_phys,
 				       efx->type->mem_map_size);
 	if (!efx->membase) {
-		EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
-			efx->type->mem_bar, efx->membase_phys,
+		EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
+			efx->type->mem_bar,
+			(unsigned long long)efx->membase_phys,
 			efx->type->mem_map_size);
 		rc = -ENOMEM;
 		goto fail4;
 	}
-	EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
-		efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
-		efx->membase);
+	EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
+		efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+		efx->type->mem_map_size, efx->membase);
 
 	return 0;
 
  fail4:
 	release_mem_region(efx->membase_phys, efx->type->mem_map_size);
  fail3:
-	efx->membase_phys = 0UL;
+	efx->membase_phys = 0;
  fail2:
 	pci_disable_device(efx->pci_dev);
  fail1:
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
 
 	if (efx->membase_phys) {
 		pci_release_region(efx->pci_dev, efx->type->mem_bar);
-		efx->membase_phys = 0UL;
+		efx->membase_phys = 0;
 	}
 
 	pci_disable_device(efx->pci_dev);
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
 		return;
 	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
 		return;
-	if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
 		return;
 
 	/* Mark the port as enabled so port reconfigurations can start, then
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
 	cancel_delayed_work_sync(&efx->monitor_work);
 
 	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx) {
+	efx_for_each_rx_queue(rx_queue, efx)
 		cancel_delayed_work_sync(&rx_queue->work);
-	}
 
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->reconfigure_work);
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
 	falcon_disable_interrupts(efx);
 	if (efx->legacy_irq)
 		synchronize_irq(efx->legacy_irq);
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			synchronize_irq(channel->irq);
+	}
 
 	/* Stop all NAPI processing and synchronous rx refills */
 	efx_for_each_channel(channel, efx)
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	efx_stop_queue(efx);
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
 	return 0;
 }
 
-/* Context: process, dev_base_lock held, non-blocking. */
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
 	struct efx_nic *efx = net_dev->priv;
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct net_device_stats *stats = &net_dev->stats;
 
+	/* Update stats if possible, but do not wait if another thread
+	 * is updating them (or resetting the NIC); slightly stale
+	 * stats are acceptable.
+	 */
 	if (!spin_trylock(&efx->stats_lock))
 		return stats;
 	if (efx->state == STATE_RUNNING) {
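The new comment documents a deliberate trade-off: spin_trylock() never blocks, so a reader that loses the race returns the previously gathered counters instead of stalling. The shape of that pattern, sketched with hypothetical names:

	static struct net_device_stats *get_stats(struct dev_priv *p)
	{
		if (!spin_trylock(&p->stats_lock))
			return &p->cached_stats;	/* slightly stale is fine */

		refresh_stats_from_hw(p);		/* the slow path */
		spin_unlock(&p->stats_lock);
		return &p->cached_stats;
	}
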
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 static int efx_netdev_event(struct notifier_block *this,
 			    unsigned long event, void *ptr)
 {
-	struct net_device *net_dev = (struct net_device *)ptr;
+	struct net_device *net_dev = ptr;
 
 	if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
 		struct efx_nic *efx = net_dev->priv;
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 	efx_for_each_tx_queue(tx_queue, efx)
 		efx_release_tx_buffers(tx_queue);
 
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 		unregister_netdev(efx->net_dev);
 	}
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
 	if (method == RESET_TYPE_DISABLE) {
 		/* Reinitialise the device anyway so the driver unload sequence
 		 * can talk to the external SRAM */
-		(void) falcon_init_nic(efx);
+		falcon_init_nic(efx);
 		rc = -EIO;
 		goto fail4;
 	}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index b57cc68058c0..790db89db345 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 **************************************************************************
 */
 
-/* DMA address mask (up to 46-bit, avoiding compiler warnings)
- *
- * Note that it is possible to have a platform with 64-bit longs and
- * 32-bit DMA addresses, or vice versa.  EFX_DMA_MASK takes care of the
- * platform DMA mask.
- */
-#if BITS_PER_LONG == 64
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
-#else
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
-#endif
+/* DMA address mask */
+#define FALCON_DMA_MASK DMA_BIT_MASK(46)
 
 /* TX DMA length mask (13-bit) */
 #define FALCON_TX_DMA_MASK (4096 - 1)
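DMA_BIT_MASK(46) produces a 64-bit constant with the low 46 bits set, which is why the BITS_PER_LONG special-casing and the EFX_DMA_MASK clamp could be dropped. The arithmetic, checked in plain user-space C:

	#include <stdio.h>

	/* the value DMA_BIT_MASK(n) yields for n < 64 */
	#define MASK(n) ((1ULL << (n)) - 1)

	int main(void)
	{
		printf("%#llx\n", (unsigned long long)MASK(46));
		/* 0x3fffffffffff: Falcon DMA addresses fit below 2^46 */
		return 0;
	}
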
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 #define PCI_EXP_LNKSTA_LNK_WID_LBN	4
 
 #define FALCON_IS_DUAL_FUNC(efx)		\
-	(FALCON_REV(efx) < FALCON_REV_B0)
+	(falcon_rev(efx) < FALCON_REV_B0)
 
 /**************************************************************************
  *
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 			      TX_DESCQ_TYPE, 0,
 			      TX_NON_IP_DROP_DIS_B0, 1);
 
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
 			   tx_queue->queue);
 
-	if (FALCON_REV(efx) < FALCON_REV_B0) {
+	if (falcon_rev(efx) < FALCON_REV_B0) {
 		efx_oword_t reg;
 
 		BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
 	int rc;
-	int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+	int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
 	int iscsi_digest_en = is_b0;
 
 	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -742,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
 			continue;
 		break;
 	}
-	if (rc)
+	if (rc) {
 		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
+		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+	}
 
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
@@ -822,10 +815,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
 		tx_queue = &efx->tx_queue[tx_ev_q_label];
 
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_lock(efx->net_dev);
 		falcon_notify_tx_desc(tx_queue);
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_unlock(efx->net_dev);
 	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
 		   EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +877,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 						 RX_EV_TCP_UDP_CHKSUM_ERR);
 	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
 	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-	rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
 			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
 	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
 
@@ -1065,7 +1058,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
 	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))
 		is_phy_event = 1;
 
-	if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
 	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
 		is_phy_event = 1;
 
@@ -1405,7 +1398,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	efx_oword_t fatal_intr;
 	int error, mem_perr;
 	static int n_int_errors;
@@ -1451,8 +1444,8 @@ out:
  */
 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 {
-	struct efx_nic *efx = (struct efx_nic *)dev_id;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	struct efx_nic *efx = dev_id;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	struct efx_channel *channel;
 	efx_dword_t reg;
 	u32 queues;
@@ -1489,8 +1482,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 
 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
-	struct efx_nic *efx = (struct efx_nic *)dev_id;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	struct efx_nic *efx = dev_id;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	struct efx_channel *channel;
 	int syserr;
 	int queues;
@@ -1542,9 +1535,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
  */
 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
 {
-	struct efx_channel *channel = (struct efx_channel *)dev_id;
+	struct efx_channel *channel = dev_id;
 	struct efx_nic *efx = channel->efx;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	int syserr;
 
 	efx->last_irq_cpu = raw_smp_processor_id();
@@ -1572,7 +1565,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
 	unsigned long offset;
 	efx_dword_t dword;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
 	for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1595,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
 
 	if (!EFX_INT_MODE_USE_MSI(efx)) {
 		irq_handler_t handler;
-		if (FALCON_REV(efx) >= FALCON_REV_B0)
+		if (falcon_rev(efx) >= FALCON_REV_B0)
 			handler = falcon_legacy_interrupt_b0;
 		else
 			handler = falcon_legacy_interrupt_a1;
@@ -1636,12 +1629,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 	efx_oword_t reg;
 
 	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			free_irq(channel->irq, channel);
+	}
 
 	/* ACK legacy interrupt */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		falcon_read(efx, &reg, INT_ISR0_B0);
 	else
 		falcon_irq_ack_a1(efx);
@@ -1732,7 +1726,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
 	efx_oword_t temp;
 	int count;
 
-	if ((FALCON_REV(efx) < FALCON_REV_B0) ||
+	if ((falcon_rev(efx) < FALCON_REV_B0) ||
 	    (efx->loopback_mode != LOOPBACK_NONE))
 		return;
 
@@ -1785,7 +1779,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
 	efx_oword_t temp;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
 	/* Isolate the MAC -> RX */
@@ -1823,7 +1817,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 			     MAC_SPEED, link_speed);
 	/* On B0, MAC backpressure can be disabled and packets get
 	 * discarded. */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
 				    !efx->link_up);
 	}
@@ -1841,7 +1835,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 
 	/* Unisolate the MAC -> RX */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
 	falcon_write(efx, &reg, RX_CFG_REG_KER);
 }
@@ -1856,7 +1850,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
 		return 0;
 
 	/* Statistics fetch will fail if the MAC is in TX drain */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		efx_oword_t temp;
 		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
 		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1940,7 +1934,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
 static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
 			      int addr, int value)
 {
-	struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+	struct efx_nic *efx = net_dev->priv;
 	unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
 	efx_oword_t reg;
 
@@ -2008,7 +2002,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
  * could be read, -1 will be returned. */
 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
 {
-	struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+	struct efx_nic *efx = net_dev->priv;
 	unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
 	efx_oword_t reg;
 	int value = -1;
@@ -2113,7 +2107,7 @@ int falcon_probe_port(struct efx_nic *efx)
 	falcon_init_mdio(&efx->mii);
 
 	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		efx->flow_control = EFX_FC_RX | EFX_FC_TX;
 	else
 		efx->flow_control = EFX_FC_RX;
@@ -2373,7 +2367,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		return -ENODEV;
 	}
 
-	switch (FALCON_REV(efx)) {
+	switch (falcon_rev(efx)) {
 	case FALCON_REV_A0:
 	case 0xff:
 		EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2399,7 +2393,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		break;
 
 	default:
-		EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
 		return -ENODEV;
 	}
 
@@ -2419,7 +2413,7 @@ int falcon_probe_nic(struct efx_nic *efx)
 
 	/* Allocate storage for hardware specific data */
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
-	efx->nic_data = (void *) nic_data;
+	efx->nic_data = nic_data;
 
 	/* Determine number of ports etc. */
 	rc = falcon_probe_nic_variant(efx);
@@ -2489,13 +2483,10 @@
  */
 int falcon_init_nic(struct efx_nic *efx)
 {
-	struct falcon_nic_data *data;
 	efx_oword_t temp;
 	unsigned thresh;
 	int rc;
 
-	data = (struct falcon_nic_data *)efx->nic_data;
-
 	/* Set up the address region register. This is only needed
 	 * for the B0 FPGA, but since we are just pushing in the
 	 * reset defaults this may as well be unconditional. */
@@ -2562,7 +2553,7 @@ int falcon_init_nic(struct efx_nic *efx)
 
 	/* Set number of RSS queues for receive path. */
 	falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
 	else
 		EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2600,7 +2591,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
 	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
 	/* Squash TX of packets of 16 bytes or less */
-	if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
 		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
 	falcon_write(efx, &temp, TX_CFG2_REG_KER);
 
@@ -2617,7 +2608,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	if (EFX_WORKAROUND_7575(efx))
 		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
 					(3 * 4096) / 32);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
 
 	/* RX FIFO flow control thresholds */
@@ -2633,7 +2624,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	falcon_write(efx, &temp, RX_CFG_REG_KER);
 
 	/* Set destination of both TX and RX Flush events */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
 		falcon_write(efx, &temp, DP_CTRL_REG);
 	}
@@ -2647,7 +2638,7 @@ void falcon_remove_nic(struct efx_nic *efx)
 
 	falcon_free_buffer(efx, &efx->irq_status);
 
-	(void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+	falcon_reset_hw(efx, RESET_TYPE_ALL);
 
 	/* Release the second function after the reset */
 	if (nic_data->pci_dev2) {
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 6117403b0c03..492f9bc28840 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -23,7 +23,10 @@ enum falcon_revision {
 	FALCON_REV_B0 = 2,
 };
 
-#define FALCON_REV(efx) ((efx)->pci_dev->revision)
+static inline int falcon_rev(struct efx_nic *efx)
+{
+	return efx->pci_dev->revision;
+}
 
 extern struct efx_nic_type falcon_a_nic_type;
 extern struct efx_nic_type falcon_b_nic_type;
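Replacing the FALCON_REV() macro with a static inline changes nothing at runtime but gives the argument a real type, so passing the wrong pointer now fails to compile instead of expanding silently. The general pattern, with a hypothetical struct:

	struct widget { int revision; };		/* hypothetical */

	/* macro: expands for anything that happens to have ->revision */
	#define WIDGET_REV(w) ((w)->revision)

	/* inline: identical generated code, but 'w' is type-checked */
	static inline int widget_rev(struct widget *w)
	{
		return w->revision;
	}
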
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 06e2d68fc3d1..6d003114eeab 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 {
 	u8 port1_phy_type;
 	__le16 asic_sub_revision;
 	__le16 board_revision;
-} __attribute__ ((packed));
+} __packed;
 
 #define NVCONFIG_BASE 0x300
 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
@@ -1144,6 +1144,6 @@ struct falcon_nvconfig {
 	__le16 board_struct_ver;
 	__le16 board_checksum;
 	struct falcon_nvconfig_board_v2 board_v2;
-} __attribute__ ((packed));
+} __packed;
 
 #endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index ea08184ddfa9..6670cdfc41ab 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -56,14 +56,27 @@
 #define FALCON_USE_QWORD_IO 1
 #endif
 
-#define _falcon_writeq(efx, value, reg) \
-	__raw_writeq((__force u64) (value), (efx)->membase + (reg))
-#define _falcon_writel(efx, value, reg) \
-	__raw_writel((__force u32) (value), (efx)->membase + (reg))
-#define _falcon_readq(efx, reg) \
-	((__force __le64) __raw_readq((efx)->membase + (reg)))
-#define _falcon_readl(efx, reg) \
-	((__force __le32) __raw_readl((efx)->membase + (reg)))
+#ifdef FALCON_USE_QWORD_IO
+static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
+				  unsigned int reg)
+{
+	__raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
+				  unsigned int reg)
+{
+	__raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le32)__raw_readl(efx->membase + reg);
+}
 
 /* Writes to a normal 16-byte Falcon register, locking as appropriate. */
 static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
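The same macro-to-inline conversion applies to the MMIO wrappers, with the extra benefit that the value parameter is now a typed __le64/__le32, so sparse can flag a caller that skips the endianness conversion -- something the old macros accepted silently. A hypothetical caller (REG is a made-up register offset):

	__le32 v = cpu_to_le32(0x1234);

	_falcon_writel(efx, v, REG);		/* ok: already little-endian */
	_falcon_writel(efx, 0x1234, REG);	/* sparse: plain int where
						 * __le32 is expected */
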
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index a74b7931a3c4..55c0d9760be8 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
 {
 	efx_dword_t reg;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return 1;
 
 	/* The ISR latches, so clear it and re-read */
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
 {
 	efx_dword_t reg;
 
-	if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
+	if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
 		return;
 
 	/* Flush the ISR */
@@ -454,12 +454,12 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
 
 		EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
 			__func__, tries);
-		(void) falcon_reset_xaui(efx);
+		falcon_reset_xaui(efx);
 		udelay(200);
 		tries--;
 	}
 
-	EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
+	EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
 		max_tries);
 	return 0;
 }
@@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx)
 	xaui_link_ok = falcon_xaui_link_ok(efx);
 
 	if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
-		(void) falcon_reset_xaui(efx);
+		falcon_reset_xaui(efx);
 
 	/* Call the PHY check_hw routine */
 	rc = efx->phy_op->check_hw(efx);
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
 	reset = ((flow_control & EFX_FC_TX) &&
 		 !(efx->flow_control & EFX_FC_TX));
 	if (EFX_WORKAROUND_11482(efx) && reset) {
-		if (FALCON_REV(efx) >= FALCON_REV_B0) {
+		if (falcon_rev(efx) >= FALCON_REV_B0) {
 			/* Recover by resetting the EM block */
 			if (efx->link_up)
 				falcon_drain_tx_fifo(efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 59f261b4171f..5e20e7551dae 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -42,7 +42,7 @@
 #ifndef EFX_DRIVER_NAME
 #define EFX_DRIVER_NAME "sfc"
 #endif
-#define EFX_DRIVER_VERSION "2.2.0136"
+#define EFX_DRIVER_VERSION "2.2"
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -52,28 +52,19 @@
 #define EFX_WARN_ON_PARANOID(x) do {} while (0)
 #endif
 
-#define NET_DEV_REGISTERED(efx) \
-	((efx)->net_dev->reg_state == NETREG_REGISTERED)
-
-/* Include net device name in log messages if it has been registered.
- * Use efx->name not efx->net_dev->name so that races with (un)registration
- * are harmless.
- */
-#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
-
 /* Un-rate-limited logging */
 #define EFX_ERR(efx, fmt, args...) \
-dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
 
 #define EFX_INFO(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_LOG(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #else
 #define EFX_LOG(efx, fmt, args...) \
-dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #endif
 
 #define EFX_TRACE(efx, fmt, args...) do {} while (0)
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
 #define EFX_LOG_RL(efx, fmt, args...) \
 do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
 
-/* Kernel headers may redefine inline anyway */
-#ifndef inline
-#define inline inline __attribute__ ((always_inline))
-#endif
-
 /**************************************************************************
  *
  * Efx data structures
@@ -695,7 +681,7 @@ struct efx_nic {
 	struct workqueue_struct *workqueue;
 	struct work_struct reset_work;
 	struct delayed_work monitor_work;
-	unsigned long membase_phys;
+	resource_size_t membase_phys;
 	void __iomem *membase;
 	spinlock_t biu_lock;
 	enum efx_int_mode interrupt_mode;
@@ -719,7 +705,7 @@ struct efx_nic {
 
 	unsigned n_rx_nodesc_drop_cnt;
 
-	void *nic_data;
+	struct falcon_nic_data *nic_data;
 
 	struct mutex mac_lock;
 	int port_enabled;
@@ -760,6 +746,20 @@ struct efx_nic {
 	void *loopback_selftest;
 };
 
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+	return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+/* Net device name, for inclusion in log messages if it has been registered.
+ * Use efx->name not efx->net_dev->name so that races with (un)registration
+ * are harmless.
+ */
+static inline const char *efx_dev_name(struct efx_nic *efx)
+{
+	return efx_dev_registered(efx) ? efx->name : "";
+}
+
 /**
  * struct efx_nic_type - Efx device type definition
  * @mem_bar: Memory BAR number
@@ -795,7 +795,7 @@ struct efx_nic_type {
 	unsigned int txd_ring_mask;
 	unsigned int rxd_ring_mask;
 	unsigned int evq_size;
-	dma_addr_t max_dma_mask;
+	u64 max_dma_mask;
 	unsigned int tx_dma_mask;
 	unsigned bug5391_mask;
 
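efx_dev_name() preserves the race-avoidance the old NET_DEV_NAME() comment described: it reads efx->name, a copy the driver owns, never net_dev->name, which (un)register_netdev() may be rewriting concurrently. The cached-name idiom, sketched with a hypothetical structure:

	struct my_nic {				/* hypothetical */
		struct net_device *net_dev;
		char name[IFNAMSIZ];	/* driver-owned copy for log messages */
	};

	static const char *my_nic_name(struct my_nic *nic)
	{
		/* callable from any context; at worst briefly stale */
		return nic->name[0] ? nic->name : "";
	}
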
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 670622373ddf..601b001437c0 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data) \
-	(((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf) \
-	RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx) \
-	(PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+	/* Offset is always within one page, so we don't need to consider
+	 * the page order.
+	 */
+	return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+	return PAGE_SIZE << efx->rx_buffer_order;
+}
 
 
 /**************************************************************************
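efx_rx_buf_offset() can mask with PAGE_SIZE - 1 regardless of rx_buffer_order because an offset into a higher-order block is still an offset within some single page. Checking that with concrete numbers (PAGE_SIZE assumed 4096):

	#include <stdio.h>

	#define PAGE_SIZE 4096ul

	int main(void)
	{
		unsigned long base = 0x10000;	/* start of an order-2 (16K) block */
		unsigned long p = base + 5000;	/* lands in the block's second page */

		printf("%lu\n", p & (PAGE_SIZE - 1));	/* 904: offset in that page */
		return 0;
	}
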
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
 			       void **tcpudp_hdr, u64 *hdr_flags, void *priv)
 {
-	struct efx_channel *channel = (struct efx_channel *)priv;
+	struct efx_channel *channel = priv;
 	struct iphdr *iph;
 	struct tcphdr *th;
 
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
 			    void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
 			    void *priv)
 {
-	struct efx_channel *channel = (struct efx_channel *)priv;
+	struct efx_channel *channel = priv;
 	struct ethhdr *eh;
 	struct iphdr *iph;
 
 	/* We support EtherII and VLAN encapsulated IPv4 */
-	eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
+	eh = page_address(frag->page) + frag->page_offset;
 	*mac_hdr = eh;
 
 	if (eh->h_proto == htons(ETH_P_IP)) {
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 			return -ENOMEM;
 
 		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-					0, RX_PAGE_SIZE(efx),
+					0, efx_rx_buf_size(efx),
 					PCI_DMA_FROMDEVICE);
 
 		if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 
 		rx_queue->buf_page = rx_buf->page;
 		rx_queue->buf_dma_addr = dma_addr;
-		rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+		rx_queue->buf_data = (page_address(rx_buf->page) +
 				      EFX_PAGE_IP_ALIGN);
 	}
 
-	offset = RX_DATA_OFFSET(rx_queue->buf_data);
 	rx_buf->len = bytes;
-	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 	rx_buf->data = rx_queue->buf_data;
+	offset = efx_rx_buf_offset(rx_buf);
+	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 
 	/* Try to pack multiple buffers per page */
 	if (efx->rx_buffer_order == 0) {
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
 		offset += ((bytes + 0x1ff) & ~0x1ff);
 
-		space = RX_PAGE_SIZE(efx) - offset;
+		space = efx_rx_buf_size(efx) - offset;
 		if (space >= bytes) {
 			/* Refs dropped on kernel releasing each skb */
 			get_page(rx_queue->buf_page);
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
 		if (rx_buf->unmap_addr) {
 			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-				       RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+				       efx_rx_buf_size(efx),
+				       PCI_DMA_FROMDEVICE);
 			rx_buf->unmap_addr = 0;
 		}
 	} else if (likely(rx_buf->skb)) {
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 		return 0;
 
 	/* Record minimum fill level */
-	if (unlikely(fill_level < rx_queue->min_fill))
+	if (unlikely(fill_level < rx_queue->min_fill)) {
 		if (fill_level)
 			rx_queue->min_fill = fill_level;
+	}
 
 	/* Acquire RX add lock. If this lock is contended, then a fast
 	 * fill must already be in progress (e.g. in the refill
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
 		struct skb_frag_struct frags;
 
 		frags.page = rx_buf->page;
-		frags.page_offset = RX_BUF_OFFSET(rx_buf);
+		frags.page_offset = efx_rx_buf_offset(rx_buf);
 		frags.size = rx_buf->len;
 
 		lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
 	if (unlikely(rx_buf->len > hdr_len)) {
 		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
 		frag->page = rx_buf->page;
-		frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+		frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
 		frag->size = skb->len - hdr_len;
 		skb_shinfo(skb)->nr_frags = 1;
 		skb->data_len = frag->size;
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 	/* For a page that is part-way through splitting into RX buffers */
 	if (rx_queue->buf_page != NULL) {
 		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-			       RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+			       efx_rx_buf_size(rx_queue->efx),
+			       PCI_DMA_FROMDEVICE);
 		__free_pages(rx_queue->buf_page,
 			     rx_queue->efx->rx_buffer_order);
 		rx_queue->buf_page = NULL;
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index cbda15946e8f..3b2de9fe7f27 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
290 290
291 payload = &state->payload; 291 payload = &state->payload;
292 292
293 received = (struct efx_loopback_payload *)(char *) buf_ptr; 293 received = (struct efx_loopback_payload *) buf_ptr;
294 received->ip.saddr = payload->ip.saddr; 294 received->ip.saddr = payload->ip.saddr;
295 received->ip.check = payload->ip.check; 295 received->ip.check = payload->ip.check;
296 296
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
424 * interrupt handler. */ 424 * interrupt handler. */
425 smp_wmb(); 425 smp_wmb();
426 426
427 if (NET_DEV_REGISTERED(efx)) 427 if (efx_dev_registered(efx))
428 netif_tx_lock_bh(efx->net_dev); 428 netif_tx_lock_bh(efx->net_dev);
429 rc = efx_xmit(efx, tx_queue, skb); 429 rc = efx_xmit(efx, tx_queue, skb);
430 if (NET_DEV_REGISTERED(efx)) 430 if (efx_dev_registered(efx))
431 netif_tx_unlock_bh(efx->net_dev); 431 netif_tx_unlock_bh(efx->net_dev);
432 432
433 if (rc != NETDEV_TX_OK) { 433 if (rc != NETDEV_TX_OK) {
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
453 int tx_done = 0, rx_good, rx_bad; 453 int tx_done = 0, rx_good, rx_bad;
454 int i, rc = 0; 454 int i, rc = 0;
455 455
456 if (NET_DEV_REGISTERED(efx)) 456 if (efx_dev_registered(efx))
457 netif_tx_lock_bh(efx->net_dev); 457 netif_tx_lock_bh(efx->net_dev);
458 458
459 /* Count the number of tx completions, and decrement the refcnt. Any 459 /* Count the number of tx completions, and decrement the refcnt. Any
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
465 dev_kfree_skb_any(skb); 465 dev_kfree_skb_any(skb);
466 } 466 }
467 467
468 if (NET_DEV_REGISTERED(efx)) 468 if (efx_dev_registered(efx))
469 netif_tx_unlock_bh(efx->net_dev); 469 netif_tx_unlock_bh(efx->net_dev);
470 470
471 /* Check TX completion and received packet counts */ 471 /* Check TX completion and received packet counts */
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
517 state->packet_count = min(1 << (i << 2), state->packet_count); 517 state->packet_count = min(1 << (i << 2), state->packet_count);
518 state->skbs = kzalloc(sizeof(state->skbs[0]) * 518 state->skbs = kzalloc(sizeof(state->skbs[0]) *
519 state->packet_count, GFP_KERNEL); 519 state->packet_count, GFP_KERNEL);
520 if (!state->skbs)
521 return -ENOMEM;
520 state->flush = 0; 522 state->flush = 0;
521 523
522 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 524 EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx,
700 * "flushing" so all inflight packets are dropped */ 702 * "flushing" so all inflight packets are dropped */
701 BUG_ON(efx->loopback_selftest); 703 BUG_ON(efx->loopback_selftest);
702 state->flush = 1; 704 state->flush = 1;
703 efx->loopback_selftest = (void *)state; 705 efx->loopback_selftest = state;
704 706
705 rc = efx_test_loopbacks(efx, tests, loopback_modes); 707 rc = efx_test_loopbacks(efx, tests, loopback_modes);
706 708
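The selftest.c changes above drop redundant casts, switch to the
lowercase efx_dev_registered() helper, and add a previously missing
allocation-failure check for the skb array. A minimal sketch of that
added check, assuming the state and field names from the hunk (kcalloc()
is shown here as the equivalent array-allocation helper; the patch
itself keeps the kzalloc(sizeof * count) form):

/* Bail out with -ENOMEM before any packets are queued; the caller
 * owns cleanup of previously allocated state. */
state->skbs = kcalloc(state->packet_count, sizeof(state->skbs[0]),
		      GFP_KERNEL);
if (!state->skbs)
	return -ENOMEM;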
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 725d1a539c49..66a0d1442aba 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx)
116 116
117 /* Turn off all power rails */ 117 /* Turn off all power rails */
118 out = 0xff; 118 out = 0xff;
119 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 119 efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
120 120
121 /* Disable port 1 outputs on IO expander */ 121 /* Disable port 1 outputs on IO expander */
122 cfg = 0xff; 122 cfg = 0xff;
123 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 123 efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
124 124
125 /* Disable port 0 outputs on IO expander */ 125 /* Disable port 0 outputs on IO expander */
126 cfg = 0xff; 126 cfg = 0xff;
127 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 127 efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
128 128
129 /* Clear any over-temperature alert */ 129 /* Clear any over-temperature alert */
130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 130 efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
131} 131}
132 132
133/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 133/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
@@ -253,14 +253,14 @@ done:
253fail3: 253fail3:
254 /* Turn off all power rails */ 254 /* Turn off all power rails */
255 out = 0xff; 255 out = 0xff;
256 (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 256 efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
257 /* Disable port 1 outputs on IO expander */ 257 /* Disable port 1 outputs on IO expander */
258 out = 0xff; 258 out = 0xff;
259 (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); 259 efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
260fail2: 260fail2:
261 /* Disable port 0 outputs on IO expander */ 261 /* Disable port 0 outputs on IO expander */
262 out = 0xff; 262 out = 0xff;
263 (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); 263 efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
264fail1: 264fail1:
265 return rc; 265 return rc;
266} 266}
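The sfe4001.c changes above are purely cosmetic: the "(void)" casts on
efx_i2c_write() and efx_i2c_read() are dropped. These calls sit on
poweroff and error paths where the return value is deliberately ignored,
since there is no recovery action to take while the board is being
powered down, and ignoring a return value in C needs no cast. A short
sketch of the resulting style, reusing the constants from the hunk (the
trailing comments are illustrative):

/* Best-effort shutdown writes: errors are ignored on purpose, and no
 * "(void)" cast is needed to say so. */
out = 0xff;
efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);     /* all rails off */
efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);  /* port 1 to inputs */
efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);  /* port 0 to inputs */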
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index b1cd6deec01f..c0146061c326 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
211 int rc = 0; 211 int rc = 0;
212 212
213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
214 if (!phy_data)
215 return -ENOMEM;
214 efx->phy_data = phy_data; 216 efx->phy_data = phy_data;
215 217
216 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 218 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
@@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
376 * perform a special software reset */ 378 * perform a special software reset */
377 if ((phy_data->tx_disabled && !efx->tx_disabled) || 379 if ((phy_data->tx_disabled && !efx->tx_disabled) ||
378 loop_change) { 380 loop_change) {
379 (void) tenxpress_special_reset(efx); 381 tenxpress_special_reset(efx);
380 falcon_reset_xaui(efx); 382 falcon_reset_xaui(efx);
381 } 383 }
382 384
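The tenxpress.c changes mirror the selftest ones: a -ENOMEM check after
kzalloc(), and another "(void)" cast removed. Note that the special
reset fires on a state transition (the PHY had TX disabled and the MAC
no longer wants it disabled) rather than on the current state alone. A
self-contained sketch of that edge-detect pattern; only the names
visible in the hunk come from the patch, and the caching assignment is
illustrative:

/* Reset only on the disable->enable edge or a loopback mode change;
 * comparing cached state with requested state avoids a heavyweight
 * PHY reset on every reconfigure. */
if ((phy_data->tx_disabled && !efx->tx_disabled) || loop_change) {
	tenxpress_special_reset(efx);
	falcon_reset_xaui(efx);
}
phy_data->tx_disabled = efx->tx_disabled;  /* cache for the next call */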
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 9b436f5b4888..5cdd082ab8f6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
387 if (unlikely(tx_queue->stopped)) { 387 if (unlikely(tx_queue->stopped)) {
388 fill_level = tx_queue->insert_count - tx_queue->read_count; 388 fill_level = tx_queue->insert_count - tx_queue->read_count;
389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
390 EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); 390 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
391 391
392 /* Do this under netif_tx_lock(), to avoid racing 392 /* Do this under netif_tx_lock(), to avoid racing
393 * with efx_xmit(). */ 393 * with efx_xmit(). */
@@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
639 base_dma = tsoh->dma_addr & PAGE_MASK; 639 base_dma = tsoh->dma_addr & PAGE_MASK;
640 640
641 p = &tx_queue->tso_headers_free; 641 p = &tx_queue->tso_headers_free;
642 while (*p != NULL) 642 while (*p != NULL) {
643 if (((unsigned long)*p & PAGE_MASK) == base_kva) 643 if (((unsigned long)*p & PAGE_MASK) == base_kva)
644 *p = (*p)->next; 644 *p = (*p)->next;
645 else 645 else
646 p = &(*p)->next; 646 p = &(*p)->next;
647 }
647 648
648 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 649 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
649} 650}
@@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
939 940
940 /* Allocate a DMA-mapped header buffer. */ 941 /* Allocate a DMA-mapped header buffer. */
941 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 942 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
942 if (tx_queue->tso_headers_free == NULL) 943 if (tx_queue->tso_headers_free == NULL) {
943 if (efx_tsoh_block_alloc(tx_queue)) 944 if (efx_tsoh_block_alloc(tx_queue))
944 return -1; 945 return -1;
946 }
945 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 947 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
946 tsoh = tx_queue->tso_headers_free; 948 tsoh = tx_queue->tso_headers_free;
947 tx_queue->tso_headers_free = tsoh->next; 949 tx_queue->tso_headers_free = tsoh->next;
@@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1106{ 1108{
1107 unsigned i; 1109 unsigned i;
1108 1110
1109 if (tx_queue->buffer) 1111 if (tx_queue->buffer) {
1110 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1112 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
1111 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1113 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1114 }
1112 1115
1113 while (tx_queue->tso_headers_free != NULL) 1116 while (tx_queue->tso_headers_free != NULL)
1114 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, 1117 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
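The tx.c hunks above add the braces that kernel CodingStyle wants around
multi-line and nested single-statement bodies; behaviour is unchanged.
The efx_tsoh_block_free() loop is also a compact use of the
pointer-to-pointer idiom for unlinking list nodes without a head special
case. A standalone sketch of that idiom, with the types and names from
the hunk:

/* *p always points at the link to the current node (initially the
 * list head), so unlinking rewrites the link in place and deleting
 * the first node needs no special case. */
struct efx_tso_header **p = &tx_queue->tso_headers_free;

while (*p != NULL) {
	if (((unsigned long)*p & PAGE_MASK) == base_kva)
		*p = (*p)->next;        /* unlink: skip this node */
	else
		p = &(*p)->next;        /* keep: advance to its link */
}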
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index dca62f190198..35ab19c27f8d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -16,7 +16,7 @@
16 */ 16 */
17 17
18#define EFX_WORKAROUND_ALWAYS(efx) 1 18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
20 20
21/* XAUI resets if link not detected */ 21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
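The workarounds.h change is part of the patch-wide rename of the
FALCON_REV() macro to a lowercase falcon_rev() helper: a static inline
function type-checks its argument and evaluates it exactly once, which
an expression macro cannot guarantee. One plausible shape for the
helper, offered purely as a hypothetical sketch (the real definition
lives in falcon.h and may differ):

/* Hypothetical sketch only: read the chip revision from the copy of
 * PCI config space cached in struct pci_dev. */
static inline int falcon_rev(struct efx_nic *efx)
{
	return efx->pci_dev->revision;
}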
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 3b9f9ddbc372..f3684ad28887 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx)
85 int rc; 85 int rc;
86 86
87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
88 efx->phy_data = (void *) phy_data; 88 if (!phy_data)
89 return -ENOMEM;
90 efx->phy_data = phy_data;
89 91
90 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 92 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
91 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),