author     Ben Hutchings <bhutchings@solarflare.com>    2008-05-16 16:16:10 -0400
committer  Jeff Garzik <jgarzik@redhat.com>             2008-05-22 05:59:27 -0400
commit     55668611d0b2a5947cd17f66243be3cebf21400c
tree       8882b336ea5d7fd7e544c888a3b246e9463436fa /drivers/net
parent     b3475645ed8b823c063f7560b243026150d7c3f8
sfc: Replaced various macros with inline functions
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
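
The pattern applied throughout is the one visible in the falcon.h hunk below: an expression macro becomes a static inline function, which costs nothing at run time but gives the compiler argument type checking and guarantees the argument is evaluated exactly once. As a before/after sketch of that conversion:

    /* Before: macro, accepts any expression, no type checking */
    #define FALCON_REV(efx) ((efx)->pci_dev->revision)

    /* After: static inline, argument must be a struct efx_nic pointer */
    static inline int falcon_rev(struct efx_nic *efx)
    {
    	return efx->pci_dev->revision;
    }

The same treatment is applied to NET_DEV_REGISTERED/NET_DEV_NAME (net_driver.h), the _falcon_read*/_falcon_write* accessors (falcon_io.h) and the RX buffer macros (rx.c).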
Diffstat (limited to 'drivers/net')
 drivers/net/sfc/bitfield.h    |  4
 drivers/net/sfc/efx.c         |  8
 drivers/net/sfc/falcon.c      | 46
 drivers/net/sfc/falcon.h      |  5
 drivers/net/sfc/falcon_io.h   | 29
 drivers/net/sfc/falcon_xmac.c |  6
 drivers/net/sfc/net_driver.h  | 31
 drivers/net/sfc/rx.c          | 39
 drivers/net/sfc/selftest.c    |  8
 drivers/net/sfc/tx.c          |  2
 drivers/net/sfc/workarounds.h |  2
 11 files changed, 105 insertions(+), 75 deletions(-)
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2806201644cc..c98a591bd800 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -483,7 +483,7 @@ typedef union efx_oword {
 #endif
 
 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
-	if (FALCON_REV(efx) >= FALCON_REV_B0) { \
+	if (falcon_rev(efx) >= FALCON_REV_B0) { \
 		EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
 	} else { \
 		EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
 } while (0)
 
 #define EFX_QWORD_FIELD_VER(efx, qword, field) \
-	(FALCON_REV(efx) >= FALCON_REV_B0 ? \
+	(falcon_rev(efx) >= FALCON_REV_B0 ? \
 	 EFX_QWORD_FIELD((qword), field##_B0) : \
 	 EFX_QWORD_FIELD((qword), field##_A1))
 
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index df19e86ab2e7..86d40295a777 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -691,7 +691,7 @@ static void efx_stop_port(struct efx_nic *efx)
 	mutex_unlock(&efx->mac_lock);
 
 	/* Serialise against efx_set_multicast_list() */
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1030,7 +1030,7 @@ static void efx_start_all(struct efx_nic *efx)
 		return;
 	if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
 		return;
-	if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+	if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
 		return;
 
 	/* Mark the port as enabled so port reconfigurations can start, then
@@ -1112,7 +1112,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	efx_stop_queue(efx);
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1550,7 +1550,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 	efx_for_each_tx_queue(tx_queue, efx)
 		efx_release_tx_buffers(tx_queue);
 
-	if (NET_DEV_REGISTERED(efx)) {
+	if (efx_dev_registered(efx)) {
 		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 		unregister_netdev(efx->net_dev);
 	}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 4f96ce4c3532..e02f1d1728aa 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -145,7 +145,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 #define PCI_EXP_LNKSTA_LNK_WID_LBN	4
 
 #define FALCON_IS_DUAL_FUNC(efx)		\
-	(FALCON_REV(efx) < FALCON_REV_B0)
+	(falcon_rev(efx) < FALCON_REV_B0)
 
 /**************************************************************************
  *
@@ -465,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 			      TX_DESCQ_TYPE, 0,
 			      TX_NON_IP_DROP_DIS_B0, 1);
 
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +474,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
 			   tx_queue->queue);
 
-	if (FALCON_REV(efx) < FALCON_REV_B0) {
+	if (falcon_rev(efx) < FALCON_REV_B0) {
 		efx_oword_t reg;
 
 		BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +635,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
 	int rc;
-	int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+	int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
 	int iscsi_digest_en = is_b0;
 
 	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -822,10 +822,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
 		tx_queue = &efx->tx_queue[tx_ev_q_label];
 
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_lock(efx->net_dev);
 		falcon_notify_tx_desc(tx_queue);
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_unlock(efx->net_dev);
 	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
 		   EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +884,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 						 RX_EV_TCP_UDP_CHKSUM_ERR);
 	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
 	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-	rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
 			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
 	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
 
@@ -1065,7 +1065,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
 	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))
 		is_phy_event = 1;
 
-	if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
 	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
 		is_phy_event = 1;
 
@@ -1572,7 +1572,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
 	unsigned long offset;
 	efx_dword_t dword;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
 	for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1595,7 +1595,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
 
 	if (!EFX_INT_MODE_USE_MSI(efx)) {
 		irq_handler_t handler;
-		if (FALCON_REV(efx) >= FALCON_REV_B0)
+		if (falcon_rev(efx) >= FALCON_REV_B0)
 			handler = falcon_legacy_interrupt_b0;
 		else
 			handler = falcon_legacy_interrupt_a1;
@@ -1642,7 +1642,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 	}
 
 	/* ACK legacy interrupt */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		falcon_read(efx, &reg, INT_ISR0_B0);
 	else
 		falcon_irq_ack_a1(efx);
@@ -1733,7 +1733,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
 	efx_oword_t temp;
 	int count;
 
-	if ((FALCON_REV(efx) < FALCON_REV_B0) ||
+	if ((falcon_rev(efx) < FALCON_REV_B0) ||
 	    (efx->loopback_mode != LOOPBACK_NONE))
 		return;
 
@@ -1786,7 +1786,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
 	efx_oword_t temp;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
 	/* Isolate the MAC -> RX */
@@ -1824,7 +1824,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 			     MAC_SPEED, link_speed);
 	/* On B0, MAC backpressure can be disabled and packets get
 	 * discarded. */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
 				    !efx->link_up);
 	}
@@ -1842,7 +1842,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 
 	/* Unisolate the MAC -> RX */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
 	falcon_write(efx, &reg, RX_CFG_REG_KER);
 }
@@ -1857,7 +1857,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
 		return 0;
 
 	/* Statistics fetch will fail if the MAC is in TX drain */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		efx_oword_t temp;
 		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
 		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -2114,7 +2114,7 @@ int falcon_probe_port(struct efx_nic *efx)
 	falcon_init_mdio(&efx->mii);
 
 	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		efx->flow_control = EFX_FC_RX | EFX_FC_TX;
 	else
 		efx->flow_control = EFX_FC_RX;
@@ -2374,7 +2374,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		return -ENODEV;
 	}
 
-	switch (FALCON_REV(efx)) {
+	switch (falcon_rev(efx)) {
 	case FALCON_REV_A0:
 	case 0xff:
 		EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2400,7 +2400,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		break;
 
 	default:
-		EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
 		return -ENODEV;
 	}
 
@@ -2563,7 +2563,7 @@ int falcon_init_nic(struct efx_nic *efx)
 
 	/* Set number of RSS queues for receive path. */
 	falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
 	else
 		EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2601,7 +2601,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
 	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
 	/* Squash TX of packets of 16 bytes or less */
-	if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
 		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
 	falcon_write(efx, &temp, TX_CFG2_REG_KER);
 
@@ -2618,7 +2618,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	if (EFX_WORKAROUND_7575(efx))
 		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
 					(3 * 4096) / 32);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
 
 	/* RX FIFO flow control thresholds */
@@ -2634,7 +2634,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	falcon_write(efx, &temp, RX_CFG_REG_KER);
 
 	/* Set destination of both TX and RX Flush events */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
 		falcon_write(efx, &temp, DP_CTRL_REG);
 	}
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 6117403b0c03..492f9bc28840 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -23,7 +23,10 @@ enum falcon_revision {
 	FALCON_REV_B0 = 2,
 };
 
-#define FALCON_REV(efx) ((efx)->pci_dev->revision)
+static inline int falcon_rev(struct efx_nic *efx)
+{
+	return efx->pci_dev->revision;
+}
 
 extern struct efx_nic_type falcon_a_nic_type;
 extern struct efx_nic_type falcon_b_nic_type;
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index ea08184ddfa9..6670cdfc41ab 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -56,14 +56,27 @@
 #define FALCON_USE_QWORD_IO 1
 #endif
 
-#define _falcon_writeq(efx, value, reg) \
-	__raw_writeq((__force u64) (value), (efx)->membase + (reg))
-#define _falcon_writel(efx, value, reg) \
-	__raw_writel((__force u32) (value), (efx)->membase + (reg))
-#define _falcon_readq(efx, reg) \
-	((__force __le64) __raw_readq((efx)->membase + (reg)))
-#define _falcon_readl(efx, reg) \
-	((__force __le32) __raw_readl((efx)->membase + (reg)))
+#ifdef FALCON_USE_QWORD_IO
+static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
+				  unsigned int reg)
+{
+	__raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
+				  unsigned int reg)
+{
+	__raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le32)__raw_readl(efx->membase + reg);
+}
 
 /* Writes to a normal 16-byte Falcon register, locking as appropriate. */
 static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index d2978d4d3bf9..dbdcee4b0f8d 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
 {
 	efx_dword_t reg;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return 1;
 
 	/* The ISR latches, so clear it and re-read */
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
 {
 	efx_dword_t reg;
 
-	if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
+	if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
 		return;
 
 	/* Flush the ISR */
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
 	reset = ((flow_control & EFX_FC_TX) &&
 		 !(efx->flow_control & EFX_FC_TX));
 	if (EFX_WORKAROUND_11482(efx) && reset) {
-		if (FALCON_REV(efx) >= FALCON_REV_B0) {
+		if (falcon_rev(efx) >= FALCON_REV_B0) {
 			/* Recover by resetting the EM block */
 			if (efx->link_up)
 				falcon_drain_tx_fifo(efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 59f261b4171f..18b21ef23014 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -52,28 +52,19 @@
 #define EFX_WARN_ON_PARANOID(x) do {} while (0)
 #endif
 
-#define NET_DEV_REGISTERED(efx) \
-	((efx)->net_dev->reg_state == NETREG_REGISTERED)
-
-/* Include net device name in log messages if it has been registered.
- * Use efx->name not efx->net_dev->name so that races with (un)registration
- * are harmless.
- */
-#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
-
 /* Un-rate-limited logging */
 #define EFX_ERR(efx, fmt, args...) \
-	dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
+	dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
 
 #define EFX_INFO(efx, fmt, args...) \
-	dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
+	dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
 
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_LOG(efx, fmt, args...) \
-	dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+	dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #else
 #define EFX_LOG(efx, fmt, args...) \
-	dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+	dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #endif
 
 #define EFX_TRACE(efx, fmt, args...) do {} while (0)
@@ -760,6 +751,20 @@ struct efx_nic {
 	void *loopback_selftest;
 };
 
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+	return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+/* Net device name, for inclusion in log messages if it has been registered.
+ * Use efx->name not efx->net_dev->name so that races with (un)registration
+ * are harmless.
+ */
+static inline const char *efx_dev_name(struct efx_nic *efx)
+{
+	return efx_dev_registered(efx) ? efx->name : "";
+}
+
 /**
  * struct efx_nic_type - Efx device type definition
  * @mem_bar: Memory BAR number
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index a6413309c577..f15d33225342 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -86,14 +86,21 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data) \
-	(((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf) \
-	RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx) \
-	(PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_page_offset(void *p)
+{
+	return (__force unsigned int)p & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+	/* Offset is always within one page, so we don't need to consider
+	 * the page order.
+	 */
+	return efx_page_offset(buf->data);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+	return PAGE_SIZE << efx->rx_buffer_order;
+}
 
 
 /**************************************************************************
@@ -269,7 +276,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 			return -ENOMEM;
 
 		dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-					0, RX_PAGE_SIZE(efx),
+					0, efx_rx_buf_size(efx),
 					PCI_DMA_FROMDEVICE);
 
 		if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -284,7 +291,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 				       EFX_PAGE_IP_ALIGN);
 	}
 
-	offset = RX_DATA_OFFSET(rx_queue->buf_data);
+	offset = efx_page_offset(rx_queue->buf_data);
 	rx_buf->len = bytes;
 	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 	rx_buf->data = rx_queue->buf_data;
@@ -295,7 +302,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 	rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
 	offset += ((bytes + 0x1ff) & ~0x1ff);
 
-	space = RX_PAGE_SIZE(efx) - offset;
+	space = efx_rx_buf_size(efx) - offset;
 	if (space >= bytes) {
 		/* Refs dropped on kernel releasing each skb */
 		get_page(rx_queue->buf_page);
@@ -344,7 +351,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
 		if (rx_buf->unmap_addr) {
 			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-				       RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+				       efx_rx_buf_size(efx),
+				       PCI_DMA_FROMDEVICE);
 			rx_buf->unmap_addr = 0;
 		}
 	} else if (likely(rx_buf->skb)) {
@@ -553,7 +561,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
 		struct skb_frag_struct frags;
 
 		frags.page = rx_buf->page;
-		frags.page_offset = RX_BUF_OFFSET(rx_buf);
+		frags.page_offset = efx_rx_buf_offset(rx_buf);
 		frags.size = rx_buf->len;
 
 		lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -598,7 +606,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
 	if (unlikely(rx_buf->len > hdr_len)) {
 		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
 		frag->page = rx_buf->page;
-		frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+		frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
 		frag->size = skb->len - hdr_len;
 		skb_shinfo(skb)->nr_frags = 1;
 		skb->data_len = frag->size;
@@ -852,7 +860,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 	/* For a page that is part-way through splitting into RX buffers */
 	if (rx_queue->buf_page != NULL) {
 		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-			       RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+			       efx_rx_buf_size(rx_queue->efx),
+			       PCI_DMA_FROMDEVICE);
 		__free_pages(rx_queue->buf_page,
 			     rx_queue->efx->rx_buffer_order);
 		rx_queue->buf_page = NULL;
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index cbda15946e8f..2fb69d8b3d70 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
 		 * interrupt handler. */
 		smp_wmb();
 
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_lock_bh(efx->net_dev);
 		rc = efx_xmit(efx, tx_queue, skb);
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_unlock_bh(efx->net_dev);
 
 		if (rc != NETDEV_TX_OK) {
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
 	int tx_done = 0, rx_good, rx_bad;
 	int i, rc = 0;
 
-	if (NET_DEV_REGISTERED(efx))
+	if (efx_dev_registered(efx))
 		netif_tx_lock_bh(efx->net_dev);
 
 	/* Count the number of tx completions, and decrement the refcnt. Any
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
 		dev_kfree_skb_any(skb);
 	}
 
-	if (NET_DEV_REGISTERED(efx))
+	if (efx_dev_registered(efx))
 		netif_tx_unlock_bh(efx->net_dev);
 
 	/* Check TX completion and received packet counts */
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 75eb0fd5fd2b..5cdd082ab8f6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 	if (unlikely(tx_queue->stopped)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
-			EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
+			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
 
 			/* Do this under netif_tx_lock(), to avoid racing
 			 * with efx_xmit(). */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index dca62f190198..35ab19c27f8d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -16,7 +16,7 @@
  */
 
 #define EFX_WORKAROUND_ALWAYS(efx) 1
-#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
+#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
 
 /* XAUI resets if link not detected */
 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS