Diffstat (limited to 'drivers/net/sfc/falcon.c')
 drivers/net/sfc/falcon.c | 97
 1 file changed, 45 insertions(+), 52 deletions(-)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 46db549ce580..790db89db345 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 **************************************************************************
  */
 
-/* DMA address mask (up to 46-bit, avoiding compiler warnings)
- *
- * Note that it is possible to have a platform with 64-bit longs and
- * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
- * platform DMA mask.
- */
-#if BITS_PER_LONG == 64
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
-#else
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
-#endif
+/* DMA address mask */
+#define FALCON_DMA_MASK DMA_BIT_MASK(46)
 
 /* TX DMA length mask (13-bit) */
 #define FALCON_TX_DMA_MASK (4096 - 1)
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 #define PCI_EXP_LNKSTA_LNK_WID_LBN	4
 
 #define FALCON_IS_DUAL_FUNC(efx)		\
-	(FALCON_REV(efx) < FALCON_REV_B0)
+	(falcon_rev(efx) < FALCON_REV_B0)
 
 /**************************************************************************
  *
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 			      TX_DESCQ_TYPE, 0,
 			      TX_NON_IP_DROP_DIS_B0, 1);
 
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
 		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
 			   tx_queue->queue);
 
-	if (FALCON_REV(efx) < FALCON_REV_B0) {
+	if (falcon_rev(efx) < FALCON_REV_B0) {
 		efx_oword_t reg;
 
 		BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
 	efx_oword_t rx_desc_ptr;
 	struct efx_nic *efx = rx_queue->efx;
 	int rc;
-	int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+	int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
 	int iscsi_digest_en = is_b0;
 
 	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -742,8 +733,10 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
 			continue;
 		break;
 	}
-	if (rc)
+	if (rc) {
 		EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
+		efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+	}
 
 	/* Remove RX descriptor ring from card */
 	EFX_ZERO_OWORD(rx_desc_ptr);
@@ -822,10 +815,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
 		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
 		tx_queue = &efx->tx_queue[tx_ev_q_label];
 
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_lock(efx->net_dev);
 		falcon_notify_tx_desc(tx_queue);
-		if (NET_DEV_REGISTERED(efx))
+		if (efx_dev_registered(efx))
 			netif_tx_unlock(efx->net_dev);
 	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
 		   EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +877,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 						  RX_EV_TCP_UDP_CHKSUM_ERR);
 	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
 	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-	rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
 			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
 	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
 
@@ -1065,7 +1058,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
 	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))
 		is_phy_event = 1;
 
-	if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
 	    EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
 		is_phy_event = 1;
 
@@ -1129,6 +1122,7 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
 	case RX_RECOVERY_EV_DECODE:
 		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
 			"Resetting.\n", channel->channel);
+		atomic_inc(&efx->rx_reset);
 		efx_schedule_reset(efx,
 				   EFX_WORKAROUND_6555(efx) ?
 				   RESET_TYPE_RX_RECOVERY :
@@ -1404,7 +1398,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	efx_oword_t fatal_intr;
 	int error, mem_perr;
 	static int n_int_errors;
@@ -1450,8 +1444,8 @@ out:
  */
 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 {
-	struct efx_nic *efx = (struct efx_nic *)dev_id;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	struct efx_nic *efx = dev_id;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	struct efx_channel *channel;
 	efx_dword_t reg;
 	u32 queues;
@@ -1488,8 +1482,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 
 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
-	struct efx_nic *efx = (struct efx_nic *)dev_id;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	struct efx_nic *efx = dev_id;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	struct efx_channel *channel;
 	int syserr;
 	int queues;
@@ -1541,9 +1535,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
  */
 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
 {
-	struct efx_channel *channel = (struct efx_channel *)dev_id;
+	struct efx_channel *channel = dev_id;
 	struct efx_nic *efx = channel->efx;
-	efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+	efx_oword_t *int_ker = efx->irq_status.addr;
 	int syserr;
 
 	efx->last_irq_cpu = raw_smp_processor_id();
@@ -1571,7 +1565,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
 	unsigned long offset;
 	efx_dword_t dword;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
 	for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1594,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
 
 	if (!EFX_INT_MODE_USE_MSI(efx)) {
 		irq_handler_t handler;
-		if (FALCON_REV(efx) >= FALCON_REV_B0)
+		if (falcon_rev(efx) >= FALCON_REV_B0)
 			handler = falcon_legacy_interrupt_b0;
 		else
 			handler = falcon_legacy_interrupt_a1;
@@ -1635,12 +1629,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 	efx_oword_t reg;
 
 	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel_with_interrupt(channel, efx)
+	efx_for_each_channel_with_interrupt(channel, efx) {
 		if (channel->irq)
 			free_irq(channel->irq, channel);
+	}
 
 	/* ACK legacy interrupt */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		falcon_read(efx, &reg, INT_ISR0_B0);
 	else
 		falcon_irq_ack_a1(efx);
@@ -1731,7 +1726,8 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
 	efx_oword_t temp;
 	int count;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if ((falcon_rev(efx) < FALCON_REV_B0) ||
+	    (efx->loopback_mode != LOOPBACK_NONE))
 		return;
 
 	falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
@@ -1783,7 +1779,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
 	efx_oword_t temp;
 
-	if (FALCON_REV(efx) < FALCON_REV_B0)
+	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
 	/* Isolate the MAC -> RX */
@@ -1821,7 +1817,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 			     MAC_SPEED, link_speed);
 	/* On B0, MAC backpressure can be disabled and packets get
 	 * discarded. */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
 				    !efx->link_up);
 	}
@@ -1839,7 +1835,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 
 	/* Unisolate the MAC -> RX */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
 	falcon_write(efx, &reg, RX_CFG_REG_KER);
 }
@@ -1854,7 +1850,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
 		return 0;
 
 	/* Statistics fetch will fail if the MAC is in TX drain */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		efx_oword_t temp;
 		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
 		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1938,7 +1934,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
 static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
 			      int addr, int value)
 {
-	struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+	struct efx_nic *efx = net_dev->priv;
 	unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
 	efx_oword_t reg;
 
@@ -2006,7 +2002,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
  * could be read, -1 will be returned. */
 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
 {
-	struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+	struct efx_nic *efx = net_dev->priv;
 	unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
 	efx_oword_t reg;
 	int value = -1;
@@ -2091,6 +2087,8 @@ static int falcon_probe_phy(struct efx_nic *efx)
 			efx->phy_type);
 		return -1;
 	}
+
+	efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
 	return 0;
 }
 
@@ -2109,7 +2107,7 @@ int falcon_probe_port(struct efx_nic *efx)
 	falcon_init_mdio(&efx->mii);
 
 	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		efx->flow_control = EFX_FC_RX | EFX_FC_TX;
 	else
 		efx->flow_control = EFX_FC_RX;
@@ -2369,7 +2367,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		return -ENODEV;
 	}
 
-	switch (FALCON_REV(efx)) {
+	switch (falcon_rev(efx)) {
 	case FALCON_REV_A0:
 	case 0xff:
 		EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2395,7 +2393,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		break;
 
 	default:
-		EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
 		return -ENODEV;
 	}
 
@@ -2415,7 +2413,7 @@ int falcon_probe_nic(struct efx_nic *efx)
 
 	/* Allocate storage for hardware specific data */
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
-	efx->nic_data = (void *) nic_data;
+	efx->nic_data = nic_data;
 
 	/* Determine number of ports etc. */
 	rc = falcon_probe_nic_variant(efx);
@@ -2468,14 +2466,12 @@ int falcon_probe_nic(struct efx_nic *efx)
  fail5:
 	falcon_free_buffer(efx, &efx->irq_status);
  fail4:
-	/* fall-thru */
  fail3:
 	if (nic_data->pci_dev2) {
 		pci_dev_put(nic_data->pci_dev2);
 		nic_data->pci_dev2 = NULL;
 	}
  fail2:
-	/* fall-thru */
  fail1:
 	kfree(efx->nic_data);
 	return rc;
@@ -2487,13 +2483,10 @@ int falcon_probe_nic(struct efx_nic *efx)
  */
 int falcon_init_nic(struct efx_nic *efx)
 {
-	struct falcon_nic_data *data;
 	efx_oword_t temp;
 	unsigned thresh;
 	int rc;
 
-	data = (struct falcon_nic_data *)efx->nic_data;
-
 	/* Set up the address region register. This is only needed
 	 * for the B0 FPGA, but since we are just pushing in the
 	 * reset defaults this may as well be unconditional. */
@@ -2560,7 +2553,7 @@ int falcon_init_nic(struct efx_nic *efx)
 
 	/* Set number of RSS queues for receive path. */
 	falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
 	else
 		EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2598,7 +2591,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
 	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
 	/* Squash TX of packets of 16 bytes or less */
-	if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
 		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
 	falcon_write(efx, &temp, TX_CFG2_REG_KER);
 
@@ -2615,7 +2608,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	if (EFX_WORKAROUND_7575(efx))
 		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
 					(3 * 4096) / 32);
-	if (FALCON_REV(efx) >= FALCON_REV_B0)
+	if (falcon_rev(efx) >= FALCON_REV_B0)
 		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
 
 	/* RX FIFO flow control thresholds */
@@ -2631,7 +2624,7 @@ int falcon_init_nic(struct efx_nic *efx)
 	falcon_write(efx, &temp, RX_CFG_REG_KER);
 
 	/* Set destination of both TX and RX Flush events */
-	if (FALCON_REV(efx) >= FALCON_REV_B0) {
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
 		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
 		falcon_write(efx, &temp, DP_CTRL_REG);
 	}
@@ -2645,7 +2638,7 @@ void falcon_remove_nic(struct efx_nic *efx)
 
 	falcon_free_buffer(efx, &efx->irq_status);
 
-	(void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+	falcon_reset_hw(efx, RESET_TYPE_ALL);
 
 	/* Release the second function after the reset */
 	if (nic_data->pci_dev2) {
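
Note on the DMA mask hunk: most of the churn above is mechanical (FALCON_REV(efx) becomes a falcon_rev(efx) helper defined elsewhere in the driver, NET_DEV_REGISTERED() becomes efx_dev_registered(), and redundant casts to and from void * are dropped), but the first hunk also swaps a hand-written 46-bit constant for DMA_BIT_MASK(46). The two forms are equivalent. The sketch below is an illustrative userspace check only, not part of the patch; the DMA_BIT_MASK definition is quoted from <linux/dma-mapping.h>.

/* Illustrative check only (not from the patch): confirms that DMA_BIT_MASK(46)
 * equals the 46-bit constant the old FALCON_DMA_MASK spelled out by hand.
 * Builds as an ordinary userspace program. */
#include <assert.h>
#include <stdio.h>

/* Quoted from <linux/dma-mapping.h> */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	unsigned long long old_mask = 0x00003fffffffffffULL;	/* constant removed by the patch */
	unsigned long long new_mask = DMA_BIT_MASK(46);		/* new FALCON_DMA_MASK */

	assert(old_mask == new_mask);
	printf("FALCON_DMA_MASK = %#llx\n", new_mask);	/* prints 0x3fffffffffff */
	return 0;
}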