Diffstat (limited to 'drivers/net/sfc/falcon.c')
 -rw-r--r--  drivers/net/sfc/falcon.c | 480
 1 file changed, 291 insertions(+), 189 deletions(-)
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 31ed1f49de00..6884dc8c1f82 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -15,11 +15,11 @@
 #include <linux/seq_file.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
+#include <linux/mii.h>
 #include "net_driver.h"
 #include "bitfield.h"
 #include "efx.h"
 #include "mac.h"
-#include "gmii.h"
 #include "spi.h"
 #include "falcon.h"
 #include "falcon_hwdefs.h"
@@ -70,6 +70,20 @@ static int disable_dma_stats;
 #define RX_DC_ENTRIES_ORDER 2
 #define RX_DC_BASE 0x100000
 
+static const unsigned int
+/* "Large" EEPROM device: Atmel AT25640 or similar
+ * 8 KB, 16-bit address, 32 B write block */
+large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
+		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
+/* Default flash device: Atmel AT25F1024
+ * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
+default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
+		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
+		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
+		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
+		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
+
 /* RX FIFO XOFF watermark
  *
  * When the amount of the RX FIFO increases used increases past this
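Note on the encoding above: each SPI device-type word packs the device geometry into a single 32-bit value, with sizes stored as log2. A minimal stand-alone sketch of the decoding is below; the *_LBN bit positions and field widths are assumptions for illustration (the real definitions live in the driver's spi.h), only the packing pattern is taken from the patch.

#include <stdio.h>

/* Assumed bit positions, for illustration only */
#define SPI_DEV_TYPE_SIZE_LBN        0
#define SPI_DEV_TYPE_ADDR_LEN_LBN    6
#define SPI_DEV_TYPE_ERASE_CMD_LBN   8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24

int main(void)
{
	unsigned int flash = (17 << SPI_DEV_TYPE_SIZE_LBN)
			     | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
			     | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
			     | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
			     | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);

	/* Sizes are log2: 17 -> 128 KB total, 15 -> 32 KB erase, 8 -> 256 B block */
	printf("size=%u addr_len=%u erase_cmd=0x%02x erase_size=%u block_size=%u\n",
	       1u << ((flash >> SPI_DEV_TYPE_SIZE_LBN) & 0x1f),
	       (flash >> SPI_DEV_TYPE_ADDR_LEN_LBN) & 0x3,
	       (flash >> SPI_DEV_TYPE_ERASE_CMD_LBN) & 0xff,
	       1u << ((flash >> SPI_DEV_TYPE_ERASE_SIZE_LBN) & 0x1f),
	       1u << ((flash >> SPI_DEV_TYPE_BLOCK_SIZE_LBN) & 0x1f));
	return 0;
}

Running this prints size=131072 addr_len=3 erase_cmd=0x52 erase_size=32768 block_size=256, i.e. the 128 KB / 32 KB / 256 B geometry named in the comment.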
@@ -770,15 +784,18 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 		  rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
 		  rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
 
-	/* Count errors that are not in MAC stats. */
+	/* Count errors that are not in MAC stats.  Ignore expected
+	 * checksum errors during self-test. */
 	if (rx_ev_frm_trunc)
 		++rx_queue->channel->n_rx_frm_trunc;
 	else if (rx_ev_tobe_disc)
 		++rx_queue->channel->n_rx_tobe_disc;
-	else if (rx_ev_ip_hdr_chksum_err)
-		++rx_queue->channel->n_rx_ip_hdr_chksum_err;
-	else if (rx_ev_tcp_udp_chksum_err)
-		++rx_queue->channel->n_rx_tcp_udp_chksum_err;
+	else if (!efx->loopback_selftest) {
+		if (rx_ev_ip_hdr_chksum_err)
+			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
+		else if (rx_ev_tcp_udp_chksum_err)
+			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
+	}
 	if (rx_ev_ip_frag_err)
 		++rx_queue->channel->n_rx_ip_frag_err;
 
@@ -809,7 +826,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 #endif
 
 	if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
-		     efx->phy_type == PHY_TYPE_10XPRESS))
+		     efx->phy_type == PHY_TYPE_SFX7101))
 		tenxpress_crc_err(efx);
 }
 
@@ -893,22 +910,20 @@ static void falcon_handle_global_event(struct efx_channel *channel,
 					      efx_qword_t *event)
 {
 	struct efx_nic *efx = channel->efx;
-	bool is_phy_event = false, handled = false;
+	bool handled = false;
 
-	/* Check for interrupt on either port.  Some boards have a
-	 * single PHY wired to the interrupt line for port 1. */
 	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
 	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
-	    EFX_QWORD_FIELD(*event, XG_PHY_INTR))
-		is_phy_event = true;
+	    EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
+	    EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
+		efx->phy_op->clear_interrupt(efx);
+		queue_work(efx->workqueue, &efx->phy_work);
+		handled = true;
+	}
 
 	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
-	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
-		is_phy_event = true;
-
-	if (is_phy_event) {
-		efx->phy_op->clear_interrupt(efx);
-		queue_work(efx->workqueue, &efx->reconfigure_work);
+	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
+		queue_work(efx->workqueue, &efx->mac_work);
 		handled = true;
 	}
 
@@ -1151,6 +1166,19 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
 	falcon_generate_event(channel, &test_event);
 }
 
+void falcon_sim_phy_event(struct efx_nic *efx)
+{
+	efx_qword_t phy_event;
+
+	EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
+	if (EFX_IS10G(efx))
+		EFX_SET_OWORD_FIELD(phy_event, XG_PHY_INTR, 1);
+	else
+		EFX_SET_OWORD_FIELD(phy_event, G_PHY0_INTR, 1);
+
+	falcon_generate_event(&efx->channel[0], &phy_event);
+}
+
 /**************************************************************************
  *
  * Flush handling
@@ -1560,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
 	efx_for_each_channel(channel, efx) {
 		rc = request_irq(channel->irq, falcon_msi_interrupt,
 				 IRQF_PROBE_SHARED, /* Not shared */
-				 efx->name, channel);
+				 channel->name, channel);
 		if (rc) {
 			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
 			goto fail2;
@@ -1605,32 +1633,45 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 **************************************************************************
 */
 
-#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t))
+#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
+
+static int falcon_spi_poll(struct efx_nic *efx)
+{
+	efx_oword_t reg;
+	falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
+	return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
+}
 
 /* Wait for SPI command completion */
 static int falcon_spi_wait(struct efx_nic *efx)
 {
-	unsigned long timeout = jiffies + DIV_ROUND_UP(HZ, 10);
-	efx_oword_t reg;
-	bool cmd_en, timer_active;
+	/* Most commands will finish quickly, so we start polling at
+	 * very short intervals.  Sometimes the command may have to
+	 * wait for VPD or expansion ROM access outside of our
+	 * control, so we allow up to 100 ms. */
+	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		if (!falcon_spi_poll(efx))
+			return 0;
+		udelay(10);
+	}
 
 	for (;;) {
-		falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
-		cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
-		timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
-		if (!cmd_en && !timer_active)
+		if (!falcon_spi_poll(efx))
 			return 0;
 		if (time_after_eq(jiffies, timeout)) {
 			EFX_ERR(efx, "timed out waiting for SPI\n");
 			return -ETIMEDOUT;
 		}
-		cpu_relax();
+		schedule_timeout_uninterruptible(1);
 	}
 }
 
-static int falcon_spi_cmd(const struct efx_spi_device *spi,
-			  unsigned int command, int address,
-			  const void *in, void *out, unsigned int len)
+int falcon_spi_cmd(const struct efx_spi_device *spi,
+		   unsigned int command, int address,
+		   const void *in, void *out, size_t len)
 {
 	struct efx_nic *efx = spi->efx;
 	bool addressed = (address >= 0);
@@ -1641,9 +1682,10 @@ static int falcon_spi_cmd(const struct efx_spi_device *spi,
 	/* Input validation */
 	if (len > FALCON_SPI_MAX_LEN)
 		return -EINVAL;
+	BUG_ON(!mutex_is_locked(&efx->spi_lock));
 
-	/* Check SPI not currently being accessed */
-	rc = falcon_spi_wait(efx);
+	/* Check that previous command is not still running */
+	rc = falcon_spi_poll(efx);
 	if (rc)
 		return rc;
 
@@ -1685,8 +1727,8 @@ static int falcon_spi_cmd(const struct efx_spi_device *spi,
 	return 0;
 }
 
-static unsigned int
-falcon_spi_write_limit(const struct efx_spi_device *spi, unsigned int start)
+static size_t
+falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
 {
 	return min(FALCON_SPI_MAX_LEN,
 		   (spi->block_size - (start & (spi->block_size - 1))));
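For reference, the write-limit arithmetic above clamps each buffered write so it never crosses an SPI write-page boundary, and never exceeds FALCON_SPI_MAX_LEN (an efx_oword_t, 16 bytes). A stand-alone sketch of the same calculation, with hypothetical numbers, not driver code:

#include <stdio.h>

/* Same arithmetic as falcon_spi_write_limit(): a write may cover at most
 * the remainder of the current page, capped at the per-command maximum. */
static size_t spi_write_limit(size_t start, size_t page_size, size_t max_len)
{
	size_t to_page_end = page_size - (start & (page_size - 1));
	return to_page_end < max_len ? to_page_end : max_len;
}

int main(void)
{
	/* 256-byte pages, 16-byte command cap: a write starting at offset
	 * 250 may only cover the 6 bytes left before the page boundary. */
	printf("%zu\n", spi_write_limit(250, 256, 16));   /* prints 6 */
	printf("%zu\n", spi_write_limit(256, 256, 16));   /* prints 16 */
	return 0;
}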
@@ -1699,38 +1741,40 @@ efx_spi_munge_command(const struct efx_spi_device *spi,
 	return command | (((address >> 8) & spi->munge_address) << 3);
 }
 
-
-static int falcon_spi_fast_wait(const struct efx_spi_device *spi)
+/* Wait up to 10 ms for buffered write completion */
+int falcon_spi_wait_write(const struct efx_spi_device *spi)
 {
+	struct efx_nic *efx = spi->efx;
+	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
 	u8 status;
-	int i, rc;
-
-	/* Wait up to 1000us for flash/EEPROM to finish a fast operation. */
-	for (i = 0; i < 50; i++) {
-		udelay(20);
+	int rc;
 
+	for (;;) {
 		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
 				    &status, sizeof(status));
 		if (rc)
 			return rc;
 		if (!(status & SPI_STATUS_NRDY))
 			return 0;
+		if (time_after_eq(jiffies, timeout)) {
+			EFX_ERR(efx, "SPI write timeout on device %d"
+				" last status=0x%02x\n",
+				spi->device_id, status);
+			return -ETIMEDOUT;
+		}
+		schedule_timeout_uninterruptible(1);
 	}
-	EFX_ERR(spi->efx,
-		"timed out waiting for device %d last status=0x%02x\n",
-		spi->device_id, status);
-	return -ETIMEDOUT;
 }
 
 int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
 		    size_t len, size_t *retlen, u8 *buffer)
 {
-	unsigned int command, block_len, pos = 0;
+	size_t block_len, pos = 0;
+	unsigned int command;
 	int rc = 0;
 
 	while (pos < len) {
-		block_len = min((unsigned int)len - pos,
-				FALCON_SPI_MAX_LEN);
+		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
 
 		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
 		rc = falcon_spi_cmd(spi, command, start + pos, NULL,
@@ -1756,7 +1800,8 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
 		     size_t len, size_t *retlen, const u8 *buffer)
 {
 	u8 verify_buffer[FALCON_SPI_MAX_LEN];
-	unsigned int command, block_len, pos = 0;
+	size_t block_len, pos = 0;
+	unsigned int command;
 	int rc = 0;
 
 	while (pos < len) {
@@ -1764,7 +1809,7 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
 		if (rc)
 			break;
 
-		block_len = min((unsigned int)len - pos,
+		block_len = min(len - pos,
 				falcon_spi_write_limit(spi, start + pos));
 		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
 		rc = falcon_spi_cmd(spi, command, start + pos,
@@ -1772,7 +1817,7 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
 		if (rc)
 			break;
 
-		rc = falcon_spi_fast_wait(spi);
+		rc = falcon_spi_wait_write(spi);
 		if (rc)
 			break;
 
@@ -1805,40 +1850,61 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
 *
 **************************************************************************
 */
-void falcon_drain_tx_fifo(struct efx_nic *efx)
+
+static int falcon_reset_macs(struct efx_nic *efx)
 {
-	efx_oword_t temp;
+	efx_oword_t reg;
 	int count;
 
-	if ((falcon_rev(efx) < FALCON_REV_B0) ||
-	    (efx->loopback_mode != LOOPBACK_NONE))
-		return;
+	if (falcon_rev(efx) < FALCON_REV_B0) {
+		/* It's not safe to use GLB_CTL_REG to reset the
+		 * macs, so instead use the internal MAC resets
+		 */
+		if (!EFX_IS10G(efx)) {
+			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1);
+			falcon_write(efx, &reg, GM_CFG1_REG);
+			udelay(1000);
+
+			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0);
+			falcon_write(efx, &reg, GM_CFG1_REG);
+			udelay(1000);
+			return 0;
+		} else {
+			EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
+			falcon_write(efx, &reg, XM_GLB_CFG_REG);
+
+			for (count = 0; count < 10000; count++) {
+				falcon_read(efx, &reg, XM_GLB_CFG_REG);
+				if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
+					return 0;
+				udelay(10);
+			}
 
-	falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
-	/* There is no point in draining more than once */
-	if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
-		return;
+			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
+			return -ETIMEDOUT;
+		}
+	}
 
 	/* MAC stats will fail whilst the TX fifo is draining. Serialise
 	 * the drain sequence with the statistics fetch */
 	spin_lock(&efx->stats_lock);
 
-	EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
-	falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
+	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
+	EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
+	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
 
-	/* Reset the MAC and EM block. */
-	falcon_read(efx, &temp, GLB_CTL_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
-	EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1);
-	EFX_SET_OWORD_FIELD(temp, RST_EM, 1);
-	falcon_write(efx, &temp, GLB_CTL_REG_KER);
+	falcon_read(efx, &reg, GLB_CTL_REG_KER);
+	EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1);
+	EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1);
+	EFX_SET_OWORD_FIELD(reg, RST_EM, 1);
+	falcon_write(efx, &reg, GLB_CTL_REG_KER);
 
 	count = 0;
 	while (1) {
-		falcon_read(efx, &temp, GLB_CTL_REG_KER);
-		if (!EFX_OWORD_FIELD(temp, RST_XGTX) &&
-		    !EFX_OWORD_FIELD(temp, RST_XGRX) &&
-		    !EFX_OWORD_FIELD(temp, RST_EM)) {
+		falcon_read(efx, &reg, GLB_CTL_REG_KER);
+		if (!EFX_OWORD_FIELD(reg, RST_XGTX) &&
+		    !EFX_OWORD_FIELD(reg, RST_XGRX) &&
+		    !EFX_OWORD_FIELD(reg, RST_EM)) {
 			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
 				count);
 			break;
@@ -1855,21 +1921,39 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
 
 	/* If we've reset the EM block and the link is up, then
 	 * we'll have to kick the XAUI link so the PHY can recover */
-	if (efx->link_up && EFX_WORKAROUND_5147(efx))
+	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
 		falcon_reset_xaui(efx);
+
+	return 0;
+}
+
+void falcon_drain_tx_fifo(struct efx_nic *efx)
+{
+	efx_oword_t reg;
+
+	if ((falcon_rev(efx) < FALCON_REV_B0) ||
+	    (efx->loopback_mode != LOOPBACK_NONE))
+		return;
+
+	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
+	/* There is no point in draining more than once */
+	if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0))
+		return;
+
+	falcon_reset_macs(efx);
 }
 
 void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
-	efx_oword_t temp;
+	efx_oword_t reg;
 
 	if (falcon_rev(efx) < FALCON_REV_B0)
 		return;
 
 	/* Isolate the MAC -> RX */
-	falcon_read(efx, &temp, RX_CFG_REG_KER);
-	EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0);
-	falcon_write(efx, &temp, RX_CFG_REG_KER);
+	falcon_read(efx, &reg, RX_CFG_REG_KER);
+	EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0);
+	falcon_write(efx, &reg, RX_CFG_REG_KER);
 
 	if (!efx->link_up)
 		falcon_drain_tx_fifo(efx);
@@ -1881,14 +1965,12 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	int link_speed;
 	bool tx_fc;
 
-	if (efx->link_options & GM_LPA_10000)
-		link_speed = 0x3;
-	else if (efx->link_options & GM_LPA_1000)
-		link_speed = 0x2;
-	else if (efx->link_options & GM_LPA_100)
-		link_speed = 0x1;
-	else
-		link_speed = 0x0;
+	switch (efx->link_speed) {
+	case 10000: link_speed = 3; break;
+	case 1000:  link_speed = 2; break;
+	case 100:   link_speed = 1; break;
+	default:    link_speed = 0; break;
+	}
 	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
 	 * as advertised. Disable to ensure packets are not
 	 * indefinitely held and TX queue can be flushed at any point
@@ -1914,7 +1996,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 	/* Transmission of pause frames when RX crosses the threshold is
 	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
 	 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
-	tx_fc = !!(efx->flow_control & EFX_FC_TX);
+	tx_fc = !!(efx->link_fc & EFX_FC_TX);
 	falcon_read(efx, &reg, RX_CFG_REG_KER);
 	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 
@@ -1998,7 +2080,8 @@ static int falcon_gmii_wait(struct efx_nic *efx)
 	efx_dword_t md_stat;
 	int count;
 
-	for (count = 0; count < 1000; count++) {	/* wait upto 10ms */
+	/* wait upto 50ms - taken max from datasheet */
+	for (count = 0; count < 5000; count++) {
 		falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
 		if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
 			if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
@@ -2162,10 +2245,14 @@ static void falcon_init_mdio(struct mii_if_info *gmii)
 static int falcon_probe_phy(struct efx_nic *efx)
 {
 	switch (efx->phy_type) {
-	case PHY_TYPE_10XPRESS:
-		efx->phy_op = &falcon_tenxpress_phy_ops;
+	case PHY_TYPE_SFX7101:
+		efx->phy_op = &falcon_sfx7101_phy_ops;
+		break;
+	case PHY_TYPE_SFT9001A:
+	case PHY_TYPE_SFT9001B:
+		efx->phy_op = &falcon_sft9001_phy_ops;
 		break;
-	case PHY_TYPE_XFP:
+	case PHY_TYPE_QT2022C2:
 		efx->phy_op = &falcon_xfp_phy_ops;
 		break;
 	default:
@@ -2174,10 +2261,59 @@ static int falcon_probe_phy(struct efx_nic *efx)
 		return -1;
 	}
 
-	efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks;
+	if (efx->phy_op->macs & EFX_XMAC)
+		efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
+					(1 << LOOPBACK_XGXS) |
+					(1 << LOOPBACK_XAUI));
+	if (efx->phy_op->macs & EFX_GMAC)
+		efx->loopback_modes |= (1 << LOOPBACK_GMAC);
+	efx->loopback_modes |= efx->phy_op->loopbacks;
+
 	return 0;
 }
 
+int falcon_switch_mac(struct efx_nic *efx)
+{
+	struct efx_mac_operations *old_mac_op = efx->mac_op;
+	efx_oword_t nic_stat;
+	unsigned strap_val;
+
+	/* Internal loopbacks override the phy speed setting */
+	if (efx->loopback_mode == LOOPBACK_GMAC) {
+		efx->link_speed = 1000;
+		efx->link_fd = true;
+	} else if (LOOPBACK_INTERNAL(efx)) {
+		efx->link_speed = 10000;
+		efx->link_fd = true;
+	}
+
+	efx->mac_op = (EFX_IS10G(efx) ?
+		       &falcon_xmac_operations : &falcon_gmac_operations);
+	if (old_mac_op == efx->mac_op)
+		return 0;
+
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+	/* Not all macs support a mac-level link state */
+	efx->mac_up = true;
+
+	falcon_read(efx, &nic_stat, NIC_STAT_REG);
+	strap_val = EFX_IS10G(efx) ? 5 : 3;
+	if (falcon_rev(efx) >= FALCON_REV_B0) {
+		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
+		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
+		falcon_write(efx, &nic_stat, NIC_STAT_REG);
+	} else {
+		/* Falcon A1 does not support 1G/10G speed switching
+		 * and must not be used with a PHY that does. */
+		BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
+	}
+
+
+	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
+	return falcon_reset_macs(efx);
+}
+
 /* This call is responsible for hooking in the MAC and PHY operations */
 int falcon_probe_port(struct efx_nic *efx)
 {
@@ -2194,9 +2330,9 @@ int falcon_probe_port(struct efx_nic *efx)
 
 	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
 	if (falcon_rev(efx) >= FALCON_REV_B0)
-		efx->flow_control = EFX_FC_RX | EFX_FC_TX;
+		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
 	else
-		efx->flow_control = EFX_FC_RX;
+		efx->wanted_fc = EFX_FC_RX;
 
 	/* Allocate buffer for stats */
 	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
@@ -2253,13 +2389,18 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 	__le16 *word, *limit;
 	u32 csum;
 
-	region = kmalloc(NVCONFIG_END, GFP_KERNEL);
+	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
+	if (!spi)
+		return -EINVAL;
+
+	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
 	if (!region)
 		return -ENOMEM;
 	nvconfig = region + NVCONFIG_OFFSET;
 
-	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
-	rc = falcon_spi_read(spi, 0, NVCONFIG_END, NULL, region);
+	mutex_lock(&efx->spi_lock);
+	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
+	mutex_unlock(&efx->spi_lock);
 	if (rc) {
 		EFX_ERR(efx, "Failed to read %s\n",
 			efx->spi_flash ? "flash" : "EEPROM");
@@ -2283,7 +2424,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 		limit = (__le16 *) (nvconfig + 1);
 	} else {
 		word = region;
-		limit = region + NVCONFIG_END;
+		limit = region + FALCON_NVCONFIG_END;
 	}
 	for (csum = 0; word < limit; ++word)
 		csum += le16_to_cpu(*word);
@@ -2325,6 +2466,10 @@ static struct {
 	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
 	{ DP_CTRL_REG,
 	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
+	{ GM_CFG2_REG,
+	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
+	{ GMF_CFG0_REG,
+	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
 	{ XM_GLB_CFG_REG,
 	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
 	{ XM_TX_CFG_REG,
@@ -2545,7 +2690,7 @@ static int falcon_spi_device_init(struct efx_nic *efx,
 	struct efx_spi_device *spi_device;
 
 	if (device_type != 0) {
-		spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL);
+		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
 		if (!spi_device)
 			return -ENOMEM;
 		spi_device->device_id = device_id;
@@ -2555,6 +2700,11 @@ static int falcon_spi_device_init(struct efx_nic *efx,
 			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
 		spi_device->munge_address = (spi_device->size == 1 << 9 &&
 					     spi_device->addr_len == 1);
+		spi_device->erase_command =
+			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
+		spi_device->erase_size =
+			1 << SPI_DEV_TYPE_FIELD(device_type,
+						SPI_DEV_TYPE_ERASE_SIZE);
 		spi_device->block_size =
 			1 << SPI_DEV_TYPE_FIELD(device_type,
 						SPI_DEV_TYPE_BLOCK_SIZE);
@@ -2645,6 +2795,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
 static int falcon_probe_nic_variant(struct efx_nic *efx)
 {
 	efx_oword_t altera_build;
+	efx_oword_t nic_stat;
 
 	falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
 	if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
@@ -2652,27 +2803,20 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		return -ENODEV;
 	}
 
+	falcon_read(efx, &nic_stat, NIC_STAT_REG);
+
 	switch (falcon_rev(efx)) {
 	case FALCON_REV_A0:
 	case 0xff:
 		EFX_ERR(efx, "Falcon rev A0 not supported\n");
 		return -ENODEV;
 
-	case FALCON_REV_A1:{
-		efx_oword_t nic_stat;
-
-		falcon_read(efx, &nic_stat, NIC_STAT_REG);
-
+	case FALCON_REV_A1:
 		if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
 			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
 			return -ENODEV;
 		}
-		if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) {
-			EFX_ERR(efx, "1G mode not supported\n");
-			return -ENODEV;
-		}
 		break;
-	}
 
 	case FALCON_REV_B0:
 		break;
@@ -2682,6 +2826,9 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 		return -ENODEV;
 	}
 
+	/* Initial assumed speed */
+	efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;
+
 	return 0;
 }
 
@@ -2689,80 +2836,37 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 static void falcon_probe_spi_devices(struct efx_nic *efx)
 {
 	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
-	bool has_flash, has_eeprom, boot_is_external;
+	int boot_dev;
 
 	falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
 	falcon_read(efx, &nic_stat, NIC_STAT_REG);
 	falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
 
-	has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST);
-	has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST);
-	boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE);
-
-	if (has_flash) {
-		/* Default flash SPI device: Atmel AT25F1024
-		 * 128 KB, 24-bit address, 32 KB erase block,
-		 * 256 B write block
-		 */
-		u32 flash_device_type =
-			(17 << SPI_DEV_TYPE_SIZE_LBN)
-			| (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
-			| (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
-			| (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
-			| (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
-
-		falcon_spi_device_init(efx, &efx->spi_flash,
-				       EE_SPI_FLASH, flash_device_type);
-
-		if (!boot_is_external) {
-			/* Disable VPD and set clock dividers to safe
-			 * values for initial programming.
-			 */
-			EFX_LOG(efx, "Booted from internal ASIC settings;"
-				" setting SPI config\n");
-			EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
-					     /* 125 MHz / 7 ~= 20 MHz */
-					     EE_SF_CLOCK_DIV, 7,
-					     /* 125 MHz / 63 ~= 2 MHz */
-					     EE_EE_CLOCK_DIV, 63);
-			falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
-		}
-	}
-
-	if (has_eeprom) {
-		u32 eeprom_device_type;
-
-		/* If it has no flash, it must have a large EEPROM
-		 * for chip config; otherwise check whether 9-bit
-		 * addressing is used for VPD configuration
-		 */
-		if (has_flash &&
-		    (!boot_is_external ||
-		     EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) {
-			/* Default SPI device: Atmel AT25040 or similar
-			 * 512 B, 9-bit address, 8 B write block
-			 */
-			eeprom_device_type =
-				(9 << SPI_DEV_TYPE_SIZE_LBN)
-				| (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
-				| (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
-		} else {
-			/* "Large" SPI device: Atmel AT25640 or similar
-			 * 8 KB, 16-bit address, 32 B write block
-			 */
-			eeprom_device_type =
-				(13 << SPI_DEV_TYPE_SIZE_LBN)
-				| (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
-				| (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
-		}
-
-		falcon_spi_device_init(efx, &efx->spi_eeprom,
-				       EE_SPI_EEPROM, eeprom_device_type);
-	}
-
-	EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
-		(has_flash ? "present" : "absent"),
-		(has_eeprom ? "present" : "absent"));
+	if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) {
+		boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ?
+			    EE_SPI_FLASH : EE_SPI_EEPROM);
+		EFX_LOG(efx, "Booted from %s\n",
+			boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM");
+	} else {
+		/* Disable VPD and set clock dividers to safe
+		 * values for initial programming. */
+		boot_dev = -1;
+		EFX_LOG(efx, "Booted from internal ASIC settings;"
+			" setting SPI config\n");
+		EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
+				     /* 125 MHz / 7 ~= 20 MHz */
+				     EE_SF_CLOCK_DIV, 7,
+				     /* 125 MHz / 63 ~= 2 MHz */
+				     EE_EE_CLOCK_DIV, 63);
+		falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
+	}
+
+	if (boot_dev == EE_SPI_FLASH)
+		falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH,
+				       default_flash_type);
+	if (boot_dev == EE_SPI_EEPROM)
+		falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM,
+				       large_eeprom_type);
 }
 
 int falcon_probe_nic(struct efx_nic *efx)
@@ -2825,10 +2929,10 @@ int falcon_probe_nic(struct efx_nic *efx)
 		goto fail5;
 
 	/* Initialise I2C adapter */
 	efx->i2c_adap.owner = THIS_MODULE;
 	nic_data->i2c_data = falcon_i2c_bit_operations;
 	nic_data->i2c_data.data = efx;
 	efx->i2c_adap.algo_data = &nic_data->i2c_data;
 	efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
 	strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
 	rc = i2c_bit_add_bus(&efx->i2c_adap);
@@ -2862,20 +2966,18 @@ int falcon_init_nic(struct efx_nic *efx)
 	unsigned thresh;
 	int rc;
 
-	/* Set up the address region register. This is only needed
-	 * for the B0 FPGA, but since we are just pushing in the
-	 * reset defaults this may as well be unconditional. */
-	EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0,
-			     ADR_REGION1, (1 << 16),
-			     ADR_REGION2, (2 << 16),
-			     ADR_REGION3, (3 << 16));
-	falcon_write(efx, &temp, ADR_REGION_REG_KER);
-
 	/* Use on-chip SRAM */
 	falcon_read(efx, &temp, NIC_STAT_REG);
 	EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
 	falcon_write(efx, &temp, NIC_STAT_REG);
 
+	/* Set the source of the GMAC clock */
+	if (falcon_rev(efx) == FALCON_REV_B0) {
+		falcon_read(efx, &temp, GPIO_CTL_REG_KER);
+		EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true);
+		falcon_write(efx, &temp, GPIO_CTL_REG_KER);
+	}
+
 	/* Set buffer table mode */
 	EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
 	falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);