about summary refs log tree commit diff stats
path: root/drivers/net/skge.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--  drivers/net/skge.c  312
1 files changed, 164 insertions, 148 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index d7c98515fdfd..fd398da4993b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.9" 45#define DRV_VERSION "1.1"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
105static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 105static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
106static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; 106static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
107 107
108/* Don't need to look at whole 16K.
109 * last interesting register is descriptor poll timer.
110 */
111#define SKGE_REGS_LEN (29*128)
112
113static int skge_get_regs_len(struct net_device *dev) 108static int skge_get_regs_len(struct net_device *dev)
114{ 109{
115 return SKGE_REGS_LEN; 110 return 0x4000;
116} 111}
117 112
118/* 113/*
119 * Returns copy of control register region 114 * Returns copy of whole control register region
120 * I/O region is divided into banks and certain regions are unreadable 115 * Note: skip RAM address register because accessing it will
116 * cause bus hangs!
121 */ 117 */
122static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, 118static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
123 void *p) 119 void *p)
124{ 120{
125 const struct skge_port *skge = netdev_priv(dev); 121 const struct skge_port *skge = netdev_priv(dev);
126 unsigned long offs;
127 const void __iomem *io = skge->hw->regs; 122 const void __iomem *io = skge->hw->regs;
128 static const unsigned long bankmap
129 = (1<<0) | (1<<2) | (1<<8) | (1<<9)
130 | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
131 | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
132 | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
133 123
134 regs->version = 1; 124 regs->version = 1;
135 for (offs = 0; offs < regs->len; offs += 128) { 125 memset(p, 0, regs->len);
136 u32 len = min_t(u32, 128, regs->len - offs); 126 memcpy_fromio(p, io, B3_RAM_ADDR);
137 127
138 if (bankmap & (1<<(offs/128))) 128 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
139 memcpy_fromio(p + offs, io + offs, len); 129 regs->len - B3_RI_WTO_R1);
140 else
141 memset(p + offs, 0, len);
142 }
143} 130}
144 131
145/* Wake on Lan only supported on Yukon chps with rev 1 or above */ 132/* Wake on Lan only supported on Yukon chps with rev 1 or above */
@@ -669,7 +656,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
669 PHY_M_LED_BLINK_RT(BLINK_84MS) | 656 PHY_M_LED_BLINK_RT(BLINK_84MS) |
670 PHY_M_LEDC_TX_CTRL | 657 PHY_M_LEDC_TX_CTRL |
671 PHY_M_LEDC_DP_CTRL); 658 PHY_M_LEDC_DP_CTRL);
672 659
673 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 660 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
674 PHY_M_LED_MO_RX(MO_LED_OFF) | 661 PHY_M_LED_MO_RX(MO_LED_OFF) |
675 (skge->speed == SPEED_100 ? 662 (skge->speed == SPEED_100 ?
@@ -775,17 +762,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
775 return 0; 762 return 0;
776} 763}
777 764
778static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
779{
780 struct sk_buff *skb = dev_alloc_skb(size);
781
782 if (likely(skb)) {
783 skb->dev = dev;
784 skb_reserve(skb, NET_IP_ALIGN);
785 }
786 return skb;
787}
788
789/* Allocate and setup a new buffer for receiving */ 765/* Allocate and setup a new buffer for receiving */
790static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 766static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
791 struct sk_buff *skb, unsigned int bufsize) 767 struct sk_buff *skb, unsigned int bufsize)
@@ -858,16 +834,17 @@ static int skge_rx_fill(struct skge_port *skge)
858{ 834{
859 struct skge_ring *ring = &skge->rx_ring; 835 struct skge_ring *ring = &skge->rx_ring;
860 struct skge_element *e; 836 struct skge_element *e;
861 unsigned int bufsize = skge->rx_buf_size;
862 837
863 e = ring->start; 838 e = ring->start;
864 do { 839 do {
865 struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize); 840 struct sk_buff *skb;
866 841
842 skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
867 if (!skb) 843 if (!skb)
868 return -ENOMEM; 844 return -ENOMEM;
869 845
870 skge_rx_setup(skge, e, skb, bufsize); 846 skb_reserve(skb, NET_IP_ALIGN);
847 skge_rx_setup(skge, e, skb, skge->rx_buf_size);
871 } while ( (e = e->next) != ring->start); 848 } while ( (e = e->next) != ring->start);
872 849
873 ring->to_clean = ring->start; 850 ring->to_clean = ring->start;
@@ -876,7 +853,7 @@ static int skge_rx_fill(struct skge_port *skge)
876 853
877static void skge_link_up(struct skge_port *skge) 854static void skge_link_up(struct skge_port *skge)
878{ 855{
879 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), 856 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
880 LED_BLK_OFF|LED_SYNC_OFF|LED_ON); 857 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
881 858
882 netif_carrier_on(skge->netdev); 859 netif_carrier_on(skge->netdev);
@@ -987,6 +964,8 @@ static void genesis_reset(struct skge_hw *hw, int port)
987{ 964{
988 const u8 zero[8] = { 0 }; 965 const u8 zero[8] = { 0 };
989 966
967 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
968
990 /* reset the statistics module */ 969 /* reset the statistics module */
991 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 970 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
992 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ 971 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
@@ -1021,8 +1000,6 @@ static void bcom_check_link(struct skge_hw *hw, int port)
1021 (void) xm_phy_read(hw, port, PHY_BCOM_STAT); 1000 (void) xm_phy_read(hw, port, PHY_BCOM_STAT);
1022 status = xm_phy_read(hw, port, PHY_BCOM_STAT); 1001 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1023 1002
1024 pr_debug("bcom_check_link status=0x%x\n", status);
1025
1026 if ((status & PHY_ST_LSYNC) == 0) { 1003 if ((status & PHY_ST_LSYNC) == 0) {
1027 u16 cmd = xm_read16(hw, port, XM_MMU_CMD); 1004 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
1028 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1005 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
@@ -1106,8 +1083,6 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo)
1106 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1083 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1107 }; 1084 };
1108 1085
1109 pr_debug("bcom_phy_init\n");
1110
1111 /* read Id from external PHY (all have the same address) */ 1086 /* read Id from external PHY (all have the same address) */
1112 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); 1087 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1113 1088
@@ -1340,6 +1315,8 @@ static void genesis_stop(struct skge_port *skge)
1340 int port = skge->port; 1315 int port = skge->port;
1341 u32 reg; 1316 u32 reg;
1342 1317
1318 genesis_reset(hw, port);
1319
1343 /* Clear Tx packet arbiter timeout IRQ */ 1320 /* Clear Tx packet arbiter timeout IRQ */
1344 skge_write16(hw, B3_PA_CTRL, 1321 skge_write16(hw, B3_PA_CTRL,
1345 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); 1322 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
@@ -1465,7 +1442,6 @@ static void genesis_link_up(struct skge_port *skge)
1465 u16 cmd; 1442 u16 cmd;
1466 u32 mode, msk; 1443 u32 mode, msk;
1467 1444
1468 pr_debug("genesis_link_up\n");
1469 cmd = xm_read16(hw, port, XM_MMU_CMD); 1445 cmd = xm_read16(hw, port, XM_MMU_CMD);
1470 1446
1471 /* 1447 /*
@@ -1578,7 +1554,6 @@ static void yukon_init(struct skge_hw *hw, int port)
1578 struct skge_port *skge = netdev_priv(hw->dev[port]); 1554 struct skge_port *skge = netdev_priv(hw->dev[port]);
1579 u16 ctrl, ct1000, adv; 1555 u16 ctrl, ct1000, adv;
1580 1556
1581 pr_debug("yukon_init\n");
1582 if (skge->autoneg == AUTONEG_ENABLE) { 1557 if (skge->autoneg == AUTONEG_ENABLE) {
1583 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1558 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1584 1559
@@ -1668,6 +1643,22 @@ static void yukon_reset(struct skge_hw *hw, int port)
1668 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 1643 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1669} 1644}
1670 1645
1646/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
1647static int is_yukon_lite_a0(struct skge_hw *hw)
1648{
1649 u32 reg;
1650 int ret;
1651
1652 if (hw->chip_id != CHIP_ID_YUKON)
1653 return 0;
1654
1655 reg = skge_read32(hw, B2_FAR);
1656 skge_write8(hw, B2_FAR + 3, 0xff);
1657 ret = (skge_read8(hw, B2_FAR + 3) != 0);
1658 skge_write32(hw, B2_FAR, reg);
1659 return ret;
1660}
1661
1671static void yukon_mac_init(struct skge_hw *hw, int port) 1662static void yukon_mac_init(struct skge_hw *hw, int port)
1672{ 1663{
1673 struct skge_port *skge = netdev_priv(hw->dev[port]); 1664 struct skge_port *skge = netdev_priv(hw->dev[port]);
@@ -1677,9 +1668,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1677 1668
1678 /* WA code for COMA mode -- set PHY reset */ 1669 /* WA code for COMA mode -- set PHY reset */
1679 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1670 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1680 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1671 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1681 skge_write32(hw, B2_GP_IO, 1672 reg = skge_read32(hw, B2_GP_IO);
1682 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9)); 1673 reg |= GP_DIR_9 | GP_IO_9;
1674 skge_write32(hw, B2_GP_IO, reg);
1675 }
1683 1676
1684 /* hard reset */ 1677 /* hard reset */
1685 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1678 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
@@ -1687,10 +1680,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1687 1680
1688 /* WA code for COMA mode -- clear PHY reset */ 1681 /* WA code for COMA mode -- clear PHY reset */
1689 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1682 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1690 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1683 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1691 skge_write32(hw, B2_GP_IO, 1684 reg = skge_read32(hw, B2_GP_IO);
1692 (skge_read32(hw, B2_GP_IO) | GP_DIR_9) 1685 reg |= GP_DIR_9;
1693 & ~GP_IO_9); 1686 reg &= ~GP_IO_9;
1687 skge_write32(hw, B2_GP_IO, reg);
1688 }
1694 1689
1695 /* Set hardware config mode */ 1690 /* Set hardware config mode */
1696 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 1691 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
@@ -1729,7 +1724,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1729 } 1724 }
1730 1725
1731 gma_write16(hw, port, GM_GP_CTRL, reg); 1726 gma_write16(hw, port, GM_GP_CTRL, reg);
1732 skge_read16(hw, GMAC_IRQ_SRC); 1727 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
1733 1728
1734 yukon_init(hw, port); 1729 yukon_init(hw, port);
1735 1730
@@ -1779,9 +1774,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1779 /* Configure Rx MAC FIFO */ 1774 /* Configure Rx MAC FIFO */
1780 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 1775 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1781 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 1776 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1782 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1777
1783 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1778 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1779 if (is_yukon_lite_a0(hw))
1784 reg &= ~GMF_RX_F_FL_ON; 1780 reg &= ~GMF_RX_F_FL_ON;
1781
1785 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1782 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1786 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 1783 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1787 /* 1784 /*
@@ -1801,20 +1798,26 @@ static void yukon_stop(struct skge_port *skge)
1801 struct skge_hw *hw = skge->hw; 1798 struct skge_hw *hw = skge->hw;
1802 int port = skge->port; 1799 int port = skge->port;
1803 1800
1804 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1801 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
1805 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 1802 yukon_reset(hw, port);
1806 skge_write32(hw, B2_GP_IO,
1807 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1808 }
1809 1803
1810 gma_write16(hw, port, GM_GP_CTRL, 1804 gma_write16(hw, port, GM_GP_CTRL,
1811 gma_read16(hw, port, GM_GP_CTRL) 1805 gma_read16(hw, port, GM_GP_CTRL)
1812 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); 1806 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
1813 gma_read16(hw, port, GM_GP_CTRL); 1807 gma_read16(hw, port, GM_GP_CTRL);
1814 1808
1809 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1810 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1811 u32 io = skge_read32(hw, B2_GP_IO);
1812
1813 io |= GP_DIR_9 | GP_IO_9;
1814 skge_write32(hw, B2_GP_IO, io);
1815 skge_read32(hw, B2_GP_IO);
1816 }
1817
1815 /* set GPHY Control reset */ 1818 /* set GPHY Control reset */
1816 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1819 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1817 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 1820 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1818} 1821}
1819 1822
1820static void yukon_get_stats(struct skge_port *skge, u64 *data) 1823static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1873,10 +1876,8 @@ static void yukon_link_up(struct skge_port *skge)
1873 int port = skge->port; 1876 int port = skge->port;
1874 u16 reg; 1877 u16 reg;
1875 1878
1876 pr_debug("yukon_link_up\n");
1877
1878 /* Enable Transmit FIFO Underrun */ 1879 /* Enable Transmit FIFO Underrun */
1879 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); 1880 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1880 1881
1881 reg = gma_read16(hw, port, GM_GP_CTRL); 1882 reg = gma_read16(hw, port, GM_GP_CTRL);
1882 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 1883 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
@@ -1896,7 +1897,6 @@ static void yukon_link_down(struct skge_port *skge)
1896 int port = skge->port; 1897 int port = skge->port;
1897 u16 ctrl; 1898 u16 ctrl;
1898 1899
1899 pr_debug("yukon_link_down\n");
1900 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); 1900 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1901 1901
1902 ctrl = gma_read16(hw, port, GM_GP_CTRL); 1902 ctrl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2112,7 +2112,6 @@ static int skge_up(struct net_device *dev)
2112 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2112 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2113 skge_led(skge, LED_MODE_ON); 2113 skge_led(skge, LED_MODE_ON);
2114 2114
2115 pr_debug("skge_up completed\n");
2116 return 0; 2115 return 0;
2117 2116
2118 free_rx_ring: 2117 free_rx_ring:
@@ -2135,15 +2134,20 @@ static int skge_down(struct net_device *dev)
2135 2134
2136 netif_stop_queue(dev); 2135 netif_stop_queue(dev);
2137 2136
2137 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
2138 if (hw->chip_id == CHIP_ID_GENESIS)
2139 genesis_stop(skge);
2140 else
2141 yukon_stop(skge);
2142
2143 hw->intr_mask &= ~portirqmask[skge->port];
2144 skge_write32(hw, B0_IMSK, hw->intr_mask);
2145
2138 /* Stop transmitter */ 2146 /* Stop transmitter */
2139 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2147 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2140 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2148 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2141 RB_RST_SET|RB_DIS_OP_MD); 2149 RB_RST_SET|RB_DIS_OP_MD);
2142 2150
2143 if (hw->chip_id == CHIP_ID_GENESIS)
2144 genesis_stop(skge);
2145 else
2146 yukon_stop(skge);
2147 2151
2148 /* Disable Force Sync bit and Enable Alloc bit */ 2152 /* Disable Force Sync bit and Enable Alloc bit */
2149 skge_write8(hw, SK_REG(port, TXA_CTRL), 2153 skge_write8(hw, SK_REG(port, TXA_CTRL),
@@ -2367,8 +2371,6 @@ static void genesis_set_multicast(struct net_device *dev)
2367 u32 mode; 2371 u32 mode;
2368 u8 filter[8]; 2372 u8 filter[8];
2369 2373
2370 pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
2371
2372 mode = xm_read32(hw, port, XM_MODE); 2374 mode = xm_read32(hw, port, XM_MODE);
2373 mode |= XM_MD_ENA_HASH; 2375 mode |= XM_MD_ENA_HASH;
2374 if (dev->flags & IFF_PROMISC) 2376 if (dev->flags & IFF_PROMISC)
@@ -2435,6 +2437,14 @@ static void yukon_set_multicast(struct net_device *dev)
2435 gma_write16(hw, port, GM_RX_CTRL, reg); 2437 gma_write16(hw, port, GM_RX_CTRL, reg);
2436} 2438}
2437 2439
2440static inline u16 phy_length(const struct skge_hw *hw, u32 status)
2441{
2442 if (hw->chip_id == CHIP_ID_GENESIS)
2443 return status >> XMR_FS_LEN_SHIFT;
2444 else
2445 return status >> GMR_FS_LEN_SHIFT;
2446}
2447
2438static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 2448static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2439{ 2449{
2440 if (hw->chip_id == CHIP_ID_GENESIS) 2450 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2444,80 +2454,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2444 (status & GMR_FS_RX_OK) == 0; 2454 (status & GMR_FS_RX_OK) == 0;
2445} 2455}
2446 2456
2447static void skge_rx_error(struct skge_port *skge, int slot,
2448 u32 control, u32 status)
2449{
2450 if (netif_msg_rx_err(skge))
2451 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2452 skge->netdev->name, slot, control, status);
2453
2454 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2455 skge->net_stats.rx_length_errors++;
2456 else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2457 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2458 skge->net_stats.rx_length_errors++;
2459 if (status & XMR_FS_FRA_ERR)
2460 skge->net_stats.rx_frame_errors++;
2461 if (status & XMR_FS_FCS_ERR)
2462 skge->net_stats.rx_crc_errors++;
2463 } else {
2464 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2465 skge->net_stats.rx_length_errors++;
2466 if (status & GMR_FS_FRAGMENT)
2467 skge->net_stats.rx_frame_errors++;
2468 if (status & GMR_FS_CRC_ERR)
2469 skge->net_stats.rx_crc_errors++;
2470 }
2471}
2472 2457
2473/* Get receive buffer from descriptor. 2458/* Get receive buffer from descriptor.
2474 * Handles copy of small buffers and reallocation failures 2459 * Handles copy of small buffers and reallocation failures
2475 */ 2460 */
2476static inline struct sk_buff *skge_rx_get(struct skge_port *skge, 2461static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2477 struct skge_element *e, 2462 struct skge_element *e,
2478 unsigned int len) 2463 u32 control, u32 status, u16 csum)
2479{ 2464{
2480 struct sk_buff *nskb, *skb; 2465 struct sk_buff *skb;
2466 u16 len = control & BMU_BBC;
2467
2468 if (unlikely(netif_msg_rx_status(skge)))
2469 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2470 skge->netdev->name, e - skge->rx_ring.start,
2471 status, len);
2472
2473 if (len > skge->rx_buf_size)
2474 goto error;
2475
2476 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2477 goto error;
2478
2479 if (bad_phy_status(skge->hw, status))
2480 goto error;
2481
2482 if (phy_length(skge->hw, status) != len)
2483 goto error;
2481 2484
2482 if (len < RX_COPY_THRESHOLD) { 2485 if (len < RX_COPY_THRESHOLD) {
2483 nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN); 2486 skb = dev_alloc_skb(len + 2);
2484 if (unlikely(!nskb)) 2487 if (!skb)
2485 return NULL; 2488 goto resubmit;
2486 2489
2490 skb_reserve(skb, 2);
2487 pci_dma_sync_single_for_cpu(skge->hw->pdev, 2491 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2488 pci_unmap_addr(e, mapaddr), 2492 pci_unmap_addr(e, mapaddr),
2489 len, PCI_DMA_FROMDEVICE); 2493 len, PCI_DMA_FROMDEVICE);
2490 memcpy(nskb->data, e->skb->data, len); 2494 memcpy(skb->data, e->skb->data, len);
2491 pci_dma_sync_single_for_device(skge->hw->pdev, 2495 pci_dma_sync_single_for_device(skge->hw->pdev,
2492 pci_unmap_addr(e, mapaddr), 2496 pci_unmap_addr(e, mapaddr),
2493 len, PCI_DMA_FROMDEVICE); 2497 len, PCI_DMA_FROMDEVICE);
2494
2495 if (skge->rx_csum) {
2496 struct skge_rx_desc *rd = e->desc;
2497 nskb->csum = le16_to_cpu(rd->csum2);
2498 nskb->ip_summed = CHECKSUM_HW;
2499 }
2500 skge_rx_reuse(e, skge->rx_buf_size); 2498 skge_rx_reuse(e, skge->rx_buf_size);
2501 return nskb;
2502 } else { 2499 } else {
2503 nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size); 2500 struct sk_buff *nskb;
2504 if (unlikely(!nskb)) 2501 nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
2505 return NULL; 2502 if (!nskb)
2503 goto resubmit;
2506 2504
2507 pci_unmap_single(skge->hw->pdev, 2505 pci_unmap_single(skge->hw->pdev,
2508 pci_unmap_addr(e, mapaddr), 2506 pci_unmap_addr(e, mapaddr),
2509 pci_unmap_len(e, maplen), 2507 pci_unmap_len(e, maplen),
2510 PCI_DMA_FROMDEVICE); 2508 PCI_DMA_FROMDEVICE);
2511 skb = e->skb; 2509 skb = e->skb;
2512 if (skge->rx_csum) { 2510 prefetch(skb->data);
2513 struct skge_rx_desc *rd = e->desc;
2514 skb->csum = le16_to_cpu(rd->csum2);
2515 skb->ip_summed = CHECKSUM_HW;
2516 }
2517
2518 skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 2511 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2519 return skb;
2520 } 2512 }
2513
2514 skb_put(skb, len);
2515 skb->dev = skge->netdev;
2516 if (skge->rx_csum) {
2517 skb->csum = csum;
2518 skb->ip_summed = CHECKSUM_HW;
2519 }
2520
2521 skb->protocol = eth_type_trans(skb, skge->netdev);
2522
2523 return skb;
2524error:
2525
2526 if (netif_msg_rx_err(skge))
2527 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
2528 skge->netdev->name, e - skge->rx_ring.start,
2529 control, status);
2530
2531 if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2532 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2533 skge->net_stats.rx_length_errors++;
2534 if (status & XMR_FS_FRA_ERR)
2535 skge->net_stats.rx_frame_errors++;
2536 if (status & XMR_FS_FCS_ERR)
2537 skge->net_stats.rx_crc_errors++;
2538 } else {
2539 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2540 skge->net_stats.rx_length_errors++;
2541 if (status & GMR_FS_FRAGMENT)
2542 skge->net_stats.rx_frame_errors++;
2543 if (status & GMR_FS_CRC_ERR)
2544 skge->net_stats.rx_crc_errors++;
2545 }
2546
2547resubmit:
2548 skge_rx_reuse(e, skge->rx_buf_size);
2549 return NULL;
2521} 2550}
2522 2551
2523 2552
@@ -2530,37 +2559,19 @@ static int skge_poll(struct net_device *dev, int *budget)
2530 unsigned int to_do = min(dev->quota, *budget); 2559 unsigned int to_do = min(dev->quota, *budget);
2531 unsigned int work_done = 0; 2560 unsigned int work_done = 0;
2532 2561
2533 pr_debug("skge_poll\n");
2534
2535 for (e = ring->to_clean; work_done < to_do; e = e->next) { 2562 for (e = ring->to_clean; work_done < to_do; e = e->next) {
2536 struct skge_rx_desc *rd = e->desc; 2563 struct skge_rx_desc *rd = e->desc;
2537 struct sk_buff *skb; 2564 struct sk_buff *skb;
2538 u32 control, len, status; 2565 u32 control;
2539 2566
2540 rmb(); 2567 rmb();
2541 control = rd->control; 2568 control = rd->control;
2542 if (control & BMU_OWN) 2569 if (control & BMU_OWN)
2543 break; 2570 break;
2544 2571
2545 len = control & BMU_BBC; 2572 skb = skge_rx_get(skge, e, control, rd->status,
2546 status = rd->status; 2573 le16_to_cpu(rd->csum2));
2547
2548 if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2549 || bad_phy_status(hw, status))) {
2550 skge_rx_error(skge, e - ring->start, control, status);
2551 skge_rx_reuse(e, skge->rx_buf_size);
2552 continue;
2553 }
2554
2555 if (netif_msg_rx_status(skge))
2556 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2557 dev->name, e - ring->start, rd->status, len);
2558
2559 skb = skge_rx_get(skge, e, len);
2560 if (likely(skb)) { 2574 if (likely(skb)) {
2561 skb_put(skb, len);
2562 skb->protocol = eth_type_trans(skb, dev);
2563
2564 dev->last_rx = jiffies; 2575 dev->last_rx = jiffies;
2565 netif_receive_skb(skb); 2576 netif_receive_skb(skb);
2566 2577
@@ -2672,9 +2683,9 @@ static void skge_error_irq(struct skge_hw *hw)
2672 if (hw->chip_id == CHIP_ID_GENESIS) { 2683 if (hw->chip_id == CHIP_ID_GENESIS) {
2673 /* clear xmac errors */ 2684 /* clear xmac errors */
2674 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 2685 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2675 skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); 2686 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
2676 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 2687 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2677 skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); 2688 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
2678 } else { 2689 } else {
2679 /* Timestamp (unused) overflow */ 2690 /* Timestamp (unused) overflow */
2680 if (hwstatus & IS_IRQ_TIST_OV) 2691 if (hwstatus & IS_IRQ_TIST_OV)
@@ -3000,9 +3011,6 @@ static int skge_reset(struct skge_hw *hw)
3000 3011
3001 skge_write32(hw, B0_IMSK, hw->intr_mask); 3012 skge_write32(hw, B0_IMSK, hw->intr_mask);
3002 3013
3003 if (hw->chip_id != CHIP_ID_GENESIS)
3004 skge_write8(hw, GMAC_IRQ_MSK, 0);
3005
3006 spin_lock_bh(&hw->phy_lock); 3014 spin_lock_bh(&hw->phy_lock);
3007 for (i = 0; i < hw->ports; i++) { 3015 for (i = 0; i < hw->ports; i++) {
3008 if (hw->chip_id == CHIP_ID_GENESIS) 3016 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -3230,6 +3238,11 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3230 dev0 = hw->dev[0]; 3238 dev0 = hw->dev[0];
3231 unregister_netdev(dev0); 3239 unregister_netdev(dev0);
3232 3240
3241 skge_write32(hw, B0_IMSK, 0);
3242 skge_write16(hw, B0_LED, LED_STAT_OFF);
3243 skge_pci_clear(hw);
3244 skge_write8(hw, B0_CTST, CS_RST_SET);
3245
3233 tasklet_kill(&hw->ext_tasklet); 3246 tasklet_kill(&hw->ext_tasklet);
3234 3247
3235 free_irq(pdev->irq, hw); 3248 free_irq(pdev->irq, hw);
@@ -3238,7 +3251,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3238 if (dev1) 3251 if (dev1)
3239 free_netdev(dev1); 3252 free_netdev(dev1);
3240 free_netdev(dev0); 3253 free_netdev(dev0);
3241 skge_write16(hw, B0_LED, LED_STAT_OFF); 3254
3242 iounmap(hw->regs); 3255 iounmap(hw->regs);
3243 kfree(hw); 3256 kfree(hw);
3244 pci_set_drvdata(pdev, NULL); 3257 pci_set_drvdata(pdev, NULL);
@@ -3257,7 +3270,10 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
3257 struct skge_port *skge = netdev_priv(dev); 3270 struct skge_port *skge = netdev_priv(dev);
3258 if (netif_running(dev)) { 3271 if (netif_running(dev)) {
3259 netif_carrier_off(dev); 3272 netif_carrier_off(dev);
3260 skge_down(dev); 3273 if (skge->wol)
3274 netif_stop_queue(dev);
3275 else
3276 skge_down(dev);
3261 } 3277 }
3262 netif_device_detach(dev); 3278 netif_device_detach(dev);
3263 wol |= skge->wol; 3279 wol |= skge->wol;