about summary refs log tree commit diff stats
path: root/drivers/net/skge.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--drivers/net/skge.c485
1 files changed, 382 insertions, 103 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 2aae9fe38c5..b9961dc4760 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -36,13 +36,15 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/crc32.h> 37#include <linux/crc32.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/debugfs.h>
40#include <linux/seq_file.h>
39#include <linux/mii.h> 41#include <linux/mii.h>
40#include <asm/irq.h> 42#include <asm/irq.h>
41 43
42#include "skge.h" 44#include "skge.h"
43 45
44#define DRV_NAME "skge" 46#define DRV_NAME "skge"
45#define DRV_VERSION "1.11" 47#define DRV_VERSION "1.12"
46#define PFX DRV_NAME " " 48#define PFX DRV_NAME " "
47 49
48#define DEFAULT_TX_RING_SIZE 128 50#define DEFAULT_TX_RING_SIZE 128
@@ -57,7 +59,10 @@
57#define TX_WATCHDOG (5 * HZ) 59#define TX_WATCHDOG (5 * HZ)
58#define NAPI_WEIGHT 64 60#define NAPI_WEIGHT 64
59#define BLINK_MS 250 61#define BLINK_MS 250
60#define LINK_HZ (HZ/2) 62#define LINK_HZ HZ
63
64#define SKGE_EEPROM_MAGIC 0x9933aabb
65
61 66
62MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); 67MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
63MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); 68MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
@@ -445,15 +450,15 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev)
445 else 450 else
446 yukon_get_stats(skge, data); 451 yukon_get_stats(skge, data);
447 452
448 skge->net_stats.tx_bytes = data[0]; 453 dev->stats.tx_bytes = data[0];
449 skge->net_stats.rx_bytes = data[1]; 454 dev->stats.rx_bytes = data[1];
450 skge->net_stats.tx_packets = data[2] + data[4] + data[6]; 455 dev->stats.tx_packets = data[2] + data[4] + data[6];
451 skge->net_stats.rx_packets = data[3] + data[5] + data[7]; 456 dev->stats.rx_packets = data[3] + data[5] + data[7];
452 skge->net_stats.multicast = data[3] + data[5]; 457 dev->stats.multicast = data[3] + data[5];
453 skge->net_stats.collisions = data[10]; 458 dev->stats.collisions = data[10];
454 skge->net_stats.tx_aborted_errors = data[12]; 459 dev->stats.tx_aborted_errors = data[12];
455 460
456 return &skge->net_stats; 461 return &dev->stats;
457} 462}
458 463
459static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) 464static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -798,6 +803,98 @@ static int skge_phys_id(struct net_device *dev, u32 data)
798 return 0; 803 return 0;
799} 804}
800 805
806static int skge_get_eeprom_len(struct net_device *dev)
807{
808 struct skge_port *skge = netdev_priv(dev);
809 u32 reg2;
810
811 pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
812 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
813}
814
815static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
816{
817 u32 val;
818
819 pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset);
820
821 do {
822 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
823 } while (!(offset & PCI_VPD_ADDR_F));
824
825 pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val);
826 return val;
827}
828
829static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val)
830{
831 pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val);
832 pci_write_config_word(pdev, cap + PCI_VPD_ADDR,
833 offset | PCI_VPD_ADDR_F);
834
835 do {
836 pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset);
837 } while (offset & PCI_VPD_ADDR_F);
838}
839
840static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
841 u8 *data)
842{
843 struct skge_port *skge = netdev_priv(dev);
844 struct pci_dev *pdev = skge->hw->pdev;
845 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
846 int length = eeprom->len;
847 u16 offset = eeprom->offset;
848
849 if (!cap)
850 return -EINVAL;
851
852 eeprom->magic = SKGE_EEPROM_MAGIC;
853
854 while (length > 0) {
855 u32 val = skge_vpd_read(pdev, cap, offset);
856 int n = min_t(int, length, sizeof(val));
857
858 memcpy(data, &val, n);
859 length -= n;
860 data += n;
861 offset += n;
862 }
863 return 0;
864}
865
866static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
867 u8 *data)
868{
869 struct skge_port *skge = netdev_priv(dev);
870 struct pci_dev *pdev = skge->hw->pdev;
871 int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
872 int length = eeprom->len;
873 u16 offset = eeprom->offset;
874
875 if (!cap)
876 return -EINVAL;
877
878 if (eeprom->magic != SKGE_EEPROM_MAGIC)
879 return -EINVAL;
880
881 while (length > 0) {
882 u32 val;
883 int n = min_t(int, length, sizeof(val));
884
885 if (n < sizeof(val))
886 val = skge_vpd_read(pdev, cap, offset);
887 memcpy(&val, data, n);
888
889 skge_vpd_write(pdev, cap, offset, val);
890
891 length -= n;
892 data += n;
893 offset += n;
894 }
895 return 0;
896}
897
801static const struct ethtool_ops skge_ethtool_ops = { 898static const struct ethtool_ops skge_ethtool_ops = {
802 .get_settings = skge_get_settings, 899 .get_settings = skge_get_settings,
803 .set_settings = skge_set_settings, 900 .set_settings = skge_set_settings,
@@ -810,6 +907,9 @@ static const struct ethtool_ops skge_ethtool_ops = {
810 .set_msglevel = skge_set_msglevel, 907 .set_msglevel = skge_set_msglevel,
811 .nway_reset = skge_nway_reset, 908 .nway_reset = skge_nway_reset,
812 .get_link = ethtool_op_get_link, 909 .get_link = ethtool_op_get_link,
910 .get_eeprom_len = skge_get_eeprom_len,
911 .get_eeprom = skge_get_eeprom,
912 .set_eeprom = skge_set_eeprom,
813 .get_ringparam = skge_get_ring_param, 913 .get_ringparam = skge_get_ring_param,
814 .set_ringparam = skge_set_ring_param, 914 .set_ringparam = skge_set_ring_param,
815 .get_pauseparam = skge_get_pauseparam, 915 .get_pauseparam = skge_get_pauseparam,
@@ -995,19 +1095,15 @@ static void xm_link_down(struct skge_hw *hw, int port)
995{ 1095{
996 struct net_device *dev = hw->dev[port]; 1096 struct net_device *dev = hw->dev[port];
997 struct skge_port *skge = netdev_priv(dev); 1097 struct skge_port *skge = netdev_priv(dev);
998 u16 cmd, msk; 1098 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
999 1099
1000 if (hw->phy_type == SK_PHY_XMAC) { 1100 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
1001 msk = xm_read16(hw, port, XM_IMSK);
1002 msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND;
1003 xm_write16(hw, port, XM_IMSK, msk);
1004 }
1005 1101
1006 cmd = xm_read16(hw, port, XM_MMU_CMD);
1007 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1102 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1008 xm_write16(hw, port, XM_MMU_CMD, cmd); 1103 xm_write16(hw, port, XM_MMU_CMD, cmd);
1104
1009 /* dummy read to ensure writing */ 1105 /* dummy read to ensure writing */
1010 (void) xm_read16(hw, port, XM_MMU_CMD); 1106 xm_read16(hw, port, XM_MMU_CMD);
1011 1107
1012 if (netif_carrier_ok(dev)) 1108 if (netif_carrier_ok(dev))
1013 skge_link_down(skge); 1109 skge_link_down(skge);
@@ -1103,7 +1199,7 @@ static void genesis_reset(struct skge_hw *hw, int port)
1103 1199
1104 /* reset the statistics module */ 1200 /* reset the statistics module */
1105 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 1201 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
1106 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ 1202 xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE);
1107 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ 1203 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
1108 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ 1204 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
1109 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ 1205 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
@@ -1141,7 +1237,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
1141 u16 status; 1237 u16 status;
1142 1238
1143 /* read twice because of latch */ 1239 /* read twice because of latch */
1144 (void) xm_phy_read(hw, port, PHY_BCOM_STAT); 1240 xm_phy_read(hw, port, PHY_BCOM_STAT);
1145 status = xm_phy_read(hw, port, PHY_BCOM_STAT); 1241 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1146 1242
1147 if ((status & PHY_ST_LSYNC) == 0) { 1243 if ((status & PHY_ST_LSYNC) == 0) {
@@ -1342,7 +1438,7 @@ static void xm_phy_init(struct skge_port *skge)
1342 mod_timer(&skge->link_timer, jiffies + LINK_HZ); 1438 mod_timer(&skge->link_timer, jiffies + LINK_HZ);
1343} 1439}
1344 1440
1345static void xm_check_link(struct net_device *dev) 1441static int xm_check_link(struct net_device *dev)
1346{ 1442{
1347 struct skge_port *skge = netdev_priv(dev); 1443 struct skge_port *skge = netdev_priv(dev);
1348 struct skge_hw *hw = skge->hw; 1444 struct skge_hw *hw = skge->hw;
@@ -1350,25 +1446,25 @@ static void xm_check_link(struct net_device *dev)
1350 u16 status; 1446 u16 status;
1351 1447
1352 /* read twice because of latch */ 1448 /* read twice because of latch */
1353 (void) xm_phy_read(hw, port, PHY_XMAC_STAT); 1449 xm_phy_read(hw, port, PHY_XMAC_STAT);
1354 status = xm_phy_read(hw, port, PHY_XMAC_STAT); 1450 status = xm_phy_read(hw, port, PHY_XMAC_STAT);
1355 1451
1356 if ((status & PHY_ST_LSYNC) == 0) { 1452 if ((status & PHY_ST_LSYNC) == 0) {
1357 xm_link_down(hw, port); 1453 xm_link_down(hw, port);
1358 return; 1454 return 0;
1359 } 1455 }
1360 1456
1361 if (skge->autoneg == AUTONEG_ENABLE) { 1457 if (skge->autoneg == AUTONEG_ENABLE) {
1362 u16 lpa, res; 1458 u16 lpa, res;
1363 1459
1364 if (!(status & PHY_ST_AN_OVER)) 1460 if (!(status & PHY_ST_AN_OVER))
1365 return; 1461 return 0;
1366 1462
1367 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); 1463 lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
1368 if (lpa & PHY_B_AN_RF) { 1464 if (lpa & PHY_B_AN_RF) {
1369 printk(KERN_NOTICE PFX "%s: remote fault\n", 1465 printk(KERN_NOTICE PFX "%s: remote fault\n",
1370 dev->name); 1466 dev->name);
1371 return; 1467 return 0;
1372 } 1468 }
1373 1469
1374 res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); 1470 res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI);
@@ -1384,7 +1480,7 @@ static void xm_check_link(struct net_device *dev)
1384 default: 1480 default:
1385 printk(KERN_NOTICE PFX "%s: duplex mismatch\n", 1481 printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
1386 dev->name); 1482 dev->name);
1387 return; 1483 return 0;
1388 } 1484 }
1389 1485
1390 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1486 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
@@ -1408,11 +1504,14 @@ static void xm_check_link(struct net_device *dev)
1408 1504
1409 if (!netif_carrier_ok(dev)) 1505 if (!netif_carrier_ok(dev))
1410 genesis_link_up(skge); 1506 genesis_link_up(skge);
1507 return 1;
1411} 1508}
1412 1509
1413/* Poll to check for link coming up. 1510/* Poll to check for link coming up.
1511 *
1414 * Since internal PHY is wired to a level triggered pin, can't 1512 * Since internal PHY is wired to a level triggered pin, can't
1415 * get an interrupt when carrier is detected. 1513 * get an interrupt when carrier is detected, need to poll for
1514 * link coming up.
1416 */ 1515 */
1417static void xm_link_timer(unsigned long arg) 1516static void xm_link_timer(unsigned long arg)
1418{ 1517{
@@ -1420,29 +1519,35 @@ static void xm_link_timer(unsigned long arg)
1420 struct net_device *dev = skge->netdev; 1519 struct net_device *dev = skge->netdev;
1421 struct skge_hw *hw = skge->hw; 1520 struct skge_hw *hw = skge->hw;
1422 int port = skge->port; 1521 int port = skge->port;
1522 int i;
1523 unsigned long flags;
1423 1524
1424 if (!netif_running(dev)) 1525 if (!netif_running(dev))
1425 return; 1526 return;
1426 1527
1427 if (netif_carrier_ok(dev)) { 1528 spin_lock_irqsave(&hw->phy_lock, flags);
1529
1530 /*
1531 * Verify that the link by checking GPIO register three times.
1532 * This pin has the signal from the link_sync pin connected to it.
1533 */
1534 for (i = 0; i < 3; i++) {
1535 if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS)
1536 goto link_down;
1537 }
1538
1539 /* Re-enable interrupt to detect link down */
1540 if (xm_check_link(dev)) {
1541 u16 msk = xm_read16(hw, port, XM_IMSK);
1542 msk &= ~XM_IS_INP_ASS;
1543 xm_write16(hw, port, XM_IMSK, msk);
1428 xm_read16(hw, port, XM_ISRC); 1544 xm_read16(hw, port, XM_ISRC);
1429 if (!(xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS))
1430 goto nochange;
1431 } else { 1545 } else {
1432 if (xm_read32(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) 1546link_down:
1433 goto nochange; 1547 mod_timer(&skge->link_timer,
1434 xm_read16(hw, port, XM_ISRC); 1548 round_jiffies(jiffies + LINK_HZ));
1435 if (xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)
1436 goto nochange;
1437 } 1549 }
1438 1550 spin_unlock_irqrestore(&hw->phy_lock, flags);
1439 spin_lock(&hw->phy_lock);
1440 xm_check_link(dev);
1441 spin_unlock(&hw->phy_lock);
1442
1443nochange:
1444 if (netif_running(dev))
1445 mod_timer(&skge->link_timer, jiffies + LINK_HZ);
1446} 1551}
1447 1552
1448static void genesis_mac_init(struct skge_hw *hw, int port) 1553static void genesis_mac_init(struct skge_hw *hw, int port)
@@ -1679,24 +1784,27 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data)
1679 1784
1680static void genesis_mac_intr(struct skge_hw *hw, int port) 1785static void genesis_mac_intr(struct skge_hw *hw, int port)
1681{ 1786{
1682 struct skge_port *skge = netdev_priv(hw->dev[port]); 1787 struct net_device *dev = hw->dev[port];
1788 struct skge_port *skge = netdev_priv(dev);
1683 u16 status = xm_read16(hw, port, XM_ISRC); 1789 u16 status = xm_read16(hw, port, XM_ISRC);
1684 1790
1685 if (netif_msg_intr(skge)) 1791 if (netif_msg_intr(skge))
1686 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", 1792 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1687 skge->netdev->name, status); 1793 dev->name, status);
1688 1794
1689 if (hw->phy_type == SK_PHY_XMAC && 1795 if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
1690 (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC))) 1796 xm_link_down(hw, port);
1691 xm_link_down(hw, port); 1797 mod_timer(&skge->link_timer, jiffies + 1);
1798 }
1692 1799
1693 if (status & XM_IS_TXF_UR) { 1800 if (status & XM_IS_TXF_UR) {
1694 xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1801 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1695 ++skge->net_stats.tx_fifo_errors; 1802 ++dev->stats.tx_fifo_errors;
1696 } 1803 }
1804
1697 if (status & XM_IS_RXF_OV) { 1805 if (status & XM_IS_RXF_OV) {
1698 xm_write32(hw, port, XM_MODE, XM_MD_FRF); 1806 xm_write32(hw, port, XM_MODE, XM_MD_FRF);
1699 ++skge->net_stats.rx_fifo_errors; 1807 ++dev->stats.rx_fifo_errors;
1700 } 1808 }
1701} 1809}
1702 1810
@@ -1753,11 +1861,12 @@ static void genesis_link_up(struct skge_port *skge)
1753 } 1861 }
1754 1862
1755 xm_write32(hw, port, XM_MODE, mode); 1863 xm_write32(hw, port, XM_MODE, mode);
1756 msk = XM_DEF_MSK;
1757 if (hw->phy_type != SK_PHY_XMAC)
1758 msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */
1759 1864
1865 /* Turn on detection of Tx underrun, Rx overrun */
1866 msk = xm_read16(hw, port, XM_IMSK);
1867 msk &= ~(XM_IS_RXF_OV | XM_IS_TXF_UR);
1760 xm_write16(hw, port, XM_IMSK, msk); 1868 xm_write16(hw, port, XM_IMSK, msk);
1869
1761 xm_read16(hw, port, XM_ISRC); 1870 xm_read16(hw, port, XM_ISRC);
1762 1871
1763 /* get MMU Command Reg. */ 1872 /* get MMU Command Reg. */
@@ -2192,12 +2301,12 @@ static void yukon_mac_intr(struct skge_hw *hw, int port)
2192 dev->name, status); 2301 dev->name, status);
2193 2302
2194 if (status & GM_IS_RX_FF_OR) { 2303 if (status & GM_IS_RX_FF_OR) {
2195 ++skge->net_stats.rx_fifo_errors; 2304 ++dev->stats.rx_fifo_errors;
2196 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); 2305 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2197 } 2306 }
2198 2307
2199 if (status & GM_IS_TX_FF_UR) { 2308 if (status & GM_IS_TX_FF_UR) {
2200 ++skge->net_stats.tx_fifo_errors; 2309 ++dev->stats.tx_fifo_errors;
2201 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); 2310 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2202 } 2311 }
2203 2312
@@ -2403,32 +2512,31 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2403 return err; 2512 return err;
2404} 2513}
2405 2514
2406static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) 2515/* Assign Ram Buffer allocation to queue */
2516static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, u32 space)
2407{ 2517{
2408 u32 end; 2518 u32 end;
2409 2519
2410 start /= 8; 2520 /* convert from K bytes to qwords used for hw register */
2411 len /= 8; 2521 start *= 1024/8;
2412 end = start + len - 1; 2522 space *= 1024/8;
2523 end = start + space - 1;
2413 2524
2414 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); 2525 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
2415 skge_write32(hw, RB_ADDR(q, RB_START), start); 2526 skge_write32(hw, RB_ADDR(q, RB_START), start);
2527 skge_write32(hw, RB_ADDR(q, RB_END), end);
2416 skge_write32(hw, RB_ADDR(q, RB_WP), start); 2528 skge_write32(hw, RB_ADDR(q, RB_WP), start);
2417 skge_write32(hw, RB_ADDR(q, RB_RP), start); 2529 skge_write32(hw, RB_ADDR(q, RB_RP), start);
2418 skge_write32(hw, RB_ADDR(q, RB_END), end);
2419 2530
2420 if (q == Q_R1 || q == Q_R2) { 2531 if (q == Q_R1 || q == Q_R2) {
2532 u32 tp = space - space/4;
2533
2421 /* Set thresholds on receive queue's */ 2534 /* Set thresholds on receive queue's */
2422 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), 2535 skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
2423 start + (2*len)/3); 2536 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
2424 skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), 2537 } else if (hw->chip_id != CHIP_ID_GENESIS)
2425 start + (len/3)); 2538 /* Genesis Tx Fifo is too small for normal store/forward */
2426 } else {
2427 /* Enable store & forward on Tx queue's because
2428 * Tx FIFO is only 4K on Genesis and 1K on Yukon
2429 */
2430 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); 2539 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
2431 }
2432 2540
2433 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); 2541 skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
2434} 2542}
@@ -2456,7 +2564,7 @@ static int skge_up(struct net_device *dev)
2456 struct skge_port *skge = netdev_priv(dev); 2564 struct skge_port *skge = netdev_priv(dev);
2457 struct skge_hw *hw = skge->hw; 2565 struct skge_hw *hw = skge->hw;
2458 int port = skge->port; 2566 int port = skge->port;
2459 u32 chunk, ram_addr; 2567 u32 ramaddr, ramsize, rxspace;
2460 size_t rx_size, tx_size; 2568 size_t rx_size, tx_size;
2461 int err; 2569 int err;
2462 2570
@@ -2511,14 +2619,15 @@ static int skge_up(struct net_device *dev)
2511 spin_unlock_bh(&hw->phy_lock); 2619 spin_unlock_bh(&hw->phy_lock);
2512 2620
2513 /* Configure RAMbuffers */ 2621 /* Configure RAMbuffers */
2514 chunk = hw->ram_size / ((hw->ports + 1)*2); 2622 ramsize = (hw->ram_size - hw->ram_offset) / hw->ports;
2515 ram_addr = hw->ram_offset + 2 * chunk * port; 2623 ramaddr = hw->ram_offset + port * ramsize;
2624 rxspace = 8 + (2*(ramsize - 16))/3;
2516 2625
2517 skge_ramset(hw, rxqaddr[port], ram_addr, chunk); 2626 skge_ramset(hw, rxqaddr[port], ramaddr, rxspace);
2518 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); 2627 skge_ramset(hw, txqaddr[port], ramaddr + rxspace, ramsize - rxspace);
2519 2628
2629 skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
2520 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); 2630 BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
2521 skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
2522 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); 2631 skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
2523 2632
2524 /* Start receiver BMU */ 2633 /* Start receiver BMU */
@@ -2544,6 +2653,15 @@ static int skge_up(struct net_device *dev)
2544 return err; 2653 return err;
2545} 2654}
2546 2655
2656/* stop receiver */
2657static void skge_rx_stop(struct skge_hw *hw, int port)
2658{
2659 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP);
2660 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2661 RB_RST_SET|RB_DIS_OP_MD);
2662 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2663}
2664
2547static int skge_down(struct net_device *dev) 2665static int skge_down(struct net_device *dev)
2548{ 2666{
2549 struct skge_port *skge = netdev_priv(dev); 2667 struct skge_port *skge = netdev_priv(dev);
@@ -2595,11 +2713,8 @@ static int skge_down(struct net_device *dev)
2595 2713
2596 /* Reset the RAM Buffer async Tx queue */ 2714 /* Reset the RAM Buffer async Tx queue */
2597 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); 2715 skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET);
2598 /* stop receiver */ 2716
2599 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); 2717 skge_rx_stop(hw, port);
2600 skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL),
2601 RB_RST_SET|RB_DIS_OP_MD);
2602 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2603 2718
2604 if (hw->chip_id == CHIP_ID_GENESIS) { 2719 if (hw->chip_id == CHIP_ID_GENESIS) {
2605 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); 2720 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
@@ -2782,7 +2897,11 @@ static void skge_tx_timeout(struct net_device *dev)
2782 2897
2783static int skge_change_mtu(struct net_device *dev, int new_mtu) 2898static int skge_change_mtu(struct net_device *dev, int new_mtu)
2784{ 2899{
2900 struct skge_port *skge = netdev_priv(dev);
2901 struct skge_hw *hw = skge->hw;
2902 int port = skge->port;
2785 int err; 2903 int err;
2904 u16 ctl, reg;
2786 2905
2787 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2906 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2788 return -EINVAL; 2907 return -EINVAL;
@@ -2792,13 +2911,40 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
2792 return 0; 2911 return 0;
2793 } 2912 }
2794 2913
2795 skge_down(dev); 2914 skge_write32(hw, B0_IMSK, 0);
2915 dev->trans_start = jiffies; /* prevent tx timeout */
2916 netif_stop_queue(dev);
2917 napi_disable(&skge->napi);
2918
2919 ctl = gma_read16(hw, port, GM_GP_CTRL);
2920 gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
2921
2922 skge_rx_clean(skge);
2923 skge_rx_stop(hw, port);
2796 2924
2797 dev->mtu = new_mtu; 2925 dev->mtu = new_mtu;
2798 2926
2799 err = skge_up(dev); 2927 reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
2928 if (new_mtu > 1500)
2929 reg |= GM_SMOD_JUMBO_ENA;
2930 gma_write16(hw, port, GM_SERIAL_MODE, reg);
2931
2932 skge_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
2933
2934 err = skge_rx_fill(dev);
2935 wmb();
2936 if (!err)
2937 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2938 skge_write32(hw, B0_IMSK, hw->intr_mask);
2939
2800 if (err) 2940 if (err)
2801 dev_close(dev); 2941 dev_close(dev);
2942 else {
2943 gma_write16(hw, port, GM_GP_CTRL, ctl);
2944
2945 napi_enable(&skge->napi);
2946 netif_wake_queue(dev);
2947 }
2802 2948
2803 return err; 2949 return err;
2804} 2950}
@@ -2994,18 +3140,18 @@ error:
2994 3140
2995 if (skge->hw->chip_id == CHIP_ID_GENESIS) { 3141 if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2996 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 3142 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2997 skge->net_stats.rx_length_errors++; 3143 dev->stats.rx_length_errors++;
2998 if (status & XMR_FS_FRA_ERR) 3144 if (status & XMR_FS_FRA_ERR)
2999 skge->net_stats.rx_frame_errors++; 3145 dev->stats.rx_frame_errors++;
3000 if (status & XMR_FS_FCS_ERR) 3146 if (status & XMR_FS_FCS_ERR)
3001 skge->net_stats.rx_crc_errors++; 3147 dev->stats.rx_crc_errors++;
3002 } else { 3148 } else {
3003 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 3149 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
3004 skge->net_stats.rx_length_errors++; 3150 dev->stats.rx_length_errors++;
3005 if (status & GMR_FS_FRAGMENT) 3151 if (status & GMR_FS_FRAGMENT)
3006 skge->net_stats.rx_frame_errors++; 3152 dev->stats.rx_frame_errors++;
3007 if (status & GMR_FS_CRC_ERR) 3153 if (status & GMR_FS_CRC_ERR)
3008 skge->net_stats.rx_crc_errors++; 3154 dev->stats.rx_crc_errors++;
3009 } 3155 }
3010 3156
3011resubmit: 3157resubmit:
@@ -3103,10 +3249,7 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
3103{ 3249{
3104 struct net_device *dev = hw->dev[port]; 3250 struct net_device *dev = hw->dev[port];
3105 3251
3106 if (dev) { 3252 ++dev->stats.tx_heartbeat_errors;
3107 struct skge_port *skge = netdev_priv(dev);
3108 ++skge->net_stats.tx_heartbeat_errors;
3109 }
3110 3253
3111 if (hw->chip_id == CHIP_ID_GENESIS) 3254 if (hw->chip_id == CHIP_ID_GENESIS)
3112 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), 3255 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
@@ -3259,9 +3402,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3259 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); 3402 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
3260 3403
3261 if (status & IS_PA_TO_RX1) { 3404 if (status & IS_PA_TO_RX1) {
3262 struct skge_port *skge = netdev_priv(hw->dev[0]); 3405 ++hw->dev[0]->stats.rx_over_errors;
3263
3264 ++skge->net_stats.rx_over_errors;
3265 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); 3406 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
3266 } 3407 }
3267 3408
@@ -3278,7 +3419,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3278 } 3419 }
3279 3420
3280 if (status & IS_PA_TO_RX2) { 3421 if (status & IS_PA_TO_RX2) {
3281 ++skge->net_stats.rx_over_errors; 3422 ++hw->dev[1]->stats.rx_over_errors;
3282 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); 3423 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
3283 } 3424 }
3284 3425
@@ -3450,15 +3591,12 @@ static int skge_reset(struct skge_hw *hw)
3450 if (hw->chip_id == CHIP_ID_GENESIS) { 3591 if (hw->chip_id == CHIP_ID_GENESIS) {
3451 if (t8 == 3) { 3592 if (t8 == 3) {
3452 /* special case: 4 x 64k x 36, offset = 0x80000 */ 3593 /* special case: 4 x 64k x 36, offset = 0x80000 */
3453 hw->ram_size = 0x100000; 3594 hw->ram_size = 1024;
3454 hw->ram_offset = 0x80000; 3595 hw->ram_offset = 512;
3455 } else 3596 } else
3456 hw->ram_size = t8 * 512; 3597 hw->ram_size = t8 * 512;
3457 } 3598 } else /* Yukon */
3458 else if (t8 == 0) 3599 hw->ram_size = t8 ? t8 * 4 : 128;
3459 hw->ram_size = 0x20000;
3460 else
3461 hw->ram_size = t8 * 4096;
3462 3600
3463 hw->intr_mask = IS_HW_ERR; 3601 hw->intr_mask = IS_HW_ERR;
3464 3602
@@ -3540,6 +3678,145 @@ static int skge_reset(struct skge_hw *hw)
3540 return 0; 3678 return 0;
3541} 3679}
3542 3680
3681
3682#ifdef CONFIG_SKGE_DEBUG
3683
3684static struct dentry *skge_debug;
3685
3686static int skge_debug_show(struct seq_file *seq, void *v)
3687{
3688 struct net_device *dev = seq->private;
3689 const struct skge_port *skge = netdev_priv(dev);
3690 const struct skge_hw *hw = skge->hw;
3691 const struct skge_element *e;
3692
3693 if (!netif_running(dev))
3694 return -ENETDOWN;
3695
3696 seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC),
3697 skge_read32(hw, B0_IMSK));
3698
3699 seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
3700 for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
3701 const struct skge_tx_desc *t = e->desc;
3702 seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n",
3703 t->control, t->dma_hi, t->dma_lo, t->status,
3704 t->csum_offs, t->csum_write, t->csum_start);
3705 }
3706
3707 seq_printf(seq, "\nRx Ring: \n");
3708 for (e = skge->rx_ring.to_clean; ; e = e->next) {
3709 const struct skge_rx_desc *r = e->desc;
3710
3711 if (r->control & BMU_OWN)
3712 break;
3713
3714 seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n",
3715 r->control, r->dma_hi, r->dma_lo, r->status,
3716 r->timestamp, r->csum1, r->csum1_start);
3717 }
3718
3719 return 0;
3720}
3721
3722static int skge_debug_open(struct inode *inode, struct file *file)
3723{
3724 return single_open(file, skge_debug_show, inode->i_private);
3725}
3726
3727static const struct file_operations skge_debug_fops = {
3728 .owner = THIS_MODULE,
3729 .open = skge_debug_open,
3730 .read = seq_read,
3731 .llseek = seq_lseek,
3732 .release = single_release,
3733};
3734
3735/*
3736 * Use network device events to create/remove/rename
3737 * debugfs file entries
3738 */
3739static int skge_device_event(struct notifier_block *unused,
3740 unsigned long event, void *ptr)
3741{
3742 struct net_device *dev = ptr;
3743 struct skge_port *skge;
3744 struct dentry *d;
3745
3746 if (dev->open != &skge_up || !skge_debug)
3747 goto done;
3748
3749 skge = netdev_priv(dev);
3750 switch(event) {
3751 case NETDEV_CHANGENAME:
3752 if (skge->debugfs) {
3753 d = debugfs_rename(skge_debug, skge->debugfs,
3754 skge_debug, dev->name);
3755 if (d)
3756 skge->debugfs = d;
3757 else {
3758 pr_info(PFX "%s: rename failed\n", dev->name);
3759 debugfs_remove(skge->debugfs);
3760 }
3761 }
3762 break;
3763
3764 case NETDEV_GOING_DOWN:
3765 if (skge->debugfs) {
3766 debugfs_remove(skge->debugfs);
3767 skge->debugfs = NULL;
3768 }
3769 break;
3770
3771 case NETDEV_UP:
3772 d = debugfs_create_file(dev->name, S_IRUGO,
3773 skge_debug, dev,
3774 &skge_debug_fops);
3775 if (!d || IS_ERR(d))
3776 pr_info(PFX "%s: debugfs create failed\n",
3777 dev->name);
3778 else
3779 skge->debugfs = d;
3780 break;
3781 }
3782
3783done:
3784 return NOTIFY_DONE;
3785}
3786
3787static struct notifier_block skge_notifier = {
3788 .notifier_call = skge_device_event,
3789};
3790
3791
3792static __init void skge_debug_init(void)
3793{
3794 struct dentry *ent;
3795
3796 ent = debugfs_create_dir("skge", NULL);
3797 if (!ent || IS_ERR(ent)) {
3798 pr_info(PFX "debugfs create directory failed\n");
3799 return;
3800 }
3801
3802 skge_debug = ent;
3803 register_netdevice_notifier(&skge_notifier);
3804}
3805
3806static __exit void skge_debug_cleanup(void)
3807{
3808 if (skge_debug) {
3809 unregister_netdevice_notifier(&skge_notifier);
3810 debugfs_remove(skge_debug);
3811 skge_debug = NULL;
3812 }
3813}
3814
3815#else
3816#define skge_debug_init()
3817#define skge_debug_cleanup()
3818#endif
3819
3543/* Initialize network device */ 3820/* Initialize network device */
3544static struct net_device *skge_devinit(struct skge_hw *hw, int port, 3821static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3545 int highmem) 3822 int highmem)
@@ -3904,12 +4181,14 @@ static struct pci_driver skge_driver = {
3904 4181
3905static int __init skge_init_module(void) 4182static int __init skge_init_module(void)
3906{ 4183{
4184 skge_debug_init();
3907 return pci_register_driver(&skge_driver); 4185 return pci_register_driver(&skge_driver);
3908} 4186}
3909 4187
3910static void __exit skge_cleanup_module(void) 4188static void __exit skge_cleanup_module(void)
3911{ 4189{
3912 pci_unregister_driver(&skge_driver); 4190 pci_unregister_driver(&skge_driver);
4191 skge_debug_cleanup();
3913} 4192}
3914 4193
3915module_init(skge_init_module); 4194module_init(skge_init_module);