diff options
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r-- | drivers/net/tg3.c | 1254 |
1 files changed, 853 insertions, 401 deletions
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index ba5d3fe753b6..3a74d2168598 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -68,8 +68,8 @@ | |||
68 | 68 | ||
69 | #define DRV_MODULE_NAME "tg3" | 69 | #define DRV_MODULE_NAME "tg3" |
70 | #define PFX DRV_MODULE_NAME ": " | 70 | #define PFX DRV_MODULE_NAME ": " |
71 | #define DRV_MODULE_VERSION "3.102" | 71 | #define DRV_MODULE_VERSION "3.105" |
72 | #define DRV_MODULE_RELDATE "September 1, 2009" | 72 | #define DRV_MODULE_RELDATE "December 2, 2009" |
73 | 73 | ||
74 | #define TG3_DEF_MAC_MODE 0 | 74 | #define TG3_DEF_MAC_MODE 0 |
75 | #define TG3_DEF_RX_MODE 0 | 75 | #define TG3_DEF_RX_MODE 0 |
@@ -137,6 +137,12 @@ | |||
137 | #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) | 137 | #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) |
138 | #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) | 138 | #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) |
139 | 139 | ||
140 | #define TG3_RX_STD_BUFF_RING_SIZE \ | ||
141 | (sizeof(struct ring_info) * TG3_RX_RING_SIZE) | ||
142 | |||
143 | #define TG3_RX_JMB_BUFF_RING_SIZE \ | ||
144 | (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) | ||
145 | |||
140 | /* minimum number of free TX descriptors required to wake up TX process */ | 146 | /* minimum number of free TX descriptors required to wake up TX process */ |
141 | #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) | 147 | #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) |
142 | 148 | ||
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = { | |||
235 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, | 241 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, |
236 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, | 242 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, |
237 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, | 243 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, |
244 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, | ||
245 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, | ||
246 | {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)}, | ||
238 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, | 247 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, |
239 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, | 248 | {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, |
240 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, | 249 | {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, |
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val) | |||
396 | TG3_64BIT_REG_LOW, val); | 405 | TG3_64BIT_REG_LOW, val); |
397 | return; | 406 | return; |
398 | } | 407 | } |
399 | if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { | 408 | if (off == TG3_RX_STD_PROD_IDX_REG) { |
400 | pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + | 409 | pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + |
401 | TG3_64BIT_REG_LOW, val); | 410 | TG3_64BIT_REG_LOW, val); |
402 | return; | 411 | return; |
@@ -937,9 +946,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp) | |||
937 | u32 val; | 946 | u32 val; |
938 | struct phy_device *phydev; | 947 | struct phy_device *phydev; |
939 | 948 | ||
940 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 949 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
941 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { | 950 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { |
942 | case TG3_PHY_ID_BCM50610: | 951 | case TG3_PHY_ID_BCM50610: |
952 | case TG3_PHY_ID_BCM50610M: | ||
943 | val = MAC_PHYCFG2_50610_LED_MODES; | 953 | val = MAC_PHYCFG2_50610_LED_MODES; |
944 | break; | 954 | break; |
945 | case TG3_PHY_ID_BCMAC131: | 955 | case TG3_PHY_ID_BCMAC131: |
@@ -1031,7 +1041,7 @@ static void tg3_mdio_start(struct tg3 *tp) | |||
1031 | if (is_serdes) | 1041 | if (is_serdes) |
1032 | tp->phy_addr += 7; | 1042 | tp->phy_addr += 7; |
1033 | } else | 1043 | } else |
1034 | tp->phy_addr = PHY_ADDR; | 1044 | tp->phy_addr = TG3_PHY_MII_ADDR; |
1035 | 1045 | ||
1036 | if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && | 1046 | if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && |
1037 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | 1047 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) |
@@ -1062,7 +1072,7 @@ static int tg3_mdio_init(struct tg3 *tp) | |||
1062 | tp->mdio_bus->read = &tg3_mdio_read; | 1072 | tp->mdio_bus->read = &tg3_mdio_read; |
1063 | tp->mdio_bus->write = &tg3_mdio_write; | 1073 | tp->mdio_bus->write = &tg3_mdio_write; |
1064 | tp->mdio_bus->reset = &tg3_mdio_reset; | 1074 | tp->mdio_bus->reset = &tg3_mdio_reset; |
1065 | tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR); | 1075 | tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR); |
1066 | tp->mdio_bus->irq = &tp->mdio_irq[0]; | 1076 | tp->mdio_bus->irq = &tp->mdio_irq[0]; |
1067 | 1077 | ||
1068 | for (i = 0; i < PHY_MAX_ADDR; i++) | 1078 | for (i = 0; i < PHY_MAX_ADDR; i++) |
@@ -1084,7 +1094,7 @@ static int tg3_mdio_init(struct tg3 *tp) | |||
1084 | return i; | 1094 | return i; |
1085 | } | 1095 | } |
1086 | 1096 | ||
1087 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1097 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1088 | 1098 | ||
1089 | if (!phydev || !phydev->drv) { | 1099 | if (!phydev || !phydev->drv) { |
1090 | printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); | 1100 | printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); |
@@ -1096,8 +1106,14 @@ static int tg3_mdio_init(struct tg3 *tp) | |||
1096 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { | 1106 | switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { |
1097 | case TG3_PHY_ID_BCM57780: | 1107 | case TG3_PHY_ID_BCM57780: |
1098 | phydev->interface = PHY_INTERFACE_MODE_GMII; | 1108 | phydev->interface = PHY_INTERFACE_MODE_GMII; |
1109 | phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; | ||
1099 | break; | 1110 | break; |
1100 | case TG3_PHY_ID_BCM50610: | 1111 | case TG3_PHY_ID_BCM50610: |
1112 | case TG3_PHY_ID_BCM50610M: | ||
1113 | phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE | | ||
1114 | PHY_BRCM_RX_REFCLK_UNUSED | | ||
1115 | PHY_BRCM_DIS_TXCRXC_NOENRGY | | ||
1116 | PHY_BRCM_AUTO_PWRDWN_ENABLE; | ||
1101 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) | 1117 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) |
1102 | phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; | 1118 | phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; |
1103 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) | 1119 | if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) |
@@ -1111,6 +1127,7 @@ static int tg3_mdio_init(struct tg3 *tp) | |||
1111 | case TG3_PHY_ID_RTL8201E: | 1127 | case TG3_PHY_ID_RTL8201E: |
1112 | case TG3_PHY_ID_BCMAC131: | 1128 | case TG3_PHY_ID_BCMAC131: |
1113 | phydev->interface = PHY_INTERFACE_MODE_MII; | 1129 | phydev->interface = PHY_INTERFACE_MODE_MII; |
1130 | phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE; | ||
1114 | tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; | 1131 | tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; |
1115 | break; | 1132 | break; |
1116 | } | 1133 | } |
@@ -1311,7 +1328,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv) | |||
1311 | u32 old_tx_mode = tp->tx_mode; | 1328 | u32 old_tx_mode = tp->tx_mode; |
1312 | 1329 | ||
1313 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) | 1330 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) |
1314 | autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg; | 1331 | autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg; |
1315 | else | 1332 | else |
1316 | autoneg = tp->link_config.autoneg; | 1333 | autoneg = tp->link_config.autoneg; |
1317 | 1334 | ||
@@ -1348,7 +1365,7 @@ static void tg3_adjust_link(struct net_device *dev) | |||
1348 | u8 oldflowctrl, linkmesg = 0; | 1365 | u8 oldflowctrl, linkmesg = 0; |
1349 | u32 mac_mode, lcl_adv, rmt_adv; | 1366 | u32 mac_mode, lcl_adv, rmt_adv; |
1350 | struct tg3 *tp = netdev_priv(dev); | 1367 | struct tg3 *tp = netdev_priv(dev); |
1351 | struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1368 | struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1352 | 1369 | ||
1353 | spin_lock_bh(&tp->lock); | 1370 | spin_lock_bh(&tp->lock); |
1354 | 1371 | ||
@@ -1363,8 +1380,11 @@ static void tg3_adjust_link(struct net_device *dev) | |||
1363 | 1380 | ||
1364 | if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) | 1381 | if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) |
1365 | mac_mode |= MAC_MODE_PORT_MODE_MII; | 1382 | mac_mode |= MAC_MODE_PORT_MODE_MII; |
1366 | else | 1383 | else if (phydev->speed == SPEED_1000 || |
1384 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) | ||
1367 | mac_mode |= MAC_MODE_PORT_MODE_GMII; | 1385 | mac_mode |= MAC_MODE_PORT_MODE_GMII; |
1386 | else | ||
1387 | mac_mode |= MAC_MODE_PORT_MODE_MII; | ||
1368 | 1388 | ||
1369 | if (phydev->duplex == DUPLEX_HALF) | 1389 | if (phydev->duplex == DUPLEX_HALF) |
1370 | mac_mode |= MAC_MODE_HALF_DUPLEX; | 1390 | mac_mode |= MAC_MODE_HALF_DUPLEX; |
@@ -1434,7 +1454,7 @@ static int tg3_phy_init(struct tg3 *tp) | |||
1434 | /* Bring the PHY back to a known state. */ | 1454 | /* Bring the PHY back to a known state. */ |
1435 | tg3_bmcr_reset(tp); | 1455 | tg3_bmcr_reset(tp); |
1436 | 1456 | ||
1437 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1457 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1438 | 1458 | ||
1439 | /* Attach the MAC to the PHY. */ | 1459 | /* Attach the MAC to the PHY. */ |
1440 | phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, | 1460 | phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, |
@@ -1461,7 +1481,7 @@ static int tg3_phy_init(struct tg3 *tp) | |||
1461 | SUPPORTED_Asym_Pause); | 1481 | SUPPORTED_Asym_Pause); |
1462 | break; | 1482 | break; |
1463 | default: | 1483 | default: |
1464 | phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); | 1484 | phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
1465 | return -EINVAL; | 1485 | return -EINVAL; |
1466 | } | 1486 | } |
1467 | 1487 | ||
@@ -1479,7 +1499,7 @@ static void tg3_phy_start(struct tg3 *tp) | |||
1479 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 1499 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
1480 | return; | 1500 | return; |
1481 | 1501 | ||
1482 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 1502 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
1483 | 1503 | ||
1484 | if (tp->link_config.phy_is_low_power) { | 1504 | if (tp->link_config.phy_is_low_power) { |
1485 | tp->link_config.phy_is_low_power = 0; | 1505 | tp->link_config.phy_is_low_power = 0; |
@@ -1499,13 +1519,13 @@ static void tg3_phy_stop(struct tg3 *tp) | |||
1499 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 1519 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
1500 | return; | 1520 | return; |
1501 | 1521 | ||
1502 | phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]); | 1522 | phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
1503 | } | 1523 | } |
1504 | 1524 | ||
1505 | static void tg3_phy_fini(struct tg3 *tp) | 1525 | static void tg3_phy_fini(struct tg3 *tp) |
1506 | { | 1526 | { |
1507 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { | 1527 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { |
1508 | phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); | 1528 | phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
1509 | tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; | 1529 | tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; |
1510 | } | 1530 | } |
1511 | } | 1531 | } |
@@ -2149,6 +2169,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) | |||
2149 | tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); | 2169 | tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); |
2150 | udelay(40); | 2170 | udelay(40); |
2151 | return; | 2171 | return; |
2172 | } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) { | ||
2173 | u32 phytest; | ||
2174 | if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) { | ||
2175 | u32 phy; | ||
2176 | |||
2177 | tg3_writephy(tp, MII_ADVERTISE, 0); | ||
2178 | tg3_writephy(tp, MII_BMCR, | ||
2179 | BMCR_ANENABLE | BMCR_ANRESTART); | ||
2180 | |||
2181 | tg3_writephy(tp, MII_TG3_FET_TEST, | ||
2182 | phytest | MII_TG3_FET_SHADOW_EN); | ||
2183 | if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) { | ||
2184 | phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD; | ||
2185 | tg3_writephy(tp, | ||
2186 | MII_TG3_FET_SHDW_AUXMODE4, | ||
2187 | phy); | ||
2188 | } | ||
2189 | tg3_writephy(tp, MII_TG3_FET_TEST, phytest); | ||
2190 | } | ||
2191 | return; | ||
2152 | } else if (do_low_power) { | 2192 | } else if (do_low_power) { |
2153 | tg3_writephy(tp, MII_TG3_EXT_CTRL, | 2193 | tg3_writephy(tp, MII_TG3_EXT_CTRL, |
2154 | MII_TG3_EXT_CTRL_FORCE_LED_OFF); | 2194 | MII_TG3_EXT_CTRL_FORCE_LED_OFF); |
@@ -2218,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp) | |||
2218 | static void tg3_enable_nvram_access(struct tg3 *tp) | 2258 | static void tg3_enable_nvram_access(struct tg3 *tp) |
2219 | { | 2259 | { |
2220 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 2260 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
2221 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { | 2261 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { |
2222 | u32 nvaccess = tr32(NVRAM_ACCESS); | 2262 | u32 nvaccess = tr32(NVRAM_ACCESS); |
2223 | 2263 | ||
2224 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); | 2264 | tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); |
@@ -2229,7 +2269,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp) | |||
2229 | static void tg3_disable_nvram_access(struct tg3 *tp) | 2269 | static void tg3_disable_nvram_access(struct tg3 *tp) |
2230 | { | 2270 | { |
2231 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 2271 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
2232 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { | 2272 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) { |
2233 | u32 nvaccess = tr32(NVRAM_ACCESS); | 2273 | u32 nvaccess = tr32(NVRAM_ACCESS); |
2234 | 2274 | ||
2235 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); | 2275 | tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); |
@@ -2474,7 +2514,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) | |||
2474 | struct phy_device *phydev; | 2514 | struct phy_device *phydev; |
2475 | u32 phyid, advertising; | 2515 | u32 phyid, advertising; |
2476 | 2516 | ||
2477 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 2517 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
2478 | 2518 | ||
2479 | tp->link_config.phy_is_low_power = 1; | 2519 | tp->link_config.phy_is_low_power = 1; |
2480 | 2520 | ||
@@ -3243,15 +3283,6 @@ relink: | |||
3243 | pci_write_config_word(tp->pdev, | 3283 | pci_write_config_word(tp->pdev, |
3244 | tp->pcie_cap + PCI_EXP_LNKCTL, | 3284 | tp->pcie_cap + PCI_EXP_LNKCTL, |
3245 | newlnkctl); | 3285 | newlnkctl); |
3246 | } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) { | ||
3247 | u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL); | ||
3248 | if (tp->link_config.active_speed == SPEED_100 || | ||
3249 | tp->link_config.active_speed == SPEED_10) | ||
3250 | newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | ||
3251 | else | ||
3252 | newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN; | ||
3253 | if (newreg != oldreg) | ||
3254 | tw32(TG3_PCIE_LNKCTL, newreg); | ||
3255 | } | 3286 | } |
3256 | 3287 | ||
3257 | if (current_link_up != netif_carrier_ok(tp->dev)) { | 3288 | if (current_link_up != netif_carrier_ok(tp->dev)) { |
@@ -4320,13 +4351,13 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4320 | struct netdev_queue *txq; | 4351 | struct netdev_queue *txq; |
4321 | int index = tnapi - tp->napi; | 4352 | int index = tnapi - tp->napi; |
4322 | 4353 | ||
4323 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 4354 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
4324 | index--; | 4355 | index--; |
4325 | 4356 | ||
4326 | txq = netdev_get_tx_queue(tp->dev, index); | 4357 | txq = netdev_get_tx_queue(tp->dev, index); |
4327 | 4358 | ||
4328 | while (sw_idx != hw_idx) { | 4359 | while (sw_idx != hw_idx) { |
4329 | struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; | 4360 | struct ring_info *ri = &tnapi->tx_buffers[sw_idx]; |
4330 | struct sk_buff *skb = ri->skb; | 4361 | struct sk_buff *skb = ri->skb; |
4331 | int i, tx_bug = 0; | 4362 | int i, tx_bug = 0; |
4332 | 4363 | ||
@@ -4335,7 +4366,10 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4335 | return; | 4366 | return; |
4336 | } | 4367 | } |
4337 | 4368 | ||
4338 | skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); | 4369 | pci_unmap_single(tp->pdev, |
4370 | pci_unmap_addr(ri, mapping), | ||
4371 | skb_headlen(skb), | ||
4372 | PCI_DMA_TODEVICE); | ||
4339 | 4373 | ||
4340 | ri->skb = NULL; | 4374 | ri->skb = NULL; |
4341 | 4375 | ||
@@ -4345,6 +4379,11 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4345 | ri = &tnapi->tx_buffers[sw_idx]; | 4379 | ri = &tnapi->tx_buffers[sw_idx]; |
4346 | if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) | 4380 | if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) |
4347 | tx_bug = 1; | 4381 | tx_bug = 1; |
4382 | |||
4383 | pci_unmap_page(tp->pdev, | ||
4384 | pci_unmap_addr(ri, mapping), | ||
4385 | skb_shinfo(skb)->frags[i].size, | ||
4386 | PCI_DMA_TODEVICE); | ||
4348 | sw_idx = NEXT_TX(sw_idx); | 4387 | sw_idx = NEXT_TX(sw_idx); |
4349 | } | 4388 | } |
4350 | 4389 | ||
@@ -4375,6 +4414,17 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4375 | } | 4414 | } |
4376 | } | 4415 | } |
4377 | 4416 | ||
4417 | static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) | ||
4418 | { | ||
4419 | if (!ri->skb) | ||
4420 | return; | ||
4421 | |||
4422 | pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), | ||
4423 | map_sz, PCI_DMA_FROMDEVICE); | ||
4424 | dev_kfree_skb_any(ri->skb); | ||
4425 | ri->skb = NULL; | ||
4426 | } | ||
4427 | |||
4378 | /* Returns size of skb allocated or < 0 on error. | 4428 | /* Returns size of skb allocated or < 0 on error. |
4379 | * | 4429 | * |
4380 | * We only need to fill in the address because the other members | 4430 | * We only need to fill in the address because the other members |
@@ -4386,16 +4436,14 @@ static void tg3_tx(struct tg3_napi *tnapi) | |||
4386 | * buffers the cpu only reads the last cacheline of the RX descriptor | 4436 | * buffers the cpu only reads the last cacheline of the RX descriptor |
4387 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). | 4437 | * (to fetch the error flags, vlan tag, checksum, and opaque cookie). |
4388 | */ | 4438 | */ |
4389 | static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | 4439 | static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, |
4390 | int src_idx, u32 dest_idx_unmasked) | 4440 | u32 opaque_key, u32 dest_idx_unmasked) |
4391 | { | 4441 | { |
4392 | struct tg3 *tp = tnapi->tp; | ||
4393 | struct tg3_rx_buffer_desc *desc; | 4442 | struct tg3_rx_buffer_desc *desc; |
4394 | struct ring_info *map, *src_map; | 4443 | struct ring_info *map, *src_map; |
4395 | struct sk_buff *skb; | 4444 | struct sk_buff *skb; |
4396 | dma_addr_t mapping; | 4445 | dma_addr_t mapping; |
4397 | int skb_size, dest_idx; | 4446 | int skb_size, dest_idx; |
4398 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | ||
4399 | 4447 | ||
4400 | src_map = NULL; | 4448 | src_map = NULL; |
4401 | switch (opaque_key) { | 4449 | switch (opaque_key) { |
@@ -4403,8 +4451,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4403 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 4451 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; |
4404 | desc = &tpr->rx_std[dest_idx]; | 4452 | desc = &tpr->rx_std[dest_idx]; |
4405 | map = &tpr->rx_std_buffers[dest_idx]; | 4453 | map = &tpr->rx_std_buffers[dest_idx]; |
4406 | if (src_idx >= 0) | ||
4407 | src_map = &tpr->rx_std_buffers[src_idx]; | ||
4408 | skb_size = tp->rx_pkt_map_sz; | 4454 | skb_size = tp->rx_pkt_map_sz; |
4409 | break; | 4455 | break; |
4410 | 4456 | ||
@@ -4412,8 +4458,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4412 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 4458 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; |
4413 | desc = &tpr->rx_jmb[dest_idx].std; | 4459 | desc = &tpr->rx_jmb[dest_idx].std; |
4414 | map = &tpr->rx_jmb_buffers[dest_idx]; | 4460 | map = &tpr->rx_jmb_buffers[dest_idx]; |
4415 | if (src_idx >= 0) | ||
4416 | src_map = &tpr->rx_jmb_buffers[src_idx]; | ||
4417 | skb_size = TG3_RX_JMB_MAP_SZ; | 4461 | skb_size = TG3_RX_JMB_MAP_SZ; |
4418 | break; | 4462 | break; |
4419 | 4463 | ||
@@ -4435,13 +4479,14 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4435 | 4479 | ||
4436 | mapping = pci_map_single(tp->pdev, skb->data, skb_size, | 4480 | mapping = pci_map_single(tp->pdev, skb->data, skb_size, |
4437 | PCI_DMA_FROMDEVICE); | 4481 | PCI_DMA_FROMDEVICE); |
4482 | if (pci_dma_mapping_error(tp->pdev, mapping)) { | ||
4483 | dev_kfree_skb(skb); | ||
4484 | return -EIO; | ||
4485 | } | ||
4438 | 4486 | ||
4439 | map->skb = skb; | 4487 | map->skb = skb; |
4440 | pci_unmap_addr_set(map, mapping, mapping); | 4488 | pci_unmap_addr_set(map, mapping, mapping); |
4441 | 4489 | ||
4442 | if (src_map != NULL) | ||
4443 | src_map->skb = NULL; | ||
4444 | |||
4445 | desc->addr_hi = ((u64)mapping >> 32); | 4490 | desc->addr_hi = ((u64)mapping >> 32); |
4446 | desc->addr_lo = ((u64)mapping & 0xffffffff); | 4491 | desc->addr_lo = ((u64)mapping & 0xffffffff); |
4447 | 4492 | ||
@@ -4452,30 +4497,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, | |||
4452 | * members of the RX descriptor are invariant. See notes above | 4497 | * members of the RX descriptor are invariant. See notes above |
4453 | * tg3_alloc_rx_skb for full details. | 4498 | * tg3_alloc_rx_skb for full details. |
4454 | */ | 4499 | */ |
4455 | static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, | 4500 | static void tg3_recycle_rx(struct tg3_napi *tnapi, |
4456 | int src_idx, u32 dest_idx_unmasked) | 4501 | struct tg3_rx_prodring_set *dpr, |
4502 | u32 opaque_key, int src_idx, | ||
4503 | u32 dest_idx_unmasked) | ||
4457 | { | 4504 | { |
4458 | struct tg3 *tp = tnapi->tp; | 4505 | struct tg3 *tp = tnapi->tp; |
4459 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; | 4506 | struct tg3_rx_buffer_desc *src_desc, *dest_desc; |
4460 | struct ring_info *src_map, *dest_map; | 4507 | struct ring_info *src_map, *dest_map; |
4461 | int dest_idx; | 4508 | int dest_idx; |
4462 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 4509 | struct tg3_rx_prodring_set *spr = &tp->prodring[0]; |
4463 | 4510 | ||
4464 | switch (opaque_key) { | 4511 | switch (opaque_key) { |
4465 | case RXD_OPAQUE_RING_STD: | 4512 | case RXD_OPAQUE_RING_STD: |
4466 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; | 4513 | dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; |
4467 | dest_desc = &tpr->rx_std[dest_idx]; | 4514 | dest_desc = &dpr->rx_std[dest_idx]; |
4468 | dest_map = &tpr->rx_std_buffers[dest_idx]; | 4515 | dest_map = &dpr->rx_std_buffers[dest_idx]; |
4469 | src_desc = &tpr->rx_std[src_idx]; | 4516 | src_desc = &spr->rx_std[src_idx]; |
4470 | src_map = &tpr->rx_std_buffers[src_idx]; | 4517 | src_map = &spr->rx_std_buffers[src_idx]; |
4471 | break; | 4518 | break; |
4472 | 4519 | ||
4473 | case RXD_OPAQUE_RING_JUMBO: | 4520 | case RXD_OPAQUE_RING_JUMBO: |
4474 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; | 4521 | dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; |
4475 | dest_desc = &tpr->rx_jmb[dest_idx].std; | 4522 | dest_desc = &dpr->rx_jmb[dest_idx].std; |
4476 | dest_map = &tpr->rx_jmb_buffers[dest_idx]; | 4523 | dest_map = &dpr->rx_jmb_buffers[dest_idx]; |
4477 | src_desc = &tpr->rx_jmb[src_idx].std; | 4524 | src_desc = &spr->rx_jmb[src_idx].std; |
4478 | src_map = &tpr->rx_jmb_buffers[src_idx]; | 4525 | src_map = &spr->rx_jmb_buffers[src_idx]; |
4479 | break; | 4526 | break; |
4480 | 4527 | ||
4481 | default: | 4528 | default: |
@@ -4487,7 +4534,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, | |||
4487 | pci_unmap_addr(src_map, mapping)); | 4534 | pci_unmap_addr(src_map, mapping)); |
4488 | dest_desc->addr_hi = src_desc->addr_hi; | 4535 | dest_desc->addr_hi = src_desc->addr_hi; |
4489 | dest_desc->addr_lo = src_desc->addr_lo; | 4536 | dest_desc->addr_lo = src_desc->addr_lo; |
4490 | |||
4491 | src_map->skb = NULL; | 4537 | src_map->skb = NULL; |
4492 | } | 4538 | } |
4493 | 4539 | ||
@@ -4519,10 +4565,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4519 | { | 4565 | { |
4520 | struct tg3 *tp = tnapi->tp; | 4566 | struct tg3 *tp = tnapi->tp; |
4521 | u32 work_mask, rx_std_posted = 0; | 4567 | u32 work_mask, rx_std_posted = 0; |
4568 | u32 std_prod_idx, jmb_prod_idx; | ||
4522 | u32 sw_idx = tnapi->rx_rcb_ptr; | 4569 | u32 sw_idx = tnapi->rx_rcb_ptr; |
4523 | u16 hw_idx; | 4570 | u16 hw_idx; |
4524 | int received; | 4571 | int received; |
4525 | struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; | 4572 | struct tg3_rx_prodring_set *tpr = tnapi->prodring; |
4526 | 4573 | ||
4527 | hw_idx = *(tnapi->rx_rcb_prod_idx); | 4574 | hw_idx = *(tnapi->rx_rcb_prod_idx); |
4528 | /* | 4575 | /* |
@@ -4532,7 +4579,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4532 | rmb(); | 4579 | rmb(); |
4533 | work_mask = 0; | 4580 | work_mask = 0; |
4534 | received = 0; | 4581 | received = 0; |
4582 | std_prod_idx = tpr->rx_std_prod_idx; | ||
4583 | jmb_prod_idx = tpr->rx_jmb_prod_idx; | ||
4535 | while (sw_idx != hw_idx && budget > 0) { | 4584 | while (sw_idx != hw_idx && budget > 0) { |
4585 | struct ring_info *ri; | ||
4536 | struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; | 4586 | struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; |
4537 | unsigned int len; | 4587 | unsigned int len; |
4538 | struct sk_buff *skb; | 4588 | struct sk_buff *skb; |
@@ -4542,16 +4592,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4542 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; | 4592 | desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; |
4543 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; | 4593 | opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; |
4544 | if (opaque_key == RXD_OPAQUE_RING_STD) { | 4594 | if (opaque_key == RXD_OPAQUE_RING_STD) { |
4545 | struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; | 4595 | ri = &tp->prodring[0].rx_std_buffers[desc_idx]; |
4546 | dma_addr = pci_unmap_addr(ri, mapping); | 4596 | dma_addr = pci_unmap_addr(ri, mapping); |
4547 | skb = ri->skb; | 4597 | skb = ri->skb; |
4548 | post_ptr = &tpr->rx_std_ptr; | 4598 | post_ptr = &std_prod_idx; |
4549 | rx_std_posted++; | 4599 | rx_std_posted++; |
4550 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { | 4600 | } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { |
4551 | struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; | 4601 | ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; |
4552 | dma_addr = pci_unmap_addr(ri, mapping); | 4602 | dma_addr = pci_unmap_addr(ri, mapping); |
4553 | skb = ri->skb; | 4603 | skb = ri->skb; |
4554 | post_ptr = &tpr->rx_jmb_ptr; | 4604 | post_ptr = &jmb_prod_idx; |
4555 | } else | 4605 | } else |
4556 | goto next_pkt_nopost; | 4606 | goto next_pkt_nopost; |
4557 | 4607 | ||
@@ -4560,7 +4610,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4560 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && | 4610 | if ((desc->err_vlan & RXD_ERR_MASK) != 0 && |
4561 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { | 4611 | (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { |
4562 | drop_it: | 4612 | drop_it: |
4563 | tg3_recycle_rx(tnapi, opaque_key, | 4613 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
4564 | desc_idx, *post_ptr); | 4614 | desc_idx, *post_ptr); |
4565 | drop_it_no_recycle: | 4615 | drop_it_no_recycle: |
4566 | /* Other statistics kept track of by card. */ | 4616 | /* Other statistics kept track of by card. */ |
@@ -4571,20 +4621,21 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4571 | len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - | 4621 | len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - |
4572 | ETH_FCS_LEN; | 4622 | ETH_FCS_LEN; |
4573 | 4623 | ||
4574 | if (len > RX_COPY_THRESHOLD | 4624 | if (len > RX_COPY_THRESHOLD && |
4575 | && tp->rx_offset == NET_IP_ALIGN | 4625 | tp->rx_offset == NET_IP_ALIGN) { |
4576 | /* rx_offset will likely not equal NET_IP_ALIGN | 4626 | /* rx_offset will likely not equal NET_IP_ALIGN |
4577 | * if this is a 5701 card running in PCI-X mode | 4627 | * if this is a 5701 card running in PCI-X mode |
4578 | * [see tg3_get_invariants()] | 4628 | * [see tg3_get_invariants()] |
4579 | */ | 4629 | */ |
4580 | ) { | ||
4581 | int skb_size; | 4630 | int skb_size; |
4582 | 4631 | ||
4583 | skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, | 4632 | skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, |
4584 | desc_idx, *post_ptr); | 4633 | *post_ptr); |
4585 | if (skb_size < 0) | 4634 | if (skb_size < 0) |
4586 | goto drop_it; | 4635 | goto drop_it; |
4587 | 4636 | ||
4637 | ri->skb = NULL; | ||
4638 | |||
4588 | pci_unmap_single(tp->pdev, dma_addr, skb_size, | 4639 | pci_unmap_single(tp->pdev, dma_addr, skb_size, |
4589 | PCI_DMA_FROMDEVICE); | 4640 | PCI_DMA_FROMDEVICE); |
4590 | 4641 | ||
@@ -4592,7 +4643,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) | |||
4592 | } else { | 4643 | } else { |
4593 | struct sk_buff *copy_skb; | 4644 | struct sk_buff *copy_skb; |
4594 | 4645 | ||
4595 | tg3_recycle_rx(tnapi, opaque_key, | 4646 | tg3_recycle_rx(tnapi, tpr, opaque_key, |
4596 | desc_idx, *post_ptr); | 4647 | desc_idx, *post_ptr); |
4597 | 4648 | ||
4598 | copy_skb = netdev_alloc_skb(tp->dev, | 4649 | copy_skb = netdev_alloc_skb(tp->dev, |
@@ -4643,9 +4694,7 @@ next_pkt: | |||
4643 | 4694 | ||
4644 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { | 4695 | if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { |
4645 | u32 idx = *post_ptr % TG3_RX_RING_SIZE; | 4696 | u32 idx = *post_ptr % TG3_RX_RING_SIZE; |
4646 | 4697 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx); | |
4647 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + | ||
4648 | TG3_64BIT_REG_LOW, idx); | ||
4649 | work_mask &= ~RXD_OPAQUE_RING_STD; | 4698 | work_mask &= ~RXD_OPAQUE_RING_STD; |
4650 | rx_std_posted = 0; | 4699 | rx_std_posted = 0; |
4651 | } | 4700 | } |
@@ -4665,33 +4714,45 @@ next_pkt_nopost: | |||
4665 | tw32_rx_mbox(tnapi->consmbox, sw_idx); | 4714 | tw32_rx_mbox(tnapi->consmbox, sw_idx); |
4666 | 4715 | ||
4667 | /* Refill RX ring(s). */ | 4716 | /* Refill RX ring(s). */ |
4668 | if (work_mask & RXD_OPAQUE_RING_STD) { | 4717 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) { |
4669 | sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; | 4718 | if (work_mask & RXD_OPAQUE_RING_STD) { |
4670 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, | 4719 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; |
4671 | sw_idx); | 4720 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, |
4672 | } | 4721 | tpr->rx_std_prod_idx); |
4673 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { | 4722 | } |
4674 | sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; | 4723 | if (work_mask & RXD_OPAQUE_RING_JUMBO) { |
4675 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, | 4724 | tpr->rx_jmb_prod_idx = jmb_prod_idx % |
4676 | sw_idx); | 4725 | TG3_RX_JUMBO_RING_SIZE; |
4726 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, | ||
4727 | tpr->rx_jmb_prod_idx); | ||
4728 | } | ||
4729 | mmiowb(); | ||
4730 | } else if (work_mask) { | ||
4731 | /* rx_std_buffers[] and rx_jmb_buffers[] entries must be | ||
4732 | * updated before the producer indices can be updated. | ||
4733 | */ | ||
4734 | smp_wmb(); | ||
4735 | |||
4736 | tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; | ||
4737 | tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; | ||
4738 | |||
4739 | napi_schedule(&tp->napi[1].napi); | ||
4677 | } | 4740 | } |
4678 | mmiowb(); | ||
4679 | 4741 | ||
4680 | return received; | 4742 | return received; |
4681 | } | 4743 | } |
4682 | 4744 | ||
4683 | static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | 4745 | static void tg3_poll_link(struct tg3 *tp) |
4684 | { | 4746 | { |
4685 | struct tg3 *tp = tnapi->tp; | ||
4686 | struct tg3_hw_status *sblk = tnapi->hw_status; | ||
4687 | |||
4688 | /* handle link change and other phy events */ | 4747 | /* handle link change and other phy events */ |
4689 | if (!(tp->tg3_flags & | 4748 | if (!(tp->tg3_flags & |
4690 | (TG3_FLAG_USE_LINKCHG_REG | | 4749 | (TG3_FLAG_USE_LINKCHG_REG | |
4691 | TG3_FLAG_POLL_SERDES))) { | 4750 | TG3_FLAG_POLL_SERDES))) { |
4751 | struct tg3_hw_status *sblk = tp->napi[0].hw_status; | ||
4752 | |||
4692 | if (sblk->status & SD_STATUS_LINK_CHG) { | 4753 | if (sblk->status & SD_STATUS_LINK_CHG) { |
4693 | sblk->status = SD_STATUS_UPDATED | | 4754 | sblk->status = SD_STATUS_UPDATED | |
4694 | (sblk->status & ~SD_STATUS_LINK_CHG); | 4755 | (sblk->status & ~SD_STATUS_LINK_CHG); |
4695 | spin_lock(&tp->lock); | 4756 | spin_lock(&tp->lock); |
4696 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 4757 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
4697 | tw32_f(MAC_STATUS, | 4758 | tw32_f(MAC_STATUS, |
@@ -4705,6 +4766,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | |||
4705 | spin_unlock(&tp->lock); | 4766 | spin_unlock(&tp->lock); |
4706 | } | 4767 | } |
4707 | } | 4768 | } |
4769 | } | ||
4770 | |||
4771 | static void tg3_rx_prodring_xfer(struct tg3 *tp, | ||
4772 | struct tg3_rx_prodring_set *dpr, | ||
4773 | struct tg3_rx_prodring_set *spr) | ||
4774 | { | ||
4775 | u32 si, di, cpycnt, src_prod_idx; | ||
4776 | int i; | ||
4777 | |||
4778 | while (1) { | ||
4779 | src_prod_idx = spr->rx_std_prod_idx; | ||
4780 | |||
4781 | /* Make sure updates to the rx_std_buffers[] entries and the | ||
4782 | * standard producer index are seen in the correct order. | ||
4783 | */ | ||
4784 | smp_rmb(); | ||
4785 | |||
4786 | if (spr->rx_std_cons_idx == src_prod_idx) | ||
4787 | break; | ||
4788 | |||
4789 | if (spr->rx_std_cons_idx < src_prod_idx) | ||
4790 | cpycnt = src_prod_idx - spr->rx_std_cons_idx; | ||
4791 | else | ||
4792 | cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; | ||
4793 | |||
4794 | cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); | ||
4795 | |||
4796 | si = spr->rx_std_cons_idx; | ||
4797 | di = dpr->rx_std_prod_idx; | ||
4798 | |||
4799 | memcpy(&dpr->rx_std_buffers[di], | ||
4800 | &spr->rx_std_buffers[si], | ||
4801 | cpycnt * sizeof(struct ring_info)); | ||
4802 | |||
4803 | for (i = 0; i < cpycnt; i++, di++, si++) { | ||
4804 | struct tg3_rx_buffer_desc *sbd, *dbd; | ||
4805 | sbd = &spr->rx_std[si]; | ||
4806 | dbd = &dpr->rx_std[di]; | ||
4807 | dbd->addr_hi = sbd->addr_hi; | ||
4808 | dbd->addr_lo = sbd->addr_lo; | ||
4809 | } | ||
4810 | |||
4811 | spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % | ||
4812 | TG3_RX_RING_SIZE; | ||
4813 | dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % | ||
4814 | TG3_RX_RING_SIZE; | ||
4815 | } | ||
4816 | |||
4817 | while (1) { | ||
4818 | src_prod_idx = spr->rx_jmb_prod_idx; | ||
4819 | |||
4820 | /* Make sure updates to the rx_jmb_buffers[] entries and | ||
4821 | * the jumbo producer index are seen in the correct order. | ||
4822 | */ | ||
4823 | smp_rmb(); | ||
4824 | |||
4825 | if (spr->rx_jmb_cons_idx == src_prod_idx) | ||
4826 | break; | ||
4827 | |||
4828 | if (spr->rx_jmb_cons_idx < src_prod_idx) | ||
4829 | cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; | ||
4830 | else | ||
4831 | cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; | ||
4832 | |||
4833 | cpycnt = min(cpycnt, | ||
4834 | TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); | ||
4835 | |||
4836 | si = spr->rx_jmb_cons_idx; | ||
4837 | di = dpr->rx_jmb_prod_idx; | ||
4838 | |||
4839 | memcpy(&dpr->rx_jmb_buffers[di], | ||
4840 | &spr->rx_jmb_buffers[si], | ||
4841 | cpycnt * sizeof(struct ring_info)); | ||
4842 | |||
4843 | for (i = 0; i < cpycnt; i++, di++, si++) { | ||
4844 | struct tg3_rx_buffer_desc *sbd, *dbd; | ||
4845 | sbd = &spr->rx_jmb[si].std; | ||
4846 | dbd = &dpr->rx_jmb[di].std; | ||
4847 | dbd->addr_hi = sbd->addr_hi; | ||
4848 | dbd->addr_lo = sbd->addr_lo; | ||
4849 | } | ||
4850 | |||
4851 | spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % | ||
4852 | TG3_RX_JUMBO_RING_SIZE; | ||
4853 | dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % | ||
4854 | TG3_RX_JUMBO_RING_SIZE; | ||
4855 | } | ||
4856 | } | ||
4857 | |||
4858 | static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | ||
4859 | { | ||
4860 | struct tg3 *tp = tnapi->tp; | ||
4708 | 4861 | ||
4709 | /* run TX completion thread */ | 4862 | /* run TX completion thread */ |
4710 | if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { | 4863 | if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { |
@@ -4720,6 +4873,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | |||
4720 | if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) | 4873 | if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) |
4721 | work_done += tg3_rx(tnapi, budget - work_done); | 4874 | work_done += tg3_rx(tnapi, budget - work_done); |
4722 | 4875 | ||
4876 | if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { | ||
4877 | int i; | ||
4878 | u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx; | ||
4879 | u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx; | ||
4880 | |||
4881 | for (i = 2; i < tp->irq_cnt; i++) | ||
4882 | tg3_rx_prodring_xfer(tp, tnapi->prodring, | ||
4883 | tp->napi[i].prodring); | ||
4884 | |||
4885 | wmb(); | ||
4886 | |||
4887 | if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) { | ||
4888 | u32 mbox = TG3_RX_STD_PROD_IDX_REG; | ||
4889 | tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx); | ||
4890 | } | ||
4891 | |||
4892 | if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) { | ||
4893 | u32 mbox = TG3_RX_JMB_PROD_IDX_REG; | ||
4894 | tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx); | ||
4895 | } | ||
4896 | |||
4897 | mmiowb(); | ||
4898 | } | ||
4899 | |||
4900 | return work_done; | ||
4901 | } | ||
4902 | |||
4903 | static int tg3_poll_msix(struct napi_struct *napi, int budget) | ||
4904 | { | ||
4905 | struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); | ||
4906 | struct tg3 *tp = tnapi->tp; | ||
4907 | int work_done = 0; | ||
4908 | struct tg3_hw_status *sblk = tnapi->hw_status; | ||
4909 | |||
4910 | while (1) { | ||
4911 | work_done = tg3_poll_work(tnapi, work_done, budget); | ||
4912 | |||
4913 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | ||
4914 | goto tx_recovery; | ||
4915 | |||
4916 | if (unlikely(work_done >= budget)) | ||
4917 | break; | ||
4918 | |||
4919 | /* tp->last_tag is used in tg3_restart_ints() below | ||
4920 | * to tell the hw how much work has been processed, | ||
4921 | * so we must read it before checking for more work. | ||
4922 | */ | ||
4923 | tnapi->last_tag = sblk->status_tag; | ||
4924 | tnapi->last_irq_tag = tnapi->last_tag; | ||
4925 | rmb(); | ||
4926 | |||
4927 | /* check for RX/TX work to do */ | ||
4928 | if (sblk->idx[0].tx_consumer == tnapi->tx_cons && | ||
4929 | *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { | ||
4930 | napi_complete(napi); | ||
4931 | /* Reenable interrupts. */ | ||
4932 | tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); | ||
4933 | mmiowb(); | ||
4934 | break; | ||
4935 | } | ||
4936 | } | ||
4937 | |||
4938 | return work_done; | ||
4939 | |||
4940 | tx_recovery: | ||
4941 | /* work_done is guaranteed to be less than budget. */ | ||
4942 | napi_complete(napi); | ||
4943 | schedule_work(&tp->reset_task); | ||
4723 | return work_done; | 4944 | return work_done; |
4724 | } | 4945 | } |
4725 | 4946 | ||
@@ -4731,6 +4952,8 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
4731 | struct tg3_hw_status *sblk = tnapi->hw_status; | 4952 | struct tg3_hw_status *sblk = tnapi->hw_status; |
4732 | 4953 | ||
4733 | while (1) { | 4954 | while (1) { |
4955 | tg3_poll_link(tp); | ||
4956 | |||
4734 | work_done = tg3_poll_work(tnapi, work_done, budget); | 4957 | work_done = tg3_poll_work(tnapi, work_done, budget); |
4735 | 4958 | ||
4736 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) | 4959 | if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) |
@@ -5093,11 +5316,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, | |||
5093 | static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); | 5316 | static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); |
5094 | 5317 | ||
5095 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ | 5318 | /* Workaround 4GB and 40-bit hardware DMA bugs. */ |
5096 | static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | 5319 | static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, |
5097 | u32 last_plus_one, u32 *start, | 5320 | struct sk_buff *skb, u32 last_plus_one, |
5098 | u32 base_flags, u32 mss) | 5321 | u32 *start, u32 base_flags, u32 mss) |
5099 | { | 5322 | { |
5100 | struct tg3_napi *tnapi = &tp->napi[0]; | 5323 | struct tg3 *tp = tnapi->tp; |
5101 | struct sk_buff *new_skb; | 5324 | struct sk_buff *new_skb; |
5102 | dma_addr_t new_addr = 0; | 5325 | dma_addr_t new_addr = 0; |
5103 | u32 entry = *start; | 5326 | u32 entry = *start; |
@@ -5118,16 +5341,21 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | |||
5118 | } else { | 5341 | } else { |
5119 | /* New SKB is guaranteed to be linear. */ | 5342 | /* New SKB is guaranteed to be linear. */ |
5120 | entry = *start; | 5343 | entry = *start; |
5121 | ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE); | 5344 | new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, |
5122 | new_addr = skb_shinfo(new_skb)->dma_head; | 5345 | PCI_DMA_TODEVICE); |
5346 | /* Make sure the mapping succeeded */ | ||
5347 | if (pci_dma_mapping_error(tp->pdev, new_addr)) { | ||
5348 | ret = -1; | ||
5349 | dev_kfree_skb(new_skb); | ||
5350 | new_skb = NULL; | ||
5123 | 5351 | ||
5124 | /* Make sure new skb does not cross any 4G boundaries. | 5352 | /* Make sure new skb does not cross any 4G boundaries. |
5125 | * Drop the packet if it does. | 5353 | * Drop the packet if it does. |
5126 | */ | 5354 | */ |
5127 | if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) { | 5355 | } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && |
5128 | if (!ret) | 5356 | tg3_4g_overflow_test(new_addr, new_skb->len)) { |
5129 | skb_dma_unmap(&tp->pdev->dev, new_skb, | 5357 | pci_unmap_single(tp->pdev, new_addr, new_skb->len, |
5130 | DMA_TO_DEVICE); | 5358 | PCI_DMA_TODEVICE); |
5131 | ret = -1; | 5359 | ret = -1; |
5132 | dev_kfree_skb(new_skb); | 5360 | dev_kfree_skb(new_skb); |
5133 | new_skb = NULL; | 5361 | new_skb = NULL; |
@@ -5141,15 +5369,28 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, | |||
5141 | /* Now clean up the sw ring entries. */ | 5369 | /* Now clean up the sw ring entries. */ |
5142 | i = 0; | 5370 | i = 0; |
5143 | while (entry != last_plus_one) { | 5371 | while (entry != last_plus_one) { |
5372 | int len; | ||
5373 | |||
5144 | if (i == 0) | 5374 | if (i == 0) |
5145 | tnapi->tx_buffers[entry].skb = new_skb; | 5375 | len = skb_headlen(skb); |
5146 | else | 5376 | else |
5377 | len = skb_shinfo(skb)->frags[i-1].size; | ||
5378 | |||
5379 | pci_unmap_single(tp->pdev, | ||
5380 | pci_unmap_addr(&tnapi->tx_buffers[entry], | ||
5381 | mapping), | ||
5382 | len, PCI_DMA_TODEVICE); | ||
5383 | if (i == 0) { | ||
5384 | tnapi->tx_buffers[entry].skb = new_skb; | ||
5385 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5386 | new_addr); | ||
5387 | } else { | ||
5147 | tnapi->tx_buffers[entry].skb = NULL; | 5388 | tnapi->tx_buffers[entry].skb = NULL; |
5389 | } | ||
5148 | entry = NEXT_TX(entry); | 5390 | entry = NEXT_TX(entry); |
5149 | i++; | 5391 | i++; |
5150 | } | 5392 | } |
5151 | 5393 | ||
5152 | skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); | ||
5153 | dev_kfree_skb(skb); | 5394 | dev_kfree_skb(skb); |
5154 | 5395 | ||
5155 | return ret; | 5396 | return ret; |
@@ -5179,21 +5420,22 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry, | |||
5179 | } | 5420 | } |
5180 | 5421 | ||
5181 | /* hard_start_xmit for devices that don't have any bugs and | 5422 | /* hard_start_xmit for devices that don't have any bugs and |
5182 | * support TG3_FLG2_HW_TSO_2 only. | 5423 | * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only. |
5183 | */ | 5424 | */ |
5184 | static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | 5425 | static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, |
5185 | struct net_device *dev) | 5426 | struct net_device *dev) |
5186 | { | 5427 | { |
5187 | struct tg3 *tp = netdev_priv(dev); | 5428 | struct tg3 *tp = netdev_priv(dev); |
5188 | u32 len, entry, base_flags, mss; | 5429 | u32 len, entry, base_flags, mss; |
5189 | struct skb_shared_info *sp; | ||
5190 | dma_addr_t mapping; | 5430 | dma_addr_t mapping; |
5191 | struct tg3_napi *tnapi; | 5431 | struct tg3_napi *tnapi; |
5192 | struct netdev_queue *txq; | 5432 | struct netdev_queue *txq; |
5433 | unsigned int i, last; | ||
5434 | |||
5193 | 5435 | ||
5194 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 5436 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
5195 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | 5437 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; |
5196 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 5438 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
5197 | tnapi++; | 5439 | tnapi++; |
5198 | 5440 | ||
5199 | /* We are running in BH disabled context with netif_tx_lock | 5441 | /* We are running in BH disabled context with netif_tx_lock |
@@ -5238,7 +5480,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5238 | hdrlen = ip_tcp_len + tcp_opt_len; | 5480 | hdrlen = ip_tcp_len + tcp_opt_len; |
5239 | } | 5481 | } |
5240 | 5482 | ||
5241 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 5483 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { |
5242 | mss |= (hdrlen & 0xc) << 12; | 5484 | mss |= (hdrlen & 0xc) << 12; |
5243 | if (hdrlen & 0x10) | 5485 | if (hdrlen & 0x10) |
5244 | base_flags |= 0x00000010; | 5486 | base_flags |= 0x00000010; |
@@ -5260,20 +5502,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5260 | (vlan_tx_tag_get(skb) << 16)); | 5502 | (vlan_tx_tag_get(skb) << 16)); |
5261 | #endif | 5503 | #endif |
5262 | 5504 | ||
5263 | if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { | 5505 | len = skb_headlen(skb); |
5506 | |||
5507 | /* Queue skb data, a.k.a. the main skb fragment. */ | ||
5508 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
5509 | if (pci_dma_mapping_error(tp->pdev, mapping)) { | ||
5264 | dev_kfree_skb(skb); | 5510 | dev_kfree_skb(skb); |
5265 | goto out_unlock; | 5511 | goto out_unlock; |
5266 | } | 5512 | } |
5267 | 5513 | ||
5268 | sp = skb_shinfo(skb); | ||
5269 | |||
5270 | mapping = sp->dma_head; | ||
5271 | |||
5272 | tnapi->tx_buffers[entry].skb = skb; | 5514 | tnapi->tx_buffers[entry].skb = skb; |
5515 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); | ||
5273 | 5516 | ||
5274 | len = skb_headlen(skb); | 5517 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && |
5275 | |||
5276 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | ||
5277 | !mss && skb->len > ETH_DATA_LEN) | 5518 | !mss && skb->len > ETH_DATA_LEN) |
5278 | base_flags |= TXD_FLAG_JMB_PKT; | 5519 | base_flags |= TXD_FLAG_JMB_PKT; |
5279 | 5520 | ||
@@ -5284,15 +5525,21 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, | |||
5284 | 5525 | ||
5285 | /* Now loop through additional data fragments, and queue them. */ | 5526 | /* Now loop through additional data fragments, and queue them. */ |
5286 | if (skb_shinfo(skb)->nr_frags > 0) { | 5527 | if (skb_shinfo(skb)->nr_frags > 0) { |
5287 | unsigned int i, last; | ||
5288 | |||
5289 | last = skb_shinfo(skb)->nr_frags - 1; | 5528 | last = skb_shinfo(skb)->nr_frags - 1; |
5290 | for (i = 0; i <= last; i++) { | 5529 | for (i = 0; i <= last; i++) { |
5291 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 5530 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
5292 | 5531 | ||
5293 | len = frag->size; | 5532 | len = frag->size; |
5294 | mapping = sp->dma_maps[i]; | 5533 | mapping = pci_map_page(tp->pdev, |
5534 | frag->page, | ||
5535 | frag->page_offset, | ||
5536 | len, PCI_DMA_TODEVICE); | ||
5537 | if (pci_dma_mapping_error(tp->pdev, mapping)) | ||
5538 | goto dma_error; | ||
5539 | |||
5295 | tnapi->tx_buffers[entry].skb = NULL; | 5540 | tnapi->tx_buffers[entry].skb = NULL; |
5541 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5542 | mapping); | ||
5296 | 5543 | ||
5297 | tg3_set_txd(tnapi, entry, mapping, len, | 5544 | tg3_set_txd(tnapi, entry, mapping, len, |
5298 | base_flags, (i == last) | (mss << 1)); | 5545 | base_flags, (i == last) | (mss << 1)); |
@@ -5315,6 +5562,27 @@ out_unlock: | |||
5315 | mmiowb(); | 5562 | mmiowb(); |
5316 | 5563 | ||
5317 | return NETDEV_TX_OK; | 5564 | return NETDEV_TX_OK; |
5565 | |||
5566 | dma_error: | ||
5567 | last = i; | ||
5568 | entry = tnapi->tx_prod; | ||
5569 | tnapi->tx_buffers[entry].skb = NULL; | ||
5570 | pci_unmap_single(tp->pdev, | ||
5571 | pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), | ||
5572 | skb_headlen(skb), | ||
5573 | PCI_DMA_TODEVICE); | ||
5574 | for (i = 0; i <= last; i++) { | ||
5575 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5576 | entry = NEXT_TX(entry); | ||
5577 | |||
5578 | pci_unmap_page(tp->pdev, | ||
5579 | pci_unmap_addr(&tnapi->tx_buffers[entry], | ||
5580 | mapping), | ||
5581 | frag->size, PCI_DMA_TODEVICE); | ||
5582 | } | ||
5583 | |||
5584 | dev_kfree_skb(skb); | ||
5585 | return NETDEV_TX_OK; | ||
5318 | } | 5586 | } |
5319 | 5587 | ||
5320 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, | 5588 | static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, |
@@ -5362,12 +5630,17 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5362 | { | 5630 | { |
5363 | struct tg3 *tp = netdev_priv(dev); | 5631 | struct tg3 *tp = netdev_priv(dev); |
5364 | u32 len, entry, base_flags, mss; | 5632 | u32 len, entry, base_flags, mss; |
5365 | struct skb_shared_info *sp; | ||
5366 | int would_hit_hwbug; | 5633 | int would_hit_hwbug; |
5367 | dma_addr_t mapping; | 5634 | dma_addr_t mapping; |
5368 | struct tg3_napi *tnapi = &tp->napi[0]; | 5635 | struct tg3_napi *tnapi; |
5636 | struct netdev_queue *txq; | ||
5637 | unsigned int i, last; | ||
5369 | 5638 | ||
5370 | len = skb_headlen(skb); | 5639 | |
5640 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
5641 | tnapi = &tp->napi[skb_get_queue_mapping(skb)]; | ||
5642 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) | ||
5643 | tnapi++; | ||
5371 | 5644 | ||
5372 | /* We are running in BH disabled context with netif_tx_lock | 5645 | /* We are running in BH disabled context with netif_tx_lock |
5373 | * and TX reclaim runs via tp->napi.poll inside of a software | 5646 | * and TX reclaim runs via tp->napi.poll inside of a software |
@@ -5375,8 +5648,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5375 | * no IRQ context deadlocks to worry about either. Rejoice! | 5648 | * no IRQ context deadlocks to worry about either. Rejoice! |
5376 | */ | 5649 | */ |
5377 | if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { | 5650 | if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { |
5378 | if (!netif_queue_stopped(dev)) { | 5651 | if (!netif_tx_queue_stopped(txq)) { |
5379 | netif_stop_queue(dev); | 5652 | netif_tx_stop_queue(txq); |
5380 | 5653 | ||
5381 | /* This is a hard error, log it. */ | 5654 | /* This is a hard error, log it. */ |
5382 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | 5655 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " |
@@ -5389,10 +5662,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5389 | base_flags = 0; | 5662 | base_flags = 0; |
5390 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 5663 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
5391 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | 5664 | base_flags |= TXD_FLAG_TCPUDP_CSUM; |
5392 | mss = 0; | 5665 | |
5393 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { | 5666 | if ((mss = skb_shinfo(skb)->gso_size) != 0) { |
5394 | struct iphdr *iph; | 5667 | struct iphdr *iph; |
5395 | int tcp_opt_len, ip_tcp_len, hdr_len; | 5668 | u32 tcp_opt_len, ip_tcp_len, hdr_len; |
5396 | 5669 | ||
5397 | if (skb_header_cloned(skb) && | 5670 | if (skb_header_cloned(skb) && |
5398 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | 5671 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { |
@@ -5423,8 +5696,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5423 | IPPROTO_TCP, | 5696 | IPPROTO_TCP, |
5424 | 0); | 5697 | 0); |
5425 | 5698 | ||
5426 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || | 5699 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) { |
5427 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { | 5700 | mss |= (hdr_len & 0xc) << 12; |
5701 | if (hdr_len & 0x10) | ||
5702 | base_flags |= 0x00000010; | ||
5703 | base_flags |= (hdr_len & 0x3e0) << 5; | ||
5704 | } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) | ||
5705 | mss |= hdr_len << 9; | ||
5706 | else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) || | ||
5707 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { | ||
5428 | if (tcp_opt_len || iph->ihl > 5) { | 5708 | if (tcp_opt_len || iph->ihl > 5) { |
5429 | int tsflags; | 5709 | int tsflags; |
5430 | 5710 | ||
@@ -5446,22 +5726,35 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5446 | (vlan_tx_tag_get(skb) << 16)); | 5726 | (vlan_tx_tag_get(skb) << 16)); |
5447 | #endif | 5727 | #endif |
5448 | 5728 | ||
5449 | if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { | 5729 | if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && |
5730 | !mss && skb->len > ETH_DATA_LEN) | ||
5731 | base_flags |= TXD_FLAG_JMB_PKT; | ||
5732 | |||
5733 | len = skb_headlen(skb); | ||
5734 | |||
5735 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
5736 | if (pci_dma_mapping_error(tp->pdev, mapping)) { | ||
5450 | dev_kfree_skb(skb); | 5737 | dev_kfree_skb(skb); |
5451 | goto out_unlock; | 5738 | goto out_unlock; |
5452 | } | 5739 | } |
5453 | 5740 | ||
5454 | sp = skb_shinfo(skb); | ||
5455 | |||
5456 | mapping = sp->dma_head; | ||
5457 | |||
5458 | tnapi->tx_buffers[entry].skb = skb; | 5741 | tnapi->tx_buffers[entry].skb = skb; |
5742 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); | ||
5459 | 5743 | ||
5460 | would_hit_hwbug = 0; | 5744 | would_hit_hwbug = 0; |
5461 | 5745 | ||
5462 | if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) | 5746 | if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8) |
5463 | would_hit_hwbug = 1; | 5747 | would_hit_hwbug = 1; |
5464 | else if (tg3_4g_overflow_test(mapping, len)) | 5748 | |
5749 | if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && | ||
5750 | tg3_4g_overflow_test(mapping, len)) | ||
5751 | would_hit_hwbug = 1; | ||
5752 | |||
5753 | if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && | ||
5754 | tg3_40bit_overflow_test(tp, mapping, len)) | ||
5755 | would_hit_hwbug = 1; | ||
5756 | |||
5757 | if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) | ||
5465 | would_hit_hwbug = 1; | 5758 | would_hit_hwbug = 1; |
5466 | 5759 | ||
5467 | tg3_set_txd(tnapi, entry, mapping, len, base_flags, | 5760 | tg3_set_txd(tnapi, entry, mapping, len, base_flags, |
@@ -5471,21 +5764,32 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5471 | 5764 | ||
5472 | /* Now loop through additional data fragments, and queue them. */ | 5765 | /* Now loop through additional data fragments, and queue them. */ |
5473 | if (skb_shinfo(skb)->nr_frags > 0) { | 5766 | if (skb_shinfo(skb)->nr_frags > 0) { |
5474 | unsigned int i, last; | ||
5475 | |||
5476 | last = skb_shinfo(skb)->nr_frags - 1; | 5767 | last = skb_shinfo(skb)->nr_frags - 1; |
5477 | for (i = 0; i <= last; i++) { | 5768 | for (i = 0; i <= last; i++) { |
5478 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 5769 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
5479 | 5770 | ||
5480 | len = frag->size; | 5771 | len = frag->size; |
5481 | mapping = sp->dma_maps[i]; | 5772 | mapping = pci_map_page(tp->pdev, |
5773 | frag->page, | ||
5774 | frag->page_offset, | ||
5775 | len, PCI_DMA_TODEVICE); | ||
5482 | 5776 | ||
5483 | tnapi->tx_buffers[entry].skb = NULL; | 5777 | tnapi->tx_buffers[entry].skb = NULL; |
5778 | pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, | ||
5779 | mapping); | ||
5780 | if (pci_dma_mapping_error(tp->pdev, mapping)) | ||
5781 | goto dma_error; | ||
5484 | 5782 | ||
5485 | if (tg3_4g_overflow_test(mapping, len)) | 5783 | if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && |
5784 | len <= 8) | ||
5486 | would_hit_hwbug = 1; | 5785 | would_hit_hwbug = 1; |
5487 | 5786 | ||
5488 | if (tg3_40bit_overflow_test(tp, mapping, len)) | 5787 | if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) && |
5788 | tg3_4g_overflow_test(mapping, len)) | ||
5789 | would_hit_hwbug = 1; | ||
5790 | |||
5791 | if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) && | ||
5792 | tg3_40bit_overflow_test(tp, mapping, len)) | ||
5489 | would_hit_hwbug = 1; | 5793 | would_hit_hwbug = 1; |
5490 | 5794 | ||
5491 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 5795 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
@@ -5509,7 +5813,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5509 | /* If the workaround fails due to memory/mapping | 5813 | /* If the workaround fails due to memory/mapping |
5510 | * failure, silently drop this packet. | 5814 | * failure, silently drop this packet. |
5511 | */ | 5815 | */ |
5512 | if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, | 5816 | if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one, |
5513 | &start, base_flags, mss)) | 5817 | &start, base_flags, mss)) |
5514 | goto out_unlock; | 5818 | goto out_unlock; |
5515 | 5819 | ||
@@ -5517,19 +5821,40 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, | |||
5517 | } | 5821 | } |
5518 | 5822 | ||
5519 | /* Packets are ready, update Tx producer idx local and on card. */ | 5823 | /* Packets are ready, update Tx producer idx local and on card. */ |
5520 | tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); | 5824 | tw32_tx_mbox(tnapi->prodmbox, entry); |
5521 | 5825 | ||
5522 | tnapi->tx_prod = entry; | 5826 | tnapi->tx_prod = entry; |
5523 | if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { | 5827 | if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { |
5524 | netif_stop_queue(dev); | 5828 | netif_tx_stop_queue(txq); |
5525 | if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) | 5829 | if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) |
5526 | netif_wake_queue(tp->dev); | 5830 | netif_tx_wake_queue(txq); |
5527 | } | 5831 | } |
5528 | 5832 | ||
5529 | out_unlock: | 5833 | out_unlock: |
5530 | mmiowb(); | 5834 | mmiowb(); |
5531 | 5835 | ||
5532 | return NETDEV_TX_OK; | 5836 | return NETDEV_TX_OK; |
5837 | |||
5838 | dma_error: | ||
5839 | last = i; | ||
5840 | entry = tnapi->tx_prod; | ||
5841 | tnapi->tx_buffers[entry].skb = NULL; | ||
5842 | pci_unmap_single(tp->pdev, | ||
5843 | pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), | ||
5844 | skb_headlen(skb), | ||
5845 | PCI_DMA_TODEVICE); | ||
5846 | for (i = 0; i <= last; i++) { | ||
5847 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
5848 | entry = NEXT_TX(entry); | ||
5849 | |||
5850 | pci_unmap_page(tp->pdev, | ||
5851 | pci_unmap_addr(&tnapi->tx_buffers[entry], | ||
5852 | mapping), | ||
5853 | frag->size, PCI_DMA_TODEVICE); | ||
5854 | } | ||
5855 | |||
5856 | dev_kfree_skb(skb); | ||
5857 | return NETDEV_TX_OK; | ||
5533 | } | 5858 | } |
5534 | 5859 | ||
5535 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, | 5860 | static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, |
@@ -5594,36 +5919,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp, | |||
5594 | struct tg3_rx_prodring_set *tpr) | 5919 | struct tg3_rx_prodring_set *tpr) |
5595 | { | 5920 | { |
5596 | int i; | 5921 | int i; |
5597 | struct ring_info *rxp; | ||
5598 | |||
5599 | for (i = 0; i < TG3_RX_RING_SIZE; i++) { | ||
5600 | rxp = &tpr->rx_std_buffers[i]; | ||
5601 | 5922 | ||
5602 | if (rxp->skb == NULL) | 5923 | if (tpr != &tp->prodring[0]) { |
5603 | continue; | 5924 | for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; |
5925 | i = (i + 1) % TG3_RX_RING_SIZE) | ||
5926 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], | ||
5927 | tp->rx_pkt_map_sz); | ||
5928 | |||
5929 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | ||
5930 | for (i = tpr->rx_jmb_cons_idx; | ||
5931 | i != tpr->rx_jmb_prod_idx; | ||
5932 | i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { | ||
5933 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], | ||
5934 | TG3_RX_JMB_MAP_SZ); | ||
5935 | } | ||
5936 | } | ||
5604 | 5937 | ||
5605 | pci_unmap_single(tp->pdev, | 5938 | return; |
5606 | pci_unmap_addr(rxp, mapping), | ||
5607 | tp->rx_pkt_map_sz, | ||
5608 | PCI_DMA_FROMDEVICE); | ||
5609 | dev_kfree_skb_any(rxp->skb); | ||
5610 | rxp->skb = NULL; | ||
5611 | } | 5939 | } |
5612 | 5940 | ||
5613 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 5941 | for (i = 0; i < TG3_RX_RING_SIZE; i++) |
5614 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { | 5942 | tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], |
5615 | rxp = &tpr->rx_jmb_buffers[i]; | 5943 | tp->rx_pkt_map_sz); |
5616 | |||
5617 | if (rxp->skb == NULL) | ||
5618 | continue; | ||
5619 | 5944 | ||
5620 | pci_unmap_single(tp->pdev, | 5945 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { |
5621 | pci_unmap_addr(rxp, mapping), | 5946 | for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) |
5622 | TG3_RX_JMB_MAP_SZ, | 5947 | tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], |
5623 | PCI_DMA_FROMDEVICE); | 5948 | TG3_RX_JMB_MAP_SZ); |
5624 | dev_kfree_skb_any(rxp->skb); | ||
5625 | rxp->skb = NULL; | ||
5626 | } | ||
5627 | } | 5949 | } |
5628 | } | 5950 | } |
5629 | 5951 | ||
@@ -5638,7 +5960,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5638 | struct tg3_rx_prodring_set *tpr) | 5960 | struct tg3_rx_prodring_set *tpr) |
5639 | { | 5961 | { |
5640 | u32 i, rx_pkt_dma_sz; | 5962 | u32 i, rx_pkt_dma_sz; |
5641 | struct tg3_napi *tnapi = &tp->napi[0]; | 5963 | |
5964 | tpr->rx_std_cons_idx = 0; | ||
5965 | tpr->rx_std_prod_idx = 0; | ||
5966 | tpr->rx_jmb_cons_idx = 0; | ||
5967 | tpr->rx_jmb_prod_idx = 0; | ||
5968 | |||
5969 | if (tpr != &tp->prodring[0]) { | ||
5970 | memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); | ||
5971 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) | ||
5972 | memset(&tpr->rx_jmb_buffers[0], 0, | ||
5973 | TG3_RX_JMB_BUFF_RING_SIZE); | ||
5974 | goto done; | ||
5975 | } | ||
5642 | 5976 | ||
5643 | /* Zero out all descriptors. */ | 5977 | /* Zero out all descriptors. */ |
5644 | memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); | 5978 | memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); |
@@ -5665,7 +5999,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5665 | 5999 | ||
5666 | /* Now allocate fresh SKBs for each rx ring. */ | 6000 | /* Now allocate fresh SKBs for each rx ring. */ |
5667 | for (i = 0; i < tp->rx_pending; i++) { | 6001 | for (i = 0; i < tp->rx_pending; i++) { |
5668 | if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { | 6002 | if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { |
5669 | printk(KERN_WARNING PFX | 6003 | printk(KERN_WARNING PFX |
5670 | "%s: Using a smaller RX standard ring, " | 6004 | "%s: Using a smaller RX standard ring, " |
5671 | "only %d out of %d buffers were allocated " | 6005 | "only %d out of %d buffers were allocated " |
@@ -5696,8 +6030,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp, | |||
5696 | } | 6030 | } |
5697 | 6031 | ||
5698 | for (i = 0; i < tp->rx_jumbo_pending; i++) { | 6032 | for (i = 0; i < tp->rx_jumbo_pending; i++) { |
5699 | if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, | 6033 | if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, |
5700 | -1, i) < 0) { | 6034 | i) < 0) { |
5701 | printk(KERN_WARNING PFX | 6035 | printk(KERN_WARNING PFX |
5702 | "%s: Using a smaller RX jumbo ring, " | 6036 | "%s: Using a smaller RX jumbo ring, " |
5703 | "only %d out of %d buffers were " | 6037 | "only %d out of %d buffers were " |
@@ -5741,8 +6075,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp, | |||
5741 | static int tg3_rx_prodring_init(struct tg3 *tp, | 6075 | static int tg3_rx_prodring_init(struct tg3 *tp, |
5742 | struct tg3_rx_prodring_set *tpr) | 6076 | struct tg3_rx_prodring_set *tpr) |
5743 | { | 6077 | { |
5744 | tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * | 6078 | tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); |
5745 | TG3_RX_RING_SIZE, GFP_KERNEL); | ||
5746 | if (!tpr->rx_std_buffers) | 6079 | if (!tpr->rx_std_buffers) |
5747 | return -ENOMEM; | 6080 | return -ENOMEM; |
5748 | 6081 | ||
@@ -5752,8 +6085,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp, | |||
5752 | goto err_out; | 6085 | goto err_out; |
5753 | 6086 | ||
5754 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { | 6087 | if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { |
5755 | tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * | 6088 | tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, |
5756 | TG3_RX_JUMBO_RING_SIZE, | ||
5757 | GFP_KERNEL); | 6089 | GFP_KERNEL); |
5758 | if (!tpr->rx_jmb_buffers) | 6090 | if (!tpr->rx_jmb_buffers) |
5759 | goto err_out; | 6091 | goto err_out; |
@@ -5790,8 +6122,9 @@ static void tg3_free_rings(struct tg3 *tp) | |||
5790 | continue; | 6122 | continue; |
5791 | 6123 | ||
5792 | for (i = 0; i < TG3_TX_RING_SIZE; ) { | 6124 | for (i = 0; i < TG3_TX_RING_SIZE; ) { |
5793 | struct tx_ring_info *txp; | 6125 | struct ring_info *txp; |
5794 | struct sk_buff *skb; | 6126 | struct sk_buff *skb; |
6127 | unsigned int k; | ||
5795 | 6128 | ||
5796 | txp = &tnapi->tx_buffers[i]; | 6129 | txp = &tnapi->tx_buffers[i]; |
5797 | skb = txp->skb; | 6130 | skb = txp->skb; |
@@ -5801,17 +6134,29 @@ static void tg3_free_rings(struct tg3 *tp) | |||
5801 | continue; | 6134 | continue; |
5802 | } | 6135 | } |
5803 | 6136 | ||
5804 | skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE); | 6137 | pci_unmap_single(tp->pdev, |
5805 | 6138 | pci_unmap_addr(txp, mapping), | |
6139 | skb_headlen(skb), | ||
6140 | PCI_DMA_TODEVICE); | ||
5806 | txp->skb = NULL; | 6141 | txp->skb = NULL; |
5807 | 6142 | ||
5808 | i += skb_shinfo(skb)->nr_frags + 1; | 6143 | i++; |
6144 | |||
6145 | for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { | ||
6146 | txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; | ||
6147 | pci_unmap_page(tp->pdev, | ||
6148 | pci_unmap_addr(txp, mapping), | ||
6149 | skb_shinfo(skb)->frags[k].size, | ||
6150 | PCI_DMA_TODEVICE); | ||
6151 | i++; | ||
6152 | } | ||
5809 | 6153 | ||
5810 | dev_kfree_skb_any(skb); | 6154 | dev_kfree_skb_any(skb); |
5811 | } | 6155 | } |
5812 | } | ||
5813 | 6156 | ||
5814 | tg3_rx_prodring_free(tp, &tp->prodring[0]); | 6157 | if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1) |
6158 | tg3_rx_prodring_free(tp, &tp->prodring[j]); | ||
6159 | } | ||
5815 | } | 6160 | } |
5816 | 6161 | ||
5817 | /* Initialize tx/rx rings for packet processing. | 6162 | /* Initialize tx/rx rings for packet processing. |
@@ -5845,9 +6190,13 @@ static int tg3_init_rings(struct tg3 *tp) | |||
5845 | tnapi->rx_rcb_ptr = 0; | 6190 | tnapi->rx_rcb_ptr = 0; |
5846 | if (tnapi->rx_rcb) | 6191 | if (tnapi->rx_rcb) |
5847 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 6192 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); |
6193 | |||
6194 | if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) && | ||
6195 | tg3_rx_prodring_alloc(tp, &tp->prodring[i])) | ||
6196 | return -ENOMEM; | ||
5848 | } | 6197 | } |
5849 | 6198 | ||
5850 | return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); | 6199 | return 0; |
5851 | } | 6200 | } |
5852 | 6201 | ||
5853 | /* | 6202 | /* |
@@ -5891,7 +6240,8 @@ static void tg3_free_consistent(struct tg3 *tp) | |||
5891 | tp->hw_stats = NULL; | 6240 | tp->hw_stats = NULL; |
5892 | } | 6241 | } |
5893 | 6242 | ||
5894 | tg3_rx_prodring_fini(tp, &tp->prodring[0]); | 6243 | for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) |
6244 | tg3_rx_prodring_fini(tp, &tp->prodring[i]); | ||
5895 | } | 6245 | } |
5896 | 6246 | ||
5897 | /* | 6247 | /* |
@@ -5902,8 +6252,10 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5902 | { | 6252 | { |
5903 | int i; | 6253 | int i; |
5904 | 6254 | ||
5905 | if (tg3_rx_prodring_init(tp, &tp->prodring[0])) | 6255 | for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) { |
5906 | return -ENOMEM; | 6256 | if (tg3_rx_prodring_init(tp, &tp->prodring[i])) |
6257 | goto err_out; | ||
6258 | } | ||
5907 | 6259 | ||
5908 | tp->hw_stats = pci_alloc_consistent(tp->pdev, | 6260 | tp->hw_stats = pci_alloc_consistent(tp->pdev, |
5909 | sizeof(struct tg3_hw_stats), | 6261 | sizeof(struct tg3_hw_stats), |
@@ -5926,6 +6278,24 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5926 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 6278 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); |
5927 | sblk = tnapi->hw_status; | 6279 | sblk = tnapi->hw_status; |
5928 | 6280 | ||
6281 | /* If multivector TSS is enabled, vector 0 does not handle | ||
6282 | * tx interrupts. Don't allocate any resources for it. | ||
6283 | */ | ||
6284 | if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) || | ||
6285 | (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) { | ||
6286 | tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) * | ||
6287 | TG3_TX_RING_SIZE, | ||
6288 | GFP_KERNEL); | ||
6289 | if (!tnapi->tx_buffers) | ||
6290 | goto err_out; | ||
6291 | |||
6292 | tnapi->tx_ring = pci_alloc_consistent(tp->pdev, | ||
6293 | TG3_TX_RING_BYTES, | ||
6294 | &tnapi->tx_desc_mapping); | ||
6295 | if (!tnapi->tx_ring) | ||
6296 | goto err_out; | ||
6297 | } | ||
6298 | |||
5929 | /* | 6299 | /* |
5930 | * When RSS is enabled, the status block format changes | 6300 | * When RSS is enabled, the status block format changes |
5931 | * slightly. The "rx_jumbo_consumer", "reserved", | 6301 | * slightly. The "rx_jumbo_consumer", "reserved", |
@@ -5947,6 +6317,11 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5947 | break; | 6317 | break; |
5948 | } | 6318 | } |
5949 | 6319 | ||
6320 | if (tp->irq_cnt == 1) | ||
6321 | tnapi->prodring = &tp->prodring[0]; | ||
6322 | else if (i) | ||
6323 | tnapi->prodring = &tp->prodring[i - 1]; | ||
6324 | |||
5950 | /* | 6325 | /* |
5951 | * If multivector RSS is enabled, vector 0 does not handle | 6326 | * If multivector RSS is enabled, vector 0 does not handle |
5952 | * rx or tx interrupts. Don't allocate any resources for it. | 6327 | * rx or tx interrupts. Don't allocate any resources for it. |
@@ -5961,17 +6336,6 @@ static int tg3_alloc_consistent(struct tg3 *tp) | |||
5961 | goto err_out; | 6336 | goto err_out; |
5962 | 6337 | ||
5963 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); | 6338 | memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); |
5964 | |||
5965 | tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) * | ||
5966 | TG3_TX_RING_SIZE, GFP_KERNEL); | ||
5967 | if (!tnapi->tx_buffers) | ||
5968 | goto err_out; | ||
5969 | |||
5970 | tnapi->tx_ring = pci_alloc_consistent(tp->pdev, | ||
5971 | TG3_TX_RING_BYTES, | ||
5972 | &tnapi->tx_desc_mapping); | ||
5973 | if (!tnapi->tx_ring) | ||
5974 | goto err_out; | ||
5975 | } | 6339 | } |
5976 | 6340 | ||
5977 | return 0; | 6341 | return 0; |
@@ -6580,10 +6944,35 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
6580 | 6944 | ||
6581 | tg3_mdio_start(tp); | 6945 | tg3_mdio_start(tp); |
6582 | 6946 | ||
6947 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) { | ||
6948 | u8 phy_addr; | ||
6949 | |||
6950 | phy_addr = tp->phy_addr; | ||
6951 | tp->phy_addr = TG3_PHY_PCIE_ADDR; | ||
6952 | |||
6953 | tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, | ||
6954 | TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT); | ||
6955 | val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL | | ||
6956 | TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL | | ||
6957 | TG3_PCIEPHY_TX0CTRL1_NB_EN; | ||
6958 | tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val); | ||
6959 | udelay(10); | ||
6960 | |||
6961 | tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR, | ||
6962 | TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT); | ||
6963 | val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN | | ||
6964 | TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN; | ||
6965 | tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val); | ||
6966 | udelay(10); | ||
6967 | |||
6968 | tp->phy_addr = phy_addr; | ||
6969 | } | ||
6970 | |||
6583 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && | 6971 | if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && |
6584 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && | 6972 | tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && |
6585 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 6973 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
6586 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 6974 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
6975 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | ||
6587 | val = tr32(0x7c00); | 6976 | val = tr32(0x7c00); |
6588 | 6977 | ||
6589 | tw32(0x7c00, val | (1 << 25)); | 6978 | tw32(0x7c00, val | (1 << 25)); |
@@ -6935,19 +7324,21 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
6935 | { | 7324 | { |
6936 | int i; | 7325 | int i; |
6937 | 7326 | ||
6938 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { | 7327 | if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) { |
6939 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); | 7328 | tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs); |
6940 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); | 7329 | tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames); |
6941 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); | 7330 | tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq); |
6942 | |||
6943 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); | ||
6944 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); | ||
6945 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); | ||
6946 | } else { | 7331 | } else { |
6947 | tw32(HOSTCC_TXCOL_TICKS, 0); | 7332 | tw32(HOSTCC_TXCOL_TICKS, 0); |
6948 | tw32(HOSTCC_TXMAX_FRAMES, 0); | 7333 | tw32(HOSTCC_TXMAX_FRAMES, 0); |
6949 | tw32(HOSTCC_TXCOAL_MAXF_INT, 0); | 7334 | tw32(HOSTCC_TXCOAL_MAXF_INT, 0); |
7335 | } | ||
6950 | 7336 | ||
7337 | if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { | ||
7338 | tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs); | ||
7339 | tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames); | ||
7340 | tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq); | ||
7341 | } else { | ||
6951 | tw32(HOSTCC_RXCOL_TICKS, 0); | 7342 | tw32(HOSTCC_RXCOL_TICKS, 0); |
6952 | tw32(HOSTCC_RXMAX_FRAMES, 0); | 7343 | tw32(HOSTCC_RXMAX_FRAMES, 0); |
6953 | tw32(HOSTCC_RXCOAL_MAXF_INT, 0); | 7344 | tw32(HOSTCC_RXCOAL_MAXF_INT, 0); |
@@ -6970,25 +7361,31 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) | |||
6970 | 7361 | ||
6971 | reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; | 7362 | reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18; |
6972 | tw32(reg, ec->rx_coalesce_usecs); | 7363 | tw32(reg, ec->rx_coalesce_usecs); |
6973 | reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; | ||
6974 | tw32(reg, ec->tx_coalesce_usecs); | ||
6975 | reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; | 7364 | reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18; |
6976 | tw32(reg, ec->rx_max_coalesced_frames); | 7365 | tw32(reg, ec->rx_max_coalesced_frames); |
6977 | reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; | ||
6978 | tw32(reg, ec->tx_max_coalesced_frames); | ||
6979 | reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; | 7366 | reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18; |
6980 | tw32(reg, ec->rx_max_coalesced_frames_irq); | 7367 | tw32(reg, ec->rx_max_coalesced_frames_irq); |
6981 | reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; | 7368 | |
6982 | tw32(reg, ec->tx_max_coalesced_frames_irq); | 7369 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { |
7370 | reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18; | ||
7371 | tw32(reg, ec->tx_coalesce_usecs); | ||
7372 | reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18; | ||
7373 | tw32(reg, ec->tx_max_coalesced_frames); | ||
7374 | reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18; | ||
7375 | tw32(reg, ec->tx_max_coalesced_frames_irq); | ||
7376 | } | ||
6983 | } | 7377 | } |
6984 | 7378 | ||
6985 | for (; i < tp->irq_max - 1; i++) { | 7379 | for (; i < tp->irq_max - 1; i++) { |
6986 | tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); | 7380 | tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0); |
6987 | tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); | ||
6988 | tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); | 7381 | tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0); |
6989 | tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); | ||
6990 | tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 7382 | tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); |
6991 | tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | 7383 | |
7384 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) { | ||
7385 | tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0); | ||
7386 | tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0); | ||
7387 | tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0); | ||
7388 | } | ||
6992 | } | 7389 | } |
6993 | } | 7390 | } |
6994 | 7391 | ||
@@ -7002,6 +7399,8 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7002 | /* Disable all transmit rings but the first. */ | 7399 | /* Disable all transmit rings but the first. */ |
7003 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7400 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
7004 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; | 7401 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; |
7402 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
7403 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; | ||
7005 | else | 7404 | else |
7006 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; | 7405 | limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE; |
7007 | 7406 | ||
@@ -7016,7 +7415,8 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7016 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; | 7415 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17; |
7017 | else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7416 | else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
7018 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; | 7417 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16; |
7019 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) | 7418 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
7419 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
7020 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; | 7420 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4; |
7021 | else | 7421 | else |
7022 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; | 7422 | limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE; |
@@ -7089,17 +7489,19 @@ static void tg3_rings_reset(struct tg3 *tp) | |||
7089 | /* Clear status block in ram. */ | 7489 | /* Clear status block in ram. */ |
7090 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); | 7490 | memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); |
7091 | 7491 | ||
7092 | tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, | 7492 | if (tnapi->tx_ring) { |
7093 | (TG3_TX_RING_SIZE << | 7493 | tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, |
7094 | BDINFO_FLAGS_MAXLEN_SHIFT), | 7494 | (TG3_TX_RING_SIZE << |
7095 | NIC_SRAM_TX_BUFFER_DESC); | 7495 | BDINFO_FLAGS_MAXLEN_SHIFT), |
7496 | NIC_SRAM_TX_BUFFER_DESC); | ||
7497 | txrcb += TG3_BDINFO_SIZE; | ||
7498 | } | ||
7096 | 7499 | ||
7097 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, | 7500 | tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, |
7098 | (TG3_RX_RCB_RING_SIZE(tp) << | 7501 | (TG3_RX_RCB_RING_SIZE(tp) << |
7099 | BDINFO_FLAGS_MAXLEN_SHIFT), 0); | 7502 | BDINFO_FLAGS_MAXLEN_SHIFT), 0); |
7100 | 7503 | ||
7101 | stblk += 8; | 7504 | stblk += 8; |
7102 | txrcb += TG3_BDINFO_SIZE; | ||
7103 | rxrcb += TG3_BDINFO_SIZE; | 7505 | rxrcb += TG3_BDINFO_SIZE; |
7104 | } | 7506 | } |
7105 | } | 7507 | } |
@@ -7162,15 +7564,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7162 | tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); | 7564 | tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); |
7163 | 7565 | ||
7164 | tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); | 7566 | tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); |
7165 | } | ||
7166 | 7567 | ||
7167 | if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) { | 7568 | val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; |
7168 | val = tr32(TG3_PCIE_LNKCTL); | 7569 | tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); |
7169 | if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) | ||
7170 | val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS; | ||
7171 | else | ||
7172 | val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS; | ||
7173 | tw32(TG3_PCIE_LNKCTL, val); | ||
7174 | } | 7570 | } |
7175 | 7571 | ||
7176 | /* This works around an issue with Athlon chipsets on | 7572 | /* This works around an issue with Athlon chipsets on |
@@ -7217,9 +7613,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7217 | if (err) | 7613 | if (err) |
7218 | return err; | 7614 | return err; |
7219 | 7615 | ||
7220 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && | 7616 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
7221 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && | 7617 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { |
7222 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 7618 | val = tr32(TG3PCI_DMA_RW_CTRL) & |
7619 | ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | ||
7620 | tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); | ||
7621 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && | ||
7622 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { | ||
7223 | /* This value is determined during the probe time DMA | 7623 | /* This value is determined during the probe time DMA |
7224 | * engine test, tg3_test_dma. | 7624 | * engine test, tg3_test_dma. |
7225 | */ | 7625 | */ |
@@ -7342,8 +7742,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7342 | ((u64) tpr->rx_std_mapping >> 32)); | 7742 | ((u64) tpr->rx_std_mapping >> 32)); |
7343 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, | 7743 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, |
7344 | ((u64) tpr->rx_std_mapping & 0xffffffff)); | 7744 | ((u64) tpr->rx_std_mapping & 0xffffffff)); |
7345 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, | 7745 | if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) |
7346 | NIC_SRAM_RX_BUFFER_DESC); | 7746 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, |
7747 | NIC_SRAM_RX_BUFFER_DESC); | ||
7347 | 7748 | ||
7348 | /* Disable the mini ring */ | 7749 | /* Disable the mini ring */ |
7349 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) | 7750 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
@@ -7366,14 +7767,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7366 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 7767 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, |
7367 | (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | | 7768 | (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | |
7368 | BDINFO_FLAGS_USE_EXT_RECV); | 7769 | BDINFO_FLAGS_USE_EXT_RECV); |
7369 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, | 7770 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) |
7370 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); | 7771 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, |
7772 | NIC_SRAM_RX_JUMBO_BUFFER_DESC); | ||
7371 | } else { | 7773 | } else { |
7372 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, | 7774 | tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, |
7373 | BDINFO_FLAGS_DISABLED); | 7775 | BDINFO_FLAGS_DISABLED); |
7374 | } | 7776 | } |
7375 | 7777 | ||
7376 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 7778 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
7779 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
7377 | val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | | 7780 | val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | |
7378 | (RX_STD_MAX_SIZE << 2); | 7781 | (RX_STD_MAX_SIZE << 2); |
7379 | else | 7782 | else |
@@ -7383,16 +7786,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7383 | 7786 | ||
7384 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); | 7787 | tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); |
7385 | 7788 | ||
7386 | tpr->rx_std_ptr = tp->rx_pending; | 7789 | tpr->rx_std_prod_idx = tp->rx_pending; |
7387 | tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, | 7790 | tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx); |
7388 | tpr->rx_std_ptr); | ||
7389 | 7791 | ||
7390 | tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? | 7792 | tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? |
7391 | tp->rx_jumbo_pending : 0; | 7793 | tp->rx_jumbo_pending : 0; |
7392 | tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, | 7794 | tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx); |
7393 | tpr->rx_jmb_ptr); | ||
7394 | 7795 | ||
7395 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 7796 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
7797 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | ||
7396 | tw32(STD_REPLENISH_LWM, 32); | 7798 | tw32(STD_REPLENISH_LWM, 32); |
7397 | tw32(JMB_REPLENISH_LWM, 16); | 7799 | tw32(JMB_REPLENISH_LWM, 16); |
7398 | } | 7800 | } |
@@ -7453,7 +7855,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7453 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 7855 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
7454 | rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; | 7856 | rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; |
7455 | 7857 | ||
7456 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 7858 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || |
7859 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | ||
7457 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 7860 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
7458 | rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; | 7861 | rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; |
7459 | 7862 | ||
@@ -7602,6 +8005,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7602 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) | 8005 | if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) |
7603 | val |= WDMAC_MODE_STATUS_TAG_FIX; | 8006 | val |= WDMAC_MODE_STATUS_TAG_FIX; |
7604 | 8007 | ||
8008 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) | ||
8009 | val |= WDMAC_MODE_BURST_ALL_DATA; | ||
8010 | |||
7605 | tw32_f(WDMAC_MODE, val); | 8011 | tw32_f(WDMAC_MODE, val); |
7606 | udelay(40); | 8012 | udelay(40); |
7607 | 8013 | ||
@@ -7641,7 +8047,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) | |||
7641 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | 8047 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) |
7642 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); | 8048 | tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); |
7643 | val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; | 8049 | val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE; |
7644 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) | 8050 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) |
7645 | val |= SNDBDI_MODE_MULTI_TXQ_EN; | 8051 | val |= SNDBDI_MODE_MULTI_TXQ_EN; |
7646 | tw32(SNDBDI_MODE, val); | 8052 | tw32(SNDBDI_MODE, val); |
7647 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); | 8053 | tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE); |
@@ -8065,7 +8471,8 @@ static int tg3_test_interrupt(struct tg3 *tp) | |||
8065 | * Turn off MSI one shot mode. Otherwise this test has no | 8471 | * Turn off MSI one shot mode. Otherwise this test has no |
8066 | * observable way to know whether the interrupt was delivered. | 8472 | * observable way to know whether the interrupt was delivered. |
8067 | */ | 8473 | */ |
8068 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | 8474 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
8475 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && | ||
8069 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | 8476 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { |
8070 | val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; | 8477 | val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE; |
8071 | tw32(MSGINT_MODE, val); | 8478 | tw32(MSGINT_MODE, val); |
@@ -8108,7 +8515,8 @@ static int tg3_test_interrupt(struct tg3 *tp) | |||
8108 | 8515 | ||
8109 | if (intr_ok) { | 8516 | if (intr_ok) { |
8110 | /* Reenable MSI one shot mode. */ | 8517 | /* Reenable MSI one shot mode. */ |
8111 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 && | 8518 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
8519 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && | ||
8112 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { | 8520 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) { |
8113 | val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; | 8521 | val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE; |
8114 | tw32(MSGINT_MODE, val); | 8522 | tw32(MSGINT_MODE, val); |
@@ -8249,7 +8657,11 @@ static bool tg3_enable_msix(struct tg3 *tp) | |||
8249 | for (i = 0; i < tp->irq_max; i++) | 8657 | for (i = 0; i < tp->irq_max; i++) |
8250 | tp->napi[i].irq_vec = msix_ent[i].vector; | 8658 | tp->napi[i].irq_vec = msix_ent[i].vector; |
8251 | 8659 | ||
8252 | tp->dev->real_num_tx_queues = tp->irq_cnt - 1; | 8660 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { |
8661 | tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS; | ||
8662 | tp->dev->real_num_tx_queues = tp->irq_cnt - 1; | ||
8663 | } else | ||
8664 | tp->dev->real_num_tx_queues = 1; | ||
8253 | 8665 | ||
8254 | return true; | 8666 | return true; |
8255 | } | 8667 | } |
@@ -8400,6 +8812,7 @@ static int tg3_open(struct net_device *dev) | |||
8400 | } | 8812 | } |
8401 | 8813 | ||
8402 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && | 8814 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
8815 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 && | ||
8403 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && | 8816 | (tp->tg3_flags2 & TG3_FLG2_USING_MSI) && |
8404 | (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { | 8817 | (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) { |
8405 | u32 val = tr32(PCIE_TRANSACTION_CFG); | 8818 | u32 val = tr32(PCIE_TRANSACTION_CFG); |
@@ -9240,9 +9653,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9240 | struct tg3 *tp = netdev_priv(dev); | 9653 | struct tg3 *tp = netdev_priv(dev); |
9241 | 9654 | ||
9242 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9655 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
9656 | struct phy_device *phydev; | ||
9243 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 9657 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
9244 | return -EAGAIN; | 9658 | return -EAGAIN; |
9245 | return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); | 9659 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
9660 | return phy_ethtool_gset(phydev, cmd); | ||
9246 | } | 9661 | } |
9247 | 9662 | ||
9248 | cmd->supported = (SUPPORTED_Autoneg); | 9663 | cmd->supported = (SUPPORTED_Autoneg); |
@@ -9281,9 +9696,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
9281 | struct tg3 *tp = netdev_priv(dev); | 9696 | struct tg3 *tp = netdev_priv(dev); |
9282 | 9697 | ||
9283 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9698 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
9699 | struct phy_device *phydev; | ||
9284 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 9700 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
9285 | return -EAGAIN; | 9701 | return -EAGAIN; |
9286 | return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); | 9702 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
9703 | return phy_ethtool_sset(phydev, cmd); | ||
9287 | } | 9704 | } |
9288 | 9705 | ||
9289 | if (cmd->autoneg != AUTONEG_ENABLE && | 9706 | if (cmd->autoneg != AUTONEG_ENABLE && |
@@ -9436,15 +9853,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value) | |||
9436 | return 0; | 9853 | return 0; |
9437 | } | 9854 | } |
9438 | if ((dev->features & NETIF_F_IPV6_CSUM) && | 9855 | if ((dev->features & NETIF_F_IPV6_CSUM) && |
9439 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { | 9856 | ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || |
9857 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) { | ||
9440 | if (value) { | 9858 | if (value) { |
9441 | dev->features |= NETIF_F_TSO6; | 9859 | dev->features |= NETIF_F_TSO6; |
9442 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 9860 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || |
9861 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | ||
9443 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 9862 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && |
9444 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | 9863 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || |
9445 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 9864 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
9446 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 9865 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
9447 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | ||
9448 | dev->features |= NETIF_F_TSO_ECN; | 9866 | dev->features |= NETIF_F_TSO_ECN; |
9449 | } else | 9867 | } else |
9450 | dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); | 9868 | dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); |
@@ -9466,7 +9884,7 @@ static int tg3_nway_reset(struct net_device *dev) | |||
9466 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 9884 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
9467 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 9885 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
9468 | return -EAGAIN; | 9886 | return -EAGAIN; |
9469 | r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]); | 9887 | r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]); |
9470 | } else { | 9888 | } else { |
9471 | u32 bmcr; | 9889 | u32 bmcr; |
9472 | 9890 | ||
@@ -9585,7 +10003,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
9585 | u32 newadv; | 10003 | u32 newadv; |
9586 | struct phy_device *phydev; | 10004 | struct phy_device *phydev; |
9587 | 10005 | ||
9588 | phydev = tp->mdio_bus->phy_map[PHY_ADDR]; | 10006 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
9589 | 10007 | ||
9590 | if (epause->rx_pause) { | 10008 | if (epause->rx_pause) { |
9591 | if (epause->tx_pause) | 10009 | if (epause->tx_pause) |
@@ -10339,6 +10757,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10339 | tx_data[i] = (u8) (i & 0xff); | 10757 | tx_data[i] = (u8) (i & 0xff); |
10340 | 10758 | ||
10341 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); | 10759 | map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); |
10760 | if (pci_dma_mapping_error(tp->pdev, map)) { | ||
10761 | dev_kfree_skb(skb); | ||
10762 | return -EIO; | ||
10763 | } | ||
10342 | 10764 | ||
10343 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 10765 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | |
10344 | rnapi->coal_now); | 10766 | rnapi->coal_now); |
@@ -10359,8 +10781,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) | |||
10359 | 10781 | ||
10360 | udelay(10); | 10782 | udelay(10); |
10361 | 10783 | ||
10362 | /* 250 usec to allow enough time on some 10/100 Mbps devices. */ | 10784 | /* 350 usec to allow enough time on some 10/100 Mbps devices. */ |
10363 | for (i = 0; i < 25; i++) { | 10785 | for (i = 0; i < 35; i++) { |
10364 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | | 10786 | tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | |
10365 | coal_now); | 10787 | coal_now); |
10366 | 10788 | ||
@@ -10565,9 +10987,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
10565 | int err; | 10987 | int err; |
10566 | 10988 | ||
10567 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { | 10989 | if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { |
10990 | struct phy_device *phydev; | ||
10568 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) | 10991 | if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) |
10569 | return -EAGAIN; | 10992 | return -EAGAIN; |
10570 | return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd); | 10993 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; |
10994 | return phy_mii_ioctl(phydev, data, cmd); | ||
10571 | } | 10995 | } |
10572 | 10996 | ||
10573 | switch(cmd) { | 10997 | switch(cmd) { |
@@ -10887,7 +11311,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) | |||
10887 | 11311 | ||
10888 | /* NVRAM protection for TPM */ | 11312 | /* NVRAM protection for TPM */ |
10889 | if (nvcfg1 & (1 << 27)) | 11313 | if (nvcfg1 & (1 << 27)) |
10890 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 11314 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; |
10891 | 11315 | ||
10892 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | 11316 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { |
10893 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: | 11317 | case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: |
@@ -10928,7 +11352,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) | |||
10928 | 11352 | ||
10929 | /* NVRAM protection for TPM */ | 11353 | /* NVRAM protection for TPM */ |
10930 | if (nvcfg1 & (1 << 27)) { | 11354 | if (nvcfg1 & (1 << 27)) { |
10931 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 11355 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; |
10932 | protect = 1; | 11356 | protect = 1; |
10933 | } | 11357 | } |
10934 | 11358 | ||
@@ -11022,7 +11446,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) | |||
11022 | 11446 | ||
11023 | /* NVRAM protection for TPM */ | 11447 | /* NVRAM protection for TPM */ |
11024 | if (nvcfg1 & (1 << 27)) { | 11448 | if (nvcfg1 & (1 << 27)) { |
11025 | tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; | 11449 | tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM; |
11026 | protect = 1; | 11450 | protect = 1; |
11027 | } | 11451 | } |
11028 | 11452 | ||
@@ -11283,7 +11707,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
11283 | tg3_get_5761_nvram_info(tp); | 11707 | tg3_get_5761_nvram_info(tp); |
11284 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 11708 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
11285 | tg3_get_5906_nvram_info(tp); | 11709 | tg3_get_5906_nvram_info(tp); |
11286 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 11710 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
11711 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
11287 | tg3_get_57780_nvram_info(tp); | 11712 | tg3_get_57780_nvram_info(tp); |
11288 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 11713 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) |
11289 | tg3_get_5717_nvram_info(tp); | 11714 | tg3_get_5717_nvram_info(tp); |
@@ -11524,7 +11949,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) | |||
11524 | 11949 | ||
11525 | tg3_enable_nvram_access(tp); | 11950 | tg3_enable_nvram_access(tp); |
11526 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 11951 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
11527 | !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) | 11952 | !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) |
11528 | tw32(NVRAM_WRITE1, 0x406); | 11953 | tw32(NVRAM_WRITE1, 0x406); |
11529 | 11954 | ||
11530 | grc_mode = tr32(GRC_MODE); | 11955 | grc_mode = tr32(GRC_MODE); |
@@ -12008,7 +12433,7 @@ skip_phy_reset: | |||
12008 | 12433 | ||
12009 | static void __devinit tg3_read_partno(struct tg3 *tp) | 12434 | static void __devinit tg3_read_partno(struct tg3 *tp) |
12010 | { | 12435 | { |
12011 | unsigned char vpd_data[256]; /* in little-endian format */ | 12436 | unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */ |
12012 | unsigned int i; | 12437 | unsigned int i; |
12013 | u32 magic; | 12438 | u32 magic; |
12014 | 12439 | ||
@@ -12017,48 +12442,37 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
12017 | goto out_not_found; | 12442 | goto out_not_found; |
12018 | 12443 | ||
12019 | if (magic == TG3_EEPROM_MAGIC) { | 12444 | if (magic == TG3_EEPROM_MAGIC) { |
12020 | for (i = 0; i < 256; i += 4) { | 12445 | for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { |
12021 | u32 tmp; | 12446 | u32 tmp; |
12022 | 12447 | ||
12023 | /* The data is in little-endian format in NVRAM. | 12448 | /* The data is in little-endian format in NVRAM. |
12024 | * Use the big-endian read routines to preserve | 12449 | * Use the big-endian read routines to preserve |
12025 | * the byte order as it exists in NVRAM. | 12450 | * the byte order as it exists in NVRAM. |
12026 | */ | 12451 | */ |
12027 | if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp)) | 12452 | if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp)) |
12028 | goto out_not_found; | 12453 | goto out_not_found; |
12029 | 12454 | ||
12030 | memcpy(&vpd_data[i], &tmp, sizeof(tmp)); | 12455 | memcpy(&vpd_data[i], &tmp, sizeof(tmp)); |
12031 | } | 12456 | } |
12032 | } else { | 12457 | } else { |
12033 | int vpd_cap; | 12458 | ssize_t cnt; |
12034 | 12459 | unsigned int pos = 0, i = 0; | |
12035 | vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); | 12460 | |
12036 | for (i = 0; i < 256; i += 4) { | 12461 | for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) { |
12037 | u32 tmp, j = 0; | 12462 | cnt = pci_read_vpd(tp->pdev, pos, |
12038 | __le32 v; | 12463 | TG3_NVM_VPD_LEN - pos, |
12039 | u16 tmp16; | 12464 | &vpd_data[pos]); |
12040 | 12465 | if (cnt == -ETIMEDOUT || cnt == -EINTR) | |
12041 | pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, | 12466 | cnt = 0; |
12042 | i); | 12467 | else if (cnt < 0) |
12043 | while (j++ < 100) { | ||
12044 | pci_read_config_word(tp->pdev, vpd_cap + | ||
12045 | PCI_VPD_ADDR, &tmp16); | ||
12046 | if (tmp16 & 0x8000) | ||
12047 | break; | ||
12048 | msleep(1); | ||
12049 | } | ||
12050 | if (!(tmp16 & 0x8000)) | ||
12051 | goto out_not_found; | 12468 | goto out_not_found; |
12052 | |||
12053 | pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, | ||
12054 | &tmp); | ||
12055 | v = cpu_to_le32(tmp); | ||
12056 | memcpy(&vpd_data[i], &v, sizeof(v)); | ||
12057 | } | 12469 | } |
12470 | if (pos != TG3_NVM_VPD_LEN) | ||
12471 | goto out_not_found; | ||
12058 | } | 12472 | } |
12059 | 12473 | ||
12060 | /* Now parse and find the part number. */ | 12474 | /* Now parse and find the part number. */ |
12061 | for (i = 0; i < 254; ) { | 12475 | for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) { |
12062 | unsigned char val = vpd_data[i]; | 12476 | unsigned char val = vpd_data[i]; |
12063 | unsigned int block_end; | 12477 | unsigned int block_end; |
12064 | 12478 | ||
@@ -12077,7 +12491,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
12077 | (vpd_data[i + 2] << 8))); | 12491 | (vpd_data[i + 2] << 8))); |
12078 | i += 3; | 12492 | i += 3; |
12079 | 12493 | ||
12080 | if (block_end > 256) | 12494 | if (block_end > TG3_NVM_VPD_LEN) |
12081 | goto out_not_found; | 12495 | goto out_not_found; |
12082 | 12496 | ||
12083 | while (i < (block_end - 2)) { | 12497 | while (i < (block_end - 2)) { |
@@ -12086,7 +12500,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
12086 | int partno_len = vpd_data[i + 2]; | 12500 | int partno_len = vpd_data[i + 2]; |
12087 | 12501 | ||
12088 | i += 3; | 12502 | i += 3; |
12089 | if (partno_len > 24 || (partno_len + i) > 256) | 12503 | if (partno_len > TG3_BPN_SIZE || |
12504 | (partno_len + i) > TG3_NVM_VPD_LEN) | ||
12090 | goto out_not_found; | 12505 | goto out_not_found; |
12091 | 12506 | ||
12092 | memcpy(tp->board_part_number, | 12507 | memcpy(tp->board_part_number, |
@@ -12117,6 +12532,8 @@ out_not_found: | |||
12117 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && | 12532 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && |
12118 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) | 12533 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) |
12119 | strcpy(tp->board_part_number, "BCM57788"); | 12534 | strcpy(tp->board_part_number, "BCM57788"); |
12535 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
12536 | strcpy(tp->board_part_number, "BCM57765"); | ||
12120 | else | 12537 | else |
12121 | strcpy(tp->board_part_number, "none"); | 12538 | strcpy(tp->board_part_number, "none"); |
12122 | } | 12539 | } |
@@ -12400,13 +12817,21 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12400 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { | 12817 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { |
12401 | u32 prod_id_asic_rev; | 12818 | u32 prod_id_asic_rev; |
12402 | 12819 | ||
12403 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || | 12820 | if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || |
12404 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || | 12821 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || |
12405 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || | 12822 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724) |
12406 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S) | ||
12407 | pci_read_config_dword(tp->pdev, | 12823 | pci_read_config_dword(tp->pdev, |
12408 | TG3PCI_GEN2_PRODID_ASICREV, | 12824 | TG3PCI_GEN2_PRODID_ASICREV, |
12409 | &prod_id_asic_rev); | 12825 | &prod_id_asic_rev); |
12826 | else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || | ||
12827 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || | ||
12828 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || | ||
12829 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 || | ||
12830 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 || | ||
12831 | tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) | ||
12832 | pci_read_config_dword(tp->pdev, | ||
12833 | TG3PCI_GEN15_PRODID_ASICREV, | ||
12834 | &prod_id_asic_rev); | ||
12410 | else | 12835 | else |
12411 | pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, | 12836 | pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, |
12412 | &prod_id_asic_rev); | 12837 | &prod_id_asic_rev); |
@@ -12560,7 +12985,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12560 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 12985 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
12561 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 12986 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
12562 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 12987 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
12563 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 12988 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
12989 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
12564 | tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; | 12990 | tp->tg3_flags3 |= TG3_FLG3_5755_PLUS; |
12565 | 12991 | ||
12566 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 12992 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || |
@@ -12586,6 +13012,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12586 | tp->dev->features |= NETIF_F_IPV6_CSUM; | 13012 | tp->dev->features |= NETIF_F_IPV6_CSUM; |
12587 | } | 13013 | } |
12588 | 13014 | ||
13015 | /* Determine TSO capabilities */ | ||
13016 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
13017 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
13018 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; | ||
13019 | else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | ||
13020 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | ||
13021 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | ||
13022 | else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | ||
13023 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; | ||
13024 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 && | ||
13025 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) | ||
13026 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; | ||
13027 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | ||
13028 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 && | ||
13029 | tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { | ||
13030 | tp->tg3_flags2 |= TG3_FLG2_TSO_BUG; | ||
13031 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) | ||
13032 | tp->fw_needed = FIRMWARE_TG3TSO5; | ||
13033 | else | ||
13034 | tp->fw_needed = FIRMWARE_TG3TSO; | ||
13035 | } | ||
13036 | |||
13037 | tp->irq_max = 1; | ||
13038 | |||
12589 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { | 13039 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { |
12590 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; | 13040 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; |
12591 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || | 13041 | if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || |
@@ -12597,29 +13047,31 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12597 | 13047 | ||
12598 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 13048 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || |
12599 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { | 13049 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { |
12600 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | ||
12601 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; | 13050 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; |
12602 | } else { | ||
12603 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG; | ||
12604 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | ||
12605 | ASIC_REV_5750 && | ||
12606 | tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2) | ||
12607 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG; | ||
12608 | } | 13051 | } |
12609 | } | ||
12610 | 13052 | ||
12611 | tp->irq_max = 1; | 13053 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13054 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | ||
13055 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; | ||
13056 | tp->irq_max = TG3_IRQ_MAX_VECS; | ||
13057 | } | ||
13058 | } | ||
12612 | 13059 | ||
12613 | #ifdef TG3_NAPI | 13060 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
12614 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | 13061 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) |
12615 | tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; | 13062 | tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG; |
12616 | tp->irq_max = TG3_IRQ_MAX_VECS; | 13063 | else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) { |
13064 | tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG; | ||
13065 | tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; | ||
12617 | } | 13066 | } |
12618 | #endif | 13067 | |
13068 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
13069 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
13070 | tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; | ||
12619 | 13071 | ||
12620 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || | 13072 | if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || |
12621 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || | 13073 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || |
12622 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 13074 | (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) |
12623 | tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; | 13075 | tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; |
12624 | 13076 | ||
12625 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, | 13077 | pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, |
@@ -12812,7 +13264,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12812 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 13264 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || |
12813 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 13265 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
12814 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 13266 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || |
12815 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | 13267 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || |
13268 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
12816 | tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; | 13269 | tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; |
12817 | 13270 | ||
12818 | /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). | 13271 | /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). |
@@ -12891,7 +13344,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12891 | !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && | 13344 | !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) && |
12892 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && | 13345 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && |
12893 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && | 13346 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 && |
12894 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 13347 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
13348 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | ||
12895 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || | 13349 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || |
12896 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | 13350 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || |
12897 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || | 13351 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || |
@@ -12926,11 +13380,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
12926 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) | 13380 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
12927 | tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; | 13381 | tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; |
12928 | 13382 | ||
12929 | if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 && | ||
12930 | tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) || | ||
12931 | tp->pci_chip_rev_id == CHIPREV_ID_57780_A0) | ||
12932 | tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD; | ||
12933 | |||
12934 | err = tg3_mdio_init(tp); | 13383 | err = tg3_mdio_init(tp); |
12935 | if (err) | 13384 | if (err) |
12936 | return err; | 13385 | return err; |
@@ -13220,6 +13669,12 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) | |||
13220 | #endif | 13669 | #endif |
13221 | #endif | 13670 | #endif |
13222 | 13671 | ||
13672 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
13673 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { | ||
13674 | val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT; | ||
13675 | goto out; | ||
13676 | } | ||
13677 | |||
13223 | if (!goal) | 13678 | if (!goal) |
13224 | goto out; | 13679 | goto out; |
13225 | 13680 | ||
@@ -13414,7 +13869,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13414 | { | 13869 | { |
13415 | dma_addr_t buf_dma; | 13870 | dma_addr_t buf_dma; |
13416 | u32 *buf, saved_dma_rwctrl; | 13871 | u32 *buf, saved_dma_rwctrl; |
13417 | int ret; | 13872 | int ret = 0; |
13418 | 13873 | ||
13419 | buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); | 13874 | buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); |
13420 | if (!buf) { | 13875 | if (!buf) { |
@@ -13427,6 +13882,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13427 | 13882 | ||
13428 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); | 13883 | tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); |
13429 | 13884 | ||
13885 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || | ||
13886 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) | ||
13887 | goto out; | ||
13888 | |||
13430 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { | 13889 | if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { |
13431 | /* DMA read watermark not used on PCIE */ | 13890 | /* DMA read watermark not used on PCIE */ |
13432 | tp->dma_rwctrl |= 0x00180000; | 13891 | tp->dma_rwctrl |= 0x00180000; |
@@ -13499,7 +13958,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp) | |||
13499 | tg3_switch_clocks(tp); | 13958 | tg3_switch_clocks(tp); |
13500 | #endif | 13959 | #endif |
13501 | 13960 | ||
13502 | ret = 0; | ||
13503 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && | 13961 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && |
13504 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) | 13962 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) |
13505 | goto out; | 13963 | goto out; |
@@ -13618,7 +14076,8 @@ static void __devinit tg3_init_link_config(struct tg3 *tp) | |||
13618 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) | 14076 | static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) |
13619 | { | 14077 | { |
13620 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS && | 14078 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS && |
13621 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { | 14079 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && |
14080 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) { | ||
13622 | tp->bufmgr_config.mbuf_read_dma_low_water = | 14081 | tp->bufmgr_config.mbuf_read_dma_low_water = |
13623 | DEFAULT_MB_RDMA_LOW_WATER_5705; | 14082 | DEFAULT_MB_RDMA_LOW_WATER_5705; |
13624 | tp->bufmgr_config.mbuf_mac_rx_low_water = | 14083 | tp->bufmgr_config.mbuf_mac_rx_low_water = |
@@ -13678,6 +14137,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) | |||
13678 | case PHY_ID_BCM5756: return "5722/5756"; | 14137 | case PHY_ID_BCM5756: return "5722/5756"; |
13679 | case PHY_ID_BCM5906: return "5906"; | 14138 | case PHY_ID_BCM5906: return "5906"; |
13680 | case PHY_ID_BCM5761: return "5761"; | 14139 | case PHY_ID_BCM5761: return "5761"; |
14140 | case PHY_ID_BCM5717: return "5717"; | ||
13681 | case PHY_ID_BCM8002: return "8002/serdes"; | 14141 | case PHY_ID_BCM8002: return "8002/serdes"; |
13682 | case 0: return "serdes"; | 14142 | case 0: return "serdes"; |
13683 | default: return "unknown"; | 14143 | default: return "unknown"; |
@@ -13919,51 +14379,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
13919 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; | 14379 | tp->rx_pending = TG3_DEF_RX_RING_PENDING; |
13920 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; | 14380 | tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; |
13921 | 14381 | ||
13922 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; | ||
13923 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; | ||
13924 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; | ||
13925 | for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { | ||
13926 | struct tg3_napi *tnapi = &tp->napi[i]; | ||
13927 | |||
13928 | tnapi->tp = tp; | ||
13929 | tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; | ||
13930 | |||
13931 | tnapi->int_mbox = intmbx; | ||
13932 | if (i < 4) | ||
13933 | intmbx += 0x8; | ||
13934 | else | ||
13935 | intmbx += 0x4; | ||
13936 | |||
13937 | tnapi->consmbox = rcvmbx; | ||
13938 | tnapi->prodmbox = sndmbx; | ||
13939 | |||
13940 | if (i) | ||
13941 | tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); | ||
13942 | else | ||
13943 | tnapi->coal_now = HOSTCC_MODE_NOW; | ||
13944 | |||
13945 | if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) | ||
13946 | break; | ||
13947 | |||
13948 | /* | ||
13949 | * If we support MSIX, we'll be using RSS. If we're using | ||
13950 | * RSS, the first vector only handles link interrupts and the | ||
13951 | * remaining vectors handle rx and tx interrupts. Reuse the | ||
13952 | * mailbox values for the next iteration. The values we setup | ||
13953 | * above are still useful for the single vectored mode. | ||
13954 | */ | ||
13955 | if (!i) | ||
13956 | continue; | ||
13957 | |||
13958 | rcvmbx += 0x8; | ||
13959 | |||
13960 | if (sndmbx & 0x4) | ||
13961 | sndmbx -= 0x4; | ||
13962 | else | ||
13963 | sndmbx += 0xc; | ||
13964 | } | ||
13965 | |||
13966 | netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64); | ||
13967 | dev->ethtool_ops = &tg3_ethtool_ops; | 14382 | dev->ethtool_ops = &tg3_ethtool_ops; |
13968 | dev->watchdog_timeo = TG3_TX_TIMEOUT; | 14383 | dev->watchdog_timeo = TG3_TX_TIMEOUT; |
13969 | dev->irq = pdev->irq; | 14384 | dev->irq = pdev->irq; |
@@ -13975,8 +14390,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
13975 | goto err_out_iounmap; | 14390 | goto err_out_iounmap; |
13976 | } | 14391 | } |
13977 | 14392 | ||
13978 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || | 14393 | if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && |
13979 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) | 14394 | tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) |
13980 | dev->netdev_ops = &tg3_netdev_ops; | 14395 | dev->netdev_ops = &tg3_netdev_ops; |
13981 | else | 14396 | else |
13982 | dev->netdev_ops = &tg3_netdev_ops_dma_bug; | 14397 | dev->netdev_ops = &tg3_netdev_ops_dma_bug; |
@@ -14023,46 +14438,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14023 | 14438 | ||
14024 | tg3_init_bufmgr_config(tp); | 14439 | tg3_init_bufmgr_config(tp); |
14025 | 14440 | ||
14026 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) | 14441 | /* Selectively allow TSO based on operating conditions */ |
14027 | tp->fw_needed = FIRMWARE_TG3; | 14442 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || |
14028 | 14443 | (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) | |
14029 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | ||
14030 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 14444 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; |
14445 | else { | ||
14446 | tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG); | ||
14447 | tp->fw_needed = NULL; | ||
14031 | } | 14448 | } |
14032 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || | 14449 | |
14033 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || | 14450 | if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) |
14034 | tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 || | 14451 | tp->fw_needed = FIRMWARE_TG3; |
14035 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || | ||
14036 | (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { | ||
14037 | tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; | ||
14038 | } else { | ||
14039 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; | ||
14040 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) | ||
14041 | tp->fw_needed = FIRMWARE_TG3TSO5; | ||
14042 | else | ||
14043 | tp->fw_needed = FIRMWARE_TG3TSO; | ||
14044 | } | ||
14045 | 14452 | ||
14046 | /* TSO is on by default on chips that support hardware TSO. | 14453 | /* TSO is on by default on chips that support hardware TSO. |
14047 | * Firmware TSO on older chips gives lower performance, so it | 14454 | * Firmware TSO on older chips gives lower performance, so it |
14048 | * is off by default, but can be enabled using ethtool. | 14455 | * is off by default, but can be enabled using ethtool. |
14049 | */ | 14456 | */ |
14050 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { | 14457 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) && |
14051 | if (dev->features & NETIF_F_IP_CSUM) | 14458 | (dev->features & NETIF_F_IP_CSUM)) |
14052 | dev->features |= NETIF_F_TSO; | 14459 | dev->features |= NETIF_F_TSO; |
14053 | if ((dev->features & NETIF_F_IPV6_CSUM) && | 14460 | |
14054 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) | 14461 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) || |
14462 | (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) { | ||
14463 | if (dev->features & NETIF_F_IPV6_CSUM) | ||
14055 | dev->features |= NETIF_F_TSO6; | 14464 | dev->features |= NETIF_F_TSO6; |
14056 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | 14465 | if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) || |
14466 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || | ||
14057 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && | 14467 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && |
14058 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || | 14468 | GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || |
14059 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || | 14469 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || |
14060 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || | 14470 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) |
14061 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) | ||
14062 | dev->features |= NETIF_F_TSO_ECN; | 14471 | dev->features |= NETIF_F_TSO_ECN; |
14063 | } | 14472 | } |
14064 | 14473 | ||
14065 | |||
14066 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && | 14474 | if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && |
14067 | !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && | 14475 | !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && |
14068 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { | 14476 | !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { |
@@ -14074,7 +14482,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14074 | if (err) { | 14482 | if (err) { |
14075 | printk(KERN_ERR PFX "Could not obtain valid ethernet address, " | 14483 | printk(KERN_ERR PFX "Could not obtain valid ethernet address, " |
14076 | "aborting.\n"); | 14484 | "aborting.\n"); |
14077 | goto err_out_fw; | 14485 | goto err_out_iounmap; |
14078 | } | 14486 | } |
14079 | 14487 | ||
14080 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { | 14488 | if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { |
@@ -14083,7 +14491,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14083 | printk(KERN_ERR PFX "Cannot map APE registers, " | 14491 | printk(KERN_ERR PFX "Cannot map APE registers, " |
14084 | "aborting.\n"); | 14492 | "aborting.\n"); |
14085 | err = -ENOMEM; | 14493 | err = -ENOMEM; |
14086 | goto err_out_fw; | 14494 | goto err_out_iounmap; |
14087 | } | 14495 | } |
14088 | 14496 | ||
14089 | tg3_ape_lock_init(tp); | 14497 | tg3_ape_lock_init(tp); |
@@ -14113,6 +14521,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14113 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; | 14521 | tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; |
14114 | tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; | 14522 | tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; |
14115 | 14523 | ||
14524 | intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; | ||
14525 | rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; | ||
14526 | sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; | ||
14527 | for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { | ||
14528 | struct tg3_napi *tnapi = &tp->napi[i]; | ||
14529 | |||
14530 | tnapi->tp = tp; | ||
14531 | tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; | ||
14532 | |||
14533 | tnapi->int_mbox = intmbx; | ||
14534 | if (i < 4) | ||
14535 | intmbx += 0x8; | ||
14536 | else | ||
14537 | intmbx += 0x4; | ||
14538 | |||
14539 | tnapi->consmbox = rcvmbx; | ||
14540 | tnapi->prodmbox = sndmbx; | ||
14541 | |||
14542 | if (i) { | ||
14543 | tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); | ||
14544 | netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); | ||
14545 | } else { | ||
14546 | tnapi->coal_now = HOSTCC_MODE_NOW; | ||
14547 | netif_napi_add(dev, &tnapi->napi, tg3_poll, 64); | ||
14548 | } | ||
14549 | |||
14550 | if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) | ||
14551 | break; | ||
14552 | |||
14553 | /* | ||
14554 | * If we support MSIX, we'll be using RSS. If we're using | ||
14555 | * RSS, the first vector only handles link interrupts and the | ||
14556 | * remaining vectors handle rx and tx interrupts. Reuse the | ||
14557 | * mailbox values for the next iteration. The values we setup | ||
14558 | * above are still useful for the single vectored mode. | ||
14559 | */ | ||
14560 | if (!i) | ||
14561 | continue; | ||
14562 | |||
14563 | rcvmbx += 0x8; | ||
14564 | |||
14565 | if (sndmbx & 0x4) | ||
14566 | sndmbx -= 0x4; | ||
14567 | else | ||
14568 | sndmbx += 0xc; | ||
14569 | } | ||
14570 | |||
14116 | tg3_init_coal(tp); | 14571 | tg3_init_coal(tp); |
14117 | 14572 | ||
14118 | pci_set_drvdata(pdev, dev); | 14573 | pci_set_drvdata(pdev, dev); |
@@ -14131,13 +14586,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
14131 | tg3_bus_string(tp, str), | 14586 | tg3_bus_string(tp, str), |
14132 | dev->dev_addr); | 14587 | dev->dev_addr); |
14133 | 14588 | ||
14134 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) | 14589 | if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { |
14590 | struct phy_device *phydev; | ||
14591 | phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; | ||
14135 | printk(KERN_INFO | 14592 | printk(KERN_INFO |
14136 | "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", | 14593 | "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", |
14137 | tp->dev->name, | 14594 | tp->dev->name, phydev->drv->name, |
14138 | tp->mdio_bus->phy_map[PHY_ADDR]->drv->name, | 14595 | dev_name(&phydev->dev)); |
14139 | dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev)); | 14596 | } else |
14140 | else | ||
14141 | printk(KERN_INFO | 14597 | printk(KERN_INFO |
14142 | "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", | 14598 | "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", |
14143 | tp->dev->name, tg3_phy_string(tp), | 14599 | tp->dev->name, tg3_phy_string(tp), |
@@ -14166,10 +14622,6 @@ err_out_apeunmap: | |||
14166 | tp->aperegs = NULL; | 14622 | tp->aperegs = NULL; |
14167 | } | 14623 | } |
14168 | 14624 | ||
14169 | err_out_fw: | ||
14170 | if (tp->fw) | ||
14171 | release_firmware(tp->fw); | ||
14172 | |||
14173 | err_out_iounmap: | 14625 | err_out_iounmap: |
14174 | if (tp->regs) { | 14626 | if (tp->regs) { |
14175 | iounmap(tp->regs); | 14627 | iounmap(tp->regs); |