Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c  887
1 file changed, 605 insertions(+), 282 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe753b6..6e6db955b4a9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME         "tg3"
 #define PFX DRV_MODULE_NAME     ": "
-#define DRV_MODULE_VERSION      "3.102"
-#define DRV_MODULE_RELDATE      "September 1, 2009"
+#define DRV_MODULE_VERSION      "3.104"
+#define DRV_MODULE_RELDATE      "November 13, 2009"
 
 #define TG3_DEF_MAC_MODE        0
 #define TG3_DEF_RX_MODE         0
@@ -137,6 +137,12 @@
 #define TG3_RX_STD_MAP_SZ       TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ       TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
+#define TG3_RX_STD_BUFF_RING_SIZE \
+        (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+
+#define TG3_RX_JMB_BUFF_RING_SIZE \
+        (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)     ((tnapi)->tx_pending / 4)
 
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
-       if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
+       if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
@@ -937,9 +946,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
        u32 val;
        struct phy_device *phydev;
 
-       phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM50610:
+       case TG3_PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case TG3_PHY_ID_BCMAC131:
@@ -1031,7 +1041,7 @@ static void tg3_mdio_start(struct tg3 *tp)
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
-               tp->phy_addr = PHY_ADDR;
+               tp->phy_addr = TG3_PHY_MII_ADDR;
 
        if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -1062,7 +1072,7 @@ static int tg3_mdio_init(struct tg3 *tp)
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
-       tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
+       tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
        for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1084,7 +1094,7 @@ static int tg3_mdio_init(struct tg3 *tp)
                return i;
        }
 
-       phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
        if (!phydev || !phydev->drv) {
                printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
@@ -1096,8 +1106,14 @@ static int tg3_mdio_init(struct tg3 *tp)
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
+               phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case TG3_PHY_ID_BCM50610:
+       case TG3_PHY_ID_BCM50610M:
+               phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+                                    PHY_BRCM_RX_REFCLK_UNUSED |
+                                    PHY_BRCM_DIS_TXCRXC_NOENRGY |
+                                    PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
@@ -1111,6 +1127,7 @@ static int tg3_mdio_init(struct tg3 *tp)
        case TG3_PHY_ID_RTL8201E:
        case TG3_PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
+               phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
                break;
        }
@@ -1311,7 +1328,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
        u32 old_tx_mode = tp->tx_mode;
 
        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
-               autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
+               autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;
 
@@ -1348,7 +1365,7 @@ static void tg3_adjust_link(struct net_device *dev)
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
-       struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+       struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
        spin_lock_bh(&tp->lock);
 
@@ -1363,8 +1380,11 @@ static void tg3_adjust_link(struct net_device *dev)
 
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
-               else
+               else if (phydev->speed == SPEED_1000 ||
+                        GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
+               else
+                       mac_mode |= MAC_MODE_PORT_MODE_MII;
 
                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
@@ -1434,7 +1454,7 @@ static int tg3_phy_init(struct tg3 *tp)
        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);
 
-       phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
@@ -1461,7 +1481,7 @@ static int tg3_phy_init(struct tg3 *tp)
                                      SUPPORTED_Asym_Pause);
                break;
        default:
-               phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                return -EINVAL;
        }
 
@@ -1479,7 +1499,7 @@ static void tg3_phy_start(struct tg3 *tp)
        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;
 
-       phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
        if (tp->link_config.phy_is_low_power) {
                tp->link_config.phy_is_low_power = 0;
@@ -1499,13 +1519,13 @@ static void tg3_phy_stop(struct tg3 *tp)
        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;
 
-       phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
+       phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
-               phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
        }
 }
@@ -2149,6 +2169,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
+       } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
+               u32 phytest;
+               if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+                       u32 phy;
+
+                       tg3_writephy(tp, MII_ADVERTISE, 0);
+                       tg3_writephy(tp, MII_BMCR,
+                                    BMCR_ANENABLE | BMCR_ANRESTART);
+
+                       tg3_writephy(tp, MII_TG3_FET_TEST,
+                                    phytest | MII_TG3_FET_SHADOW_EN);
+                       if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+                               phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+                               tg3_writephy(tp,
+                                            MII_TG3_FET_SHDW_AUXMODE4,
+                                            phy);
+                       }
+                       tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+               }
+               return;
        } else if (do_low_power) {
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);
@@ -2218,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
 static void tg3_enable_nvram_access(struct tg3 *tp)
 {
        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-           !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+           !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);
 
                tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2229,7 +2269,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
 static void tg3_disable_nvram_access(struct tg3 *tp)
 {
        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
-           !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
+           !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);
 
                tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -2474,7 +2514,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
                struct phy_device *phydev;
                u32 phyid, advertising;
 
-               phydev = tp->mdio_bus->phy_map[PHY_ADDR];
+               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 
                tp->link_config.phy_is_low_power = 1;
 
@@ -3243,15 +3283,6 @@ relink:
                        pci_write_config_word(tp->pdev,
                                              tp->pcie_cap + PCI_EXP_LNKCTL,
                                              newlnkctl);
-       } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-               u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
-               if (tp->link_config.active_speed == SPEED_100 ||
-                   tp->link_config.active_speed == SPEED_10)
-                       newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-               else
-                       newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
-               if (newreg != oldreg)
-                       tw32(TG3_PCIE_LNKCTL, newreg);
        }
 
        if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4375,6 +4406,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
        }
 }
 
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+       if (!ri->skb)
+               return;
+
+       pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+                        map_sz, PCI_DMA_FROMDEVICE);
+       dev_kfree_skb_any(ri->skb);
+       ri->skb = NULL;
+}
+
 /* Returns size of skb allocated or < 0 on error.
  *
  * We only need to fill in the address because the other members
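tg3_rx_skb_free() consolidates an unmap/free/clear sequence that tg3_rx_prodring_free() (rewritten in a later hunk) previously open-coded twice. Two properties make it safe to call unconditionally: it tolerates empty slots, and it clears the pointer after freeing, so a repeat call is a no-op. A userspace analogue of the pattern (struct slot and the names are illustrative):

    #include <stdlib.h>

    struct slot {
            void *buf;                  /* NULL means the slot is empty */
    };

    static void slot_free(struct slot *s)
    {
            if (!s->buf)
                    return;             /* empty slot: nothing to release */
            free(s->buf);
            s->buf = NULL;              /* a second call is now harmless */
    }

    int main(void)
    {
            struct slot s = { .buf = malloc(64) };

            slot_free(&s);
            slot_free(&s);              /* safe: slot already cleared */
            return s.buf != NULL;
    }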
@@ -4386,16 +4428,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
-                           int src_idx, u32 dest_idx_unmasked)
+static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+                           u32 opaque_key, u32 dest_idx_unmasked)
 {
-       struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map, *src_map;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int skb_size, dest_idx;
-       struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
        src_map = NULL;
        switch (opaque_key) {
@@ -4403,8 +4443,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
                desc = &tpr->rx_std[dest_idx];
                map = &tpr->rx_std_buffers[dest_idx];
-               if (src_idx >= 0)
-                       src_map = &tpr->rx_std_buffers[src_idx];
                skb_size = tp->rx_pkt_map_sz;
                break;
 
@@ -4412,8 +4450,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
                desc = &tpr->rx_jmb[dest_idx].std;
                map = &tpr->rx_jmb_buffers[dest_idx];
-               if (src_idx >= 0)
-                       src_map = &tpr->rx_jmb_buffers[src_idx];
                skb_size = TG3_RX_JMB_MAP_SZ;
                break;
 
@@ -4435,13 +4471,14 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
 
        mapping = pci_map_single(tp->pdev, skb->data, skb_size,
                                 PCI_DMA_FROMDEVICE);
+       if (pci_dma_mapping_error(tp->pdev, mapping)) {
+               dev_kfree_skb(skb);
+               return -EIO;
+       }
 
        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);
 
-       if (src_map != NULL)
-               src_map->skb = NULL;
-
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);
 
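The pci_dma_mapping_error() check added above closes a real hole: pci_map_single() can fail, and without the check a bogus bus address would be written into the descriptor and handed to the NIC. The ordering the hunk enforces is allocate, map, check, and only then publish into map->skb and the descriptor; on failure the skb is freed and -EIO returned so the caller can recycle the old buffer instead. A compact runnable sketch of that ordering, where dma_map(), DMA_ERROR, struct desc, and refill_slot() are stand-ins invented for the example:

    #include <stdlib.h>

    #define DMA_ERROR ((unsigned long)-1)       /* stand-in error cookie */

    static unsigned long dma_map(void *buf, size_t len)
    {
            (void)len;
            return buf ? (unsigned long)buf : DMA_ERROR;  /* fake mapper */
    }

    struct desc { unsigned long addr; };

    static int refill_slot(struct desc *d, void **slot, size_t len)
    {
            void *buf = malloc(len);
            unsigned long mapping;

            if (!buf)
                    return -1;          /* -ENOMEM in the driver */

            mapping = dma_map(buf, len);
            if (mapping == DMA_ERROR) { /* check before publishing */
                    free(buf);
                    return -1;          /* -EIO in the driver */
            }

            *slot = buf;                /* only now make it visible */
            d->addr = mapping;
            return 0;
    }

    int main(void)
    {
            struct desc d = { 0 };
            void *slot = NULL;
            int err = refill_slot(&d, &slot, 2048);

            free(slot);
            return err ? 1 : 0;
    }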
@@ -4452,30 +4489,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
  * members of the RX descriptor are invariant.  See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
-                          int src_idx, u32 dest_idx_unmasked)
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+                          struct tg3_rx_prodring_set *dpr,
+                          u32 opaque_key, int src_idx,
+                          u32 dest_idx_unmasked)
 {
        struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        int dest_idx;
-       struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+       struct tg3_rx_prodring_set *spr = &tp->prodring[0];
 
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-               dest_desc = &tpr->rx_std[dest_idx];
-               dest_map = &tpr->rx_std_buffers[dest_idx];
-               src_desc = &tpr->rx_std[src_idx];
-               src_map = &tpr->rx_std_buffers[src_idx];
+               dest_desc = &dpr->rx_std[dest_idx];
+               dest_map = &dpr->rx_std_buffers[dest_idx];
+               src_desc = &spr->rx_std[src_idx];
+               src_map = &spr->rx_std_buffers[src_idx];
                break;
 
        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-               dest_desc = &tpr->rx_jmb[dest_idx].std;
-               dest_map = &tpr->rx_jmb_buffers[dest_idx];
-               src_desc = &tpr->rx_jmb[src_idx].std;
-               src_map = &tpr->rx_jmb_buffers[src_idx];
+               dest_desc = &dpr->rx_jmb[dest_idx].std;
+               dest_map = &dpr->rx_jmb_buffers[dest_idx];
+               src_desc = &spr->rx_jmb[src_idx].std;
+               src_map = &spr->rx_jmb_buffers[src_idx];
                break;
 
        default:
@@ -4487,7 +4526,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
                           pci_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;
-
        src_map->skb = NULL;
 }
 
@@ -4519,10 +4557,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 {
        struct tg3 *tp = tnapi->tp;
        u32 work_mask, rx_std_posted = 0;
+       u32 std_prod_idx, jmb_prod_idx;
        u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx;
        int received;
-       struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+       struct tg3_rx_prodring_set *tpr = tnapi->prodring;
 
        hw_idx = *(tnapi->rx_rcb_prod_idx);
        /*
@@ -4532,7 +4571,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
        rmb();
        work_mask = 0;
        received = 0;
+       std_prod_idx = tpr->rx_std_prod_idx;
+       jmb_prod_idx = tpr->rx_jmb_prod_idx;
        while (sw_idx != hw_idx && budget > 0) {
+               struct ring_info *ri;
                struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
@@ -4542,16 +4584,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
-                       struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
+                       ri = &tp->prodring[0].rx_std_buffers[desc_idx];
                        dma_addr = pci_unmap_addr(ri, mapping);
                        skb = ri->skb;
-                       post_ptr = &tpr->rx_std_ptr;
+                       post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-                       struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
+                       ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
                        dma_addr = pci_unmap_addr(ri, mapping);
                        skb = ri->skb;
-                       post_ptr = &tpr->rx_jmb_ptr;
+                       post_ptr = &jmb_prod_idx;
                } else
                        goto next_pkt_nopost;
 
@@ -4560,7 +4602,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
-                       tg3_recycle_rx(tnapi, opaque_key,
+                       tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
@@ -4580,11 +4622,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                    ) {
                        int skb_size;
 
-                       skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
-                                                   desc_idx, *post_ptr);
+                       skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+                                                   *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;
 
+                       ri->skb = NULL;
+
                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
                                         PCI_DMA_FROMDEVICE);
 
@@ -4592,7 +4636,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                } else {
                        struct sk_buff *copy_skb;
 
-                       tg3_recycle_rx(tnapi, opaque_key,
+                       tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
 
                        copy_skb = netdev_alloc_skb(tp->dev,
@@ -4643,9 +4687,7 @@ next_pkt:
 
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;
-
-                       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
-                                    TG3_64BIT_REG_LOW, idx);
+                       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
@@ -4665,33 +4707,45 @@ next_pkt_nopost:
        tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
        /* Refill RX ring(s). */
-       if (work_mask & RXD_OPAQUE_RING_STD) {
-               sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
-               tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-                            sw_idx);
-       }
-       if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-               sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
-               tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-                            sw_idx);
+       if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+               if (work_mask & RXD_OPAQUE_RING_STD) {
+                       tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+                       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+                                    tpr->rx_std_prod_idx);
+               }
+               if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+                       tpr->rx_jmb_prod_idx = jmb_prod_idx %
+                                              TG3_RX_JUMBO_RING_SIZE;
+                       tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+                                    tpr->rx_jmb_prod_idx);
+               }
+               mmiowb();
+       } else if (work_mask) {
+               /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+                * updated before the producer indices can be updated.
+                */
+               smp_wmb();
+
+               tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+               tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+
+               napi_schedule(&tp->napi[1].napi);
        }
-       mmiowb();
 
        return received;
 }
 
-static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+static void tg3_poll_link(struct tg3 *tp)
 {
-       struct tg3 *tp = tnapi->tp;
-       struct tg3_hw_status *sblk = tnapi->hw_status;
-
        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
+               struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        sblk->status = SD_STATUS_UPDATED |
                                       (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                                tw32_f(MAC_STATUS,
@@ -4705,6 +4759,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
                        spin_unlock(&tp->lock);
                }
        }
+}
+
+static void tg3_rx_prodring_xfer(struct tg3 *tp,
+                                struct tg3_rx_prodring_set *dpr,
+                                struct tg3_rx_prodring_set *spr)
+{
+       u32 si, di, cpycnt, src_prod_idx;
+       int i;
+
+       while (1) {
+               src_prod_idx = spr->rx_std_prod_idx;
+
+               /* Make sure updates to the rx_std_buffers[] entries and the
+                * standard producer index are seen in the correct order.
+                */
+               smp_rmb();
+
+               if (spr->rx_std_cons_idx == src_prod_idx)
+                       break;
+
+               if (spr->rx_std_cons_idx < src_prod_idx)
+                       cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+               else
+                       cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+
+               cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+
+               si = spr->rx_std_cons_idx;
+               di = dpr->rx_std_prod_idx;
+
+               memcpy(&dpr->rx_std_buffers[di],
+                      &spr->rx_std_buffers[si],
+                      cpycnt * sizeof(struct ring_info));
+
+               for (i = 0; i < cpycnt; i++, di++, si++) {
+                       struct tg3_rx_buffer_desc *sbd, *dbd;
+                       sbd = &spr->rx_std[si];
+                       dbd = &dpr->rx_std[di];
+                       dbd->addr_hi = sbd->addr_hi;
+                       dbd->addr_lo = sbd->addr_lo;
+               }
+
+               spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
+                                      TG3_RX_RING_SIZE;
+               dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
+                                      TG3_RX_RING_SIZE;
+       }
+
+       while (1) {
+               src_prod_idx = spr->rx_jmb_prod_idx;
+
+               /* Make sure updates to the rx_jmb_buffers[] entries and
+                * the jumbo producer index are seen in the correct order.
+                */
+               smp_rmb();
+
+               if (spr->rx_jmb_cons_idx == src_prod_idx)
+                       break;
+
+               if (spr->rx_jmb_cons_idx < src_prod_idx)
+                       cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+               else
+                       cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+
+               cpycnt = min(cpycnt,
+                            TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+
+               si = spr->rx_jmb_cons_idx;
+               di = dpr->rx_jmb_prod_idx;
+
+               memcpy(&dpr->rx_jmb_buffers[di],
+                      &spr->rx_jmb_buffers[si],
+                      cpycnt * sizeof(struct ring_info));
+
+               for (i = 0; i < cpycnt; i++, di++, si++) {
+                       struct tg3_rx_buffer_desc *sbd, *dbd;
+                       sbd = &spr->rx_jmb[si].std;
+                       dbd = &dpr->rx_jmb[di].std;
+                       dbd->addr_hi = sbd->addr_hi;
+                       dbd->addr_lo = sbd->addr_lo;
+               }
+
+               spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
+                                      TG3_RX_JUMBO_RING_SIZE;
+               dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
+                                      TG3_RX_JUMBO_RING_SIZE;
+       }
+}
+
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+       struct tg3 *tp = tnapi->tp;
 
        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
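tg3_rx_prodring_xfer() is the consumer half of a single-producer/single-consumer handoff. The producer side in tg3_rx() writes the ring_info entries, issues smp_wmb(), and only then advances rx_std_prod_idx; this function loads the producer index, issues smp_rmb(), and only then copies the entries. Below is a runnable userspace model of the same pairing, using C11 release/acquire atomics where the kernel uses smp_wmb()/smp_rmb(); the ring size and payload are arbitrary:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define RING_SIZE 1024              /* >= item count, so no wraparound */
    #define ITEMS     1000

    static int ring[RING_SIZE];
    static atomic_uint prod_idx;        /* written by the producer only */

    static void *producer(void *arg)
    {
            (void)arg;
            for (unsigned int v = 1; v <= ITEMS; v++) {
                    ring[v - 1] = (int)v;               /* entry first...   */
                    atomic_store_explicit(&prod_idx, v,
                                          memory_order_release); /* ...then index */
            }
            return NULL;
    }

    static void *consumer(void *arg)
    {
            unsigned int cons = 0;
            long sum = 0;

            (void)arg;
            while (sum != (long)ITEMS * (ITEMS + 1) / 2) {
                    unsigned int prod = atomic_load_explicit(&prod_idx,
                                            memory_order_acquire); /* index first */
                    while (cons != prod)
                            sum += ring[cons++];        /* entries now valid */
            }
            printf("sum = %ld\n", sum);
            return NULL;
    }

    int main(void)
    {
            pthread_t pt, ct;

            pthread_create(&ct, NULL, consumer, NULL);
            pthread_create(&pt, NULL, producer, NULL);
            pthread_join(pt, NULL);
            pthread_join(ct, NULL);
            return 0;
    }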
@@ -4720,6 +4866,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);
 
+       if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
+               int i;
+               u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
+               u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+
+               for (i = 2; i < tp->irq_cnt; i++)
+                       tg3_rx_prodring_xfer(tp, tnapi->prodring,
+                                            tp->napi[i].prodring);
+
+               wmb();
+
+               if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
+                       u32 mbox = TG3_RX_STD_PROD_IDX_REG;
+                       tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
+               }
+
+               if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
+                       u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
+                       tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
+               }
+
+               mmiowb();
+       }
+
+       return work_done;
+}
+
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+       struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+       struct tg3 *tp = tnapi->tp;
+       int work_done = 0;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
+
+       while (1) {
+               work_done = tg3_poll_work(tnapi, work_done, budget);
+
+               if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+                       goto tx_recovery;
+
+               if (unlikely(work_done >= budget))
+                       break;
+
+               /* tp->last_tag is used in tg3_restart_ints() below
+                * to tell the hw how much work has been processed,
+                * so we must read it before checking for more work.
+                */
+               tnapi->last_tag = sblk->status_tag;
+               tnapi->last_irq_tag = tnapi->last_tag;
+               rmb();
+
+               /* check for RX/TX work to do */
+               if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+                   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+                       napi_complete(napi);
+                       /* Reenable interrupts. */
+                       tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+                       mmiowb();
+                       break;
+               }
+       }
+
+       return work_done;
+
+tx_recovery:
+       /* work_done is guaranteed to be less than budget. */
+       napi_complete(napi);
+       schedule_work(&tp->reset_task);
        return work_done;
 }
 
@@ -4731,6 +4945,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
        struct tg3_hw_status *sblk = tnapi->hw_status;
 
        while (1) {
+               tg3_poll_link(tp);
+
                work_done = tg3_poll_work(tnapi, work_done, budget);
 
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5093,11 +5309,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
 
 /* Workaround 4GB and 40-bit hardware DMA bugs. */
-static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
-                                      u32 last_plus_one, u32 *start,
-                                      u32 base_flags, u32 mss)
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+                                      struct sk_buff *skb, u32 last_plus_one,
+                                      u32 *start, u32 base_flags, u32 mss)
 {
-       struct tg3_napi *tnapi = &tp->napi[0];
+       struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        u32 entry = *start;
@@ -5124,7 +5340,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
        /* Make sure new skb does not cross any 4G boundaries.
         * Drop the packet if it does.
         */
-       if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
+       if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+                   tg3_4g_overflow_test(new_addr, new_skb->len))) {
                if (!ret)
                        skb_dma_unmap(&tp->pdev->dev, new_skb,
                                      DMA_TO_DEVICE);
@@ -5179,7 +5396,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
 }
 
 /* hard_start_xmit for devices that don't have any bugs and
- * support TG3_FLG2_HW_TSO_2 only.
+ * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
  */
 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev)
@@ -5238,7 +5455,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
                        hdrlen = ip_tcp_len + tcp_opt_len;
                }
 
-               if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+               if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
                        mss |= (hdrlen & 0xc) << 12;
                        if (hdrlen & 0x10)
                                base_flags |= 0x00000010;
@@ -5365,9 +5582,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
        struct skb_shared_info *sp;
        int would_hit_hwbug;
        dma_addr_t mapping;
-       struct tg3_napi *tnapi = &tp->napi[0];
+       struct tg3_napi *tnapi;
+       struct netdev_queue *txq;
 
-       len = skb_headlen(skb);
+       txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+       tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+       if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+               tnapi++;
 
        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5375,8 +5596,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
-               if (!netif_queue_stopped(dev)) {
-                       netif_stop_queue(dev);
+               if (!netif_tx_queue_stopped(txq)) {
+                       netif_tx_stop_queue(txq);
 
                        /* This is a hard error, log it. */
                        printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5389,10 +5610,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;
-       mss = 0;
+
        if ((mss = skb_shinfo(skb)->gso_size) != 0) {
                struct iphdr *iph;
-               int tcp_opt_len, ip_tcp_len, hdr_len;
+               u32 tcp_opt_len, ip_tcp_len, hdr_len;
 
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5644,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                                                 IPPROTO_TCP,
                                                 0);
 
-               if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
-                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
+               if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
+                       mss |= (hdr_len & 0xc) << 12;
+                       if (hdr_len & 0x10)
+                               base_flags |= 0x00000010;
+                       base_flags |= (hdr_len & 0x3e0) << 5;
+               } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+                       mss |= hdr_len << 9;
+               else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
+                        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;
 
@@ -5446,6 +5674,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                               (vlan_tx_tag_get(skb) << 16));
 #endif
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+           !mss && skb->len > ETH_DATA_LEN)
+               base_flags |= TXD_FLAG_JMB_PKT;
+
        if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
                dev_kfree_skb(skb);
                goto out_unlock;
@@ -5459,9 +5691,20 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
        would_hit_hwbug = 0;
 
-       if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
+       len = skb_headlen(skb);
+
+       if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
                would_hit_hwbug = 1;
-       else if (tg3_4g_overflow_test(mapping, len))
+
+       if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+           tg3_4g_overflow_test(mapping, len))
+               would_hit_hwbug = 1;
+
+       if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+           tg3_40bit_overflow_test(tp, mapping, len))
+               would_hit_hwbug = 1;
+
+       if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
                would_hit_hwbug = 1;
 
        tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5482,10 +5725,16 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
 
                        tnapi->tx_buffers[entry].skb = NULL;
 
-                       if (tg3_4g_overflow_test(mapping, len))
+                       if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
+                           len <= 8)
                                would_hit_hwbug = 1;
 
-                       if (tg3_40bit_overflow_test(tp, mapping, len))
+                       if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+                           tg3_4g_overflow_test(mapping, len))
+                               would_hit_hwbug = 1;
+
+                       if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+                           tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;
 
                        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
@@ -5509,7 +5758,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
-               if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
+               if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
                                                &start, base_flags, mss))
                        goto out_unlock;
 
@@ -5517,13 +5766,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
        }
 
        /* Packets are ready, update Tx producer idx local and on card. */
-       tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
+       tw32_tx_mbox(tnapi->prodmbox, entry);
 
        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-               netif_stop_queue(dev);
+               netif_tx_stop_queue(txq);
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-                       netif_wake_queue(tp->dev);
+                       netif_tx_wake_queue(txq);
        }
 
 out_unlock:
@@ -5594,36 +5843,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
 {
        int i;
-       struct ring_info *rxp;
-
-       for (i = 0; i < TG3_RX_RING_SIZE; i++) {
-               rxp = &tpr->rx_std_buffers[i];
 
-               if (rxp->skb == NULL)
-                       continue;
+       if (tpr != &tp->prodring[0]) {
+               for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+                    i = (i + 1) % TG3_RX_RING_SIZE)
+                       tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                                       tp->rx_pkt_map_sz);
+
+               if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+                       for (i = tpr->rx_jmb_cons_idx;
+                            i != tpr->rx_jmb_prod_idx;
+                            i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+                               tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                                               TG3_RX_JMB_MAP_SZ);
+                       }
+               }
 
-               pci_unmap_single(tp->pdev,
-                                pci_unmap_addr(rxp, mapping),
-                                tp->rx_pkt_map_sz,
-                                PCI_DMA_FROMDEVICE);
-               dev_kfree_skb_any(rxp->skb);
-               rxp->skb = NULL;
+               return;
        }
 
-       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-               for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-                       rxp = &tpr->rx_jmb_buffers[i];
-
-                       if (rxp->skb == NULL)
-                               continue;
-
-                       pci_unmap_single(tp->pdev,
-                                        pci_unmap_addr(rxp, mapping),
-                                        TG3_RX_JMB_MAP_SZ,
-                                        PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any(rxp->skb);
-                       rxp->skb = NULL;
-               }
+       for (i = 0; i < TG3_RX_RING_SIZE; i++)
+               tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                               tp->rx_pkt_map_sz);
+
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+               for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+                       tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                                       TG3_RX_JMB_MAP_SZ);
        }
 }
 
@@ -5638,7 +5884,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
 {
        u32 i, rx_pkt_dma_sz;
-       struct tg3_napi *tnapi = &tp->napi[0];
+
+       tpr->rx_std_cons_idx = 0;
+       tpr->rx_std_prod_idx = 0;
+       tpr->rx_jmb_cons_idx = 0;
+       tpr->rx_jmb_prod_idx = 0;
+
+       if (tpr != &tp->prodring[0]) {
+               memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+               if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
+                       memset(&tpr->rx_jmb_buffers[0], 0,
+                              TG3_RX_JMB_BUFF_RING_SIZE);
+               goto done;
+       }
 
        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5665,7 +5923,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
-               if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+               if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
@@ -5696,8 +5954,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
        }
 
        for (i = 0; i < tp->rx_jumbo_pending; i++) {
-               if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
-                                    -1, i) < 0) {
+               if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
+                                    i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX jumbo ring, "
                               "only %d out of %d buffers were "
@@ -5741,8 +5999,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 static int tg3_rx_prodring_init(struct tg3 *tp,
                                struct tg3_rx_prodring_set *tpr)
 {
-       tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
-                                     TG3_RX_RING_SIZE, GFP_KERNEL);
+       tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
        if (!tpr->rx_std_buffers)
                return -ENOMEM;
 
@@ -5752,8 +6009,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
                goto err_out;
 
        if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-               tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
-                                             TG3_RX_JUMBO_RING_SIZE,
-                                             GFP_KERNEL);
+               tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
+                                             GFP_KERNEL);
                if (!tpr->rx_jmb_buffers)
                        goto err_out;
@@ -5809,9 +6065,10 @@ static void tg3_free_rings(struct tg3 *tp)
 
                        dev_kfree_skb_any(skb);
                }
-       }
 
-       tg3_rx_prodring_free(tp, &tp->prodring[0]);
+               if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
+                       tg3_rx_prodring_free(tp, &tp->prodring[j]);
+       }
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -5845,9 +6102,13 @@ static int tg3_init_rings(struct tg3 *tp)
                tnapi->rx_rcb_ptr = 0;
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+               if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
+                   tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+                       return -ENOMEM;
        }
 
-       return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
+       return 0;
 }
 
 /*
@@ -5891,7 +6152,8 @@ static void tg3_free_consistent(struct tg3 *tp)
                tp->hw_stats = NULL;
        }
 
-       tg3_rx_prodring_fini(tp, &tp->prodring[0]);
+       for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+               tg3_rx_prodring_fini(tp, &tp->prodring[i]);
 }
 
 /*
@@ -5902,8 +6164,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
        int i;
 
-       if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
-               return -ENOMEM;
+       for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+               if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
+                       goto err_out;
+       }
 
        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
@@ -5947,6 +6211,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                        break;
                }
 
+               if (tp->irq_cnt == 1)
+                       tnapi->prodring = &tp->prodring[0];
+               else if (i)
+                       tnapi->prodring = &tp->prodring[i - 1];
+
                /*
                 * If multivector RSS is enabled, vector 0 does not handle
                 * rx or tx interrupts.  Don't allocate any resources for it.
@@ -6580,6 +6849,30 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        tg3_mdio_start(tp);
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+               u8 phy_addr;
+
+               phy_addr = tp->phy_addr;
+               tp->phy_addr = TG3_PHY_PCIE_ADDR;
+
+               tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+                            TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
+               val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
+                     TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
+                     TG3_PCIEPHY_TX0CTRL1_NB_EN;
+               tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
+               udelay(10);
+
+               tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
+                            TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
+               val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
+                     TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
+               tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
+               udelay(10);
+
+               tp->phy_addr = phy_addr;
+       }
+
        if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
@@ -7162,15 +7455,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
 
                tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
-       }
 
-       if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
-               val = tr32(TG3_PCIE_LNKCTL);
-               if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
-                       val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-               else
-                       val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
-               tw32(TG3_PCIE_LNKCTL, val);
+               val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+               tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }
 
        /* This works around an issue with Athlon chipsets on
@@ -7217,9 +7504,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (err)
                return err;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+               val = tr32(TG3PCI_DMA_RW_CTRL) &
+                     ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+               tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
                /* This value is determined during the probe time DMA
                 * engine test, tg3_test_dma.
                 */
@@ -7342,8 +7632,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
             ((u64) tpr->rx_std_mapping >> 32));
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tpr->rx_std_mapping & 0xffffffff));
-       tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
-            NIC_SRAM_RX_BUFFER_DESC);
+       if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+               tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+                    NIC_SRAM_RX_BUFFER_DESC);
 
        /* Disable the mini ring */
        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
@@ -7366,8 +7657,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
                             (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
                             BDINFO_FLAGS_USE_EXT_RECV);
-                       tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
-                            NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+                       if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+                               tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+                                    NIC_SRAM_RX_JUMBO_BUFFER_DESC);
                } else {
                        tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
                             BDINFO_FLAGS_DISABLED);
@@ -7383,14 +7675,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 
-       tpr->rx_std_ptr = tp->rx_pending;
-       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-                    tpr->rx_std_ptr);
+       tpr->rx_std_prod_idx = tp->rx_pending;
+       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 
-       tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
-                         tp->rx_jumbo_pending : 0;
-       tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-                    tpr->rx_jmb_ptr);
+       tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
+                              tp->rx_jumbo_pending : 0;
+       tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                tw32(STD_REPLENISH_LWM, 32);
@@ -7453,7 +7743,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
                rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+       if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+           GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
 
@@ -7602,6 +7893,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
                val |= WDMAC_MODE_STATUS_TAG_FIX;
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+               val |= WDMAC_MODE_BURST_ALL_DATA;
+
        tw32_f(WDMAC_MODE, val);
        udelay(40);
 
@@ -9240,9 +9534,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9240 struct tg3 *tp = netdev_priv(dev); 9534 struct tg3 *tp = netdev_priv(dev);
9241 9535
9242 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9536 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9537 struct phy_device *phydev;
9243 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9538 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9244 return -EAGAIN; 9539 return -EAGAIN;
9245 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9540 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9541 return phy_ethtool_gset(phydev, cmd);
9246 } 9542 }
9247 9543
9248 cmd->supported = (SUPPORTED_Autoneg); 9544 cmd->supported = (SUPPORTED_Autoneg);
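
Note: this hunk and the following ethtool/ioctl/probe hunks all apply the same two-part cleanup: PHY_ADDR is renamed TG3_PHY_MII_ADDR, and the phy_map[] lookup is hoisted into a local phydev variable instead of being buried inside the call expression. A standalone sketch of the pattern; the struct layouts and the address value are stand-ins, not the kernel's definitions:

#include <stdio.h>

#define TG3_PHY_MII_ADDR 0x01   /* assumed value; formerly PHY_ADDR */
#define PHY_MAX_ADDR     32

struct phy_device { const char *name; };
struct mii_bus    { struct phy_device *phy_map[PHY_MAX_ADDR]; };

static const char *attached_phy_name(const struct mii_bus *bus)
{
        /* Hoist the lookup into a local, as the patch does. */
        struct phy_device *phydev = bus->phy_map[TG3_PHY_MII_ADDR];

        return phydev ? phydev->name : "(none)";
}

int main(void)
{
        struct phy_device phy = { "broadcom-54xx" };
        struct mii_bus bus = { .phy_map = { [TG3_PHY_MII_ADDR] = &phy } };

        printf("phy at addr %d: %s\n", TG3_PHY_MII_ADDR,
               attached_phy_name(&bus));
        return 0;
}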
@@ -9281,9 +9577,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9281 struct tg3 *tp = netdev_priv(dev); 9577 struct tg3 *tp = netdev_priv(dev);
9282 9578
9283 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9579 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9580 struct phy_device *phydev;
9284 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9581 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9285 return -EAGAIN; 9582 return -EAGAIN;
9286 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9583 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9584 return phy_ethtool_sset(phydev, cmd);
9287 } 9585 }
9288 9586
9289 if (cmd->autoneg != AUTONEG_ENABLE && 9587 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -9436,15 +9734,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
9436 return 0; 9734 return 0;
9437 } 9735 }
9438 if ((dev->features & NETIF_F_IPV6_CSUM) && 9736 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9439 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { 9737 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9738 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9440 if (value) { 9739 if (value) {
9441 dev->features |= NETIF_F_TSO6; 9740 dev->features |= NETIF_F_TSO6;
9442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 9741 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9443 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 9743 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9444 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 9744 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 9745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 9746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9448 dev->features |= NETIF_F_TSO_ECN; 9747 dev->features |= NETIF_F_TSO_ECN;
9449 } else 9748 } else
9450 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9749 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
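
Note: the TSO6 gate widens from "HW_TSO_2 only" to "HW_TSO_2 or the new HW_TSO_3", and the explicit 5717 ASIC check drops out of the TSO_ECN list because HW_TSO_3 now covers it. A toy version of the widened test; the bit positions are made up, tg3.h defines the real flags:

#include <stdio.h>

/* Assumed bit positions, for illustration only. */
#define TG3_FLG2_HW_TSO_2 (1u << 27)
#define TG3_FLG2_HW_TSO_3 (1u << 28)   /* introduced by this patch */

static int tso6_capable(unsigned int flags2)
{
        /* Equivalent to the patch's two ORed tests. */
        return (flags2 & (TG3_FLG2_HW_TSO_2 | TG3_FLG2_HW_TSO_3)) != 0;
}

int main(void)
{
        printf("tso3: %d  none: %d\n",
               tso6_capable(TG3_FLG2_HW_TSO_3), tso6_capable(0));
        return 0;
}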
@@ -9466,7 +9765,7 @@ static int tg3_nway_reset(struct net_device *dev)
9466 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9765 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9467 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9766 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9468 return -EAGAIN; 9767 return -EAGAIN;
9469 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]); 9768 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9470 } else { 9769 } else {
9471 u32 bmcr; 9770 u32 bmcr;
9472 9771
@@ -9585,7 +9884,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9585 u32 newadv; 9884 u32 newadv;
9586 struct phy_device *phydev; 9885 struct phy_device *phydev;
9587 9886
9588 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 9887 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9589 9888
9590 if (epause->rx_pause) { 9889 if (epause->rx_pause) {
9591 if (epause->tx_pause) 9890 if (epause->tx_pause)
@@ -10338,7 +10637,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10338 for (i = 14; i < tx_len; i++) 10637 for (i = 14; i < tx_len; i++)
10339 tx_data[i] = (u8) (i & 0xff); 10638 tx_data[i] = (u8) (i & 0xff);
10340 10639
10341 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 10640 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10641 dev_kfree_skb(skb);
10642 return -EIO;
10643 }
10342 10644
10343 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10645 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10344 rnapi->coal_now); 10646 rnapi->coal_now);
@@ -10349,7 +10651,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10349 10651
10350 num_pkts = 0; 10652 num_pkts = 0;
10351 10653
10352 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); 10654 tg3_set_txd(tnapi, tnapi->tx_prod,
10655 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10353 10656
10354 tnapi->tx_prod++; 10657 tnapi->tx_prod++;
10355 num_pkts++; 10658 num_pkts++;
@@ -10359,8 +10662,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10359 10662
10360 udelay(10); 10663 udelay(10);
10361 10664
10362 /* 250 usec to allow enough time on some 10/100 Mbps devices. */ 10665 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10363 for (i = 0; i < 25; i++) { 10666 for (i = 0; i < 35; i++) {
10364 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10667 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10365 coal_now); 10668 coal_now);
10366 10669
@@ -10373,7 +10676,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10373 break; 10676 break;
10374 } 10677 }
10375 10678
10376 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 10679 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10377 dev_kfree_skb(skb); 10680 dev_kfree_skb(skb);
10378 10681
10379 if (tx_idx != tnapi->tx_prod) 10682 if (tx_idx != tnapi->tx_prod)
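
Note: the loopback test switches from pci_map_single()/pci_unmap_single() on skb->data to the skb_dma_map()/skb_dma_unmap() helpers, which map the whole skb in one call, publish the head mapping in skb_shinfo(skb)->dma_head, and can fail, hence the new free-and-return -EIO branch. Separately, the completion poll stretches from 250 to 350 usec for slow 10/100 links. A toy model of the mapping contract; the structs below are stand-ins, not kernel types:

#include <stdio.h>

typedef unsigned long dma_addr_t;

struct toy_shinfo { dma_addr_t dma_head; };
struct toy_skb    { void *data; unsigned int len; struct toy_shinfo shinfo; };

/* Models skb_dma_map(): nonzero return means nothing was mapped and the
 * caller must free the skb instead of transmitting. */
static int toy_skb_dma_map(struct toy_skb *skb)
{
        if (!skb->data)
                return -1;
        skb->shinfo.dma_head = (dma_addr_t)skb->data; /* stand-in bus address */
        return 0;
}

int main(void)
{
        char frame[64];
        struct toy_skb skb = { frame, sizeof(frame), { 0 } };

        if (toy_skb_dma_map(&skb)) {   /* mirrors: if (skb_dma_map(...)) */
                fprintf(stderr, "map failed, dropping frame\n");
                return 1;
        }
        printf("dma_head=%#lx len=%u\n", skb.shinfo.dma_head, skb.len);
        return 0;
}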
@@ -10565,9 +10868,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10565 int err; 10868 int err;
10566 10869
10567 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10870 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10871 struct phy_device *phydev;
10568 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 10872 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10569 return -EAGAIN; 10873 return -EAGAIN;
10570 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd); 10874 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10875 return phy_mii_ioctl(phydev, data, cmd);
10571 } 10876 }
10572 10877
10573 switch(cmd) { 10878 switch(cmd) {
@@ -10887,7 +11192,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10887 11192
10888 /* NVRAM protection for TPM */ 11193 /* NVRAM protection for TPM */
10889 if (nvcfg1 & (1 << 27)) 11194 if (nvcfg1 & (1 << 27))
10890 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11195 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10891 11196
10892 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11197 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10893 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 11198 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
@@ -10928,7 +11233,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10928 11233
10929 /* NVRAM protection for TPM */ 11234 /* NVRAM protection for TPM */
10930 if (nvcfg1 & (1 << 27)) { 11235 if (nvcfg1 & (1 << 27)) {
10931 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11236 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10932 protect = 1; 11237 protect = 1;
10933 } 11238 }
10934 11239
@@ -11022,7 +11327,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11022 11327
11023 /* NVRAM protection for TPM */ 11328 /* NVRAM protection for TPM */
11024 if (nvcfg1 & (1 << 27)) { 11329 if (nvcfg1 & (1 << 27)) {
11025 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11330 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11026 protect = 1; 11331 protect = 1;
11027 } 11332 }
11028 11333
@@ -11524,7 +11829,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11524 11829
11525 tg3_enable_nvram_access(tp); 11830 tg3_enable_nvram_access(tp);
11526 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 11831 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11527 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) 11832 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11528 tw32(NVRAM_WRITE1, 0x406); 11833 tw32(NVRAM_WRITE1, 0x406);
11529 11834
11530 grc_mode = tr32(GRC_MODE); 11835 grc_mode = tr32(GRC_MODE);
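
Note: the NVRAM hunks above move PROTECTED_NVRAM from the tg3_flags2 word to tg3_flags3 (TG3_FLG2_PROTECTED_NVRAM becomes TG3_FLG3_PROTECTED_NVRAM), and the test in the write path is updated in the same patch. That pairing is the important part: setting a bit in one flag word while testing it in the other would silently disable the protection. A compile-and-run check of the paired update; the bit value is assumed:

#include <assert.h>
#include <stdio.h>

#define TG3_FLG3_PROTECTED_NVRAM (1u << 5)   /* assumed position */

struct tg3_flags { unsigned int flags2, flags3; };

int main(void)
{
        struct tg3_flags tp = { 0, 0 };

        tp.flags3 |= TG3_FLG3_PROTECTED_NVRAM;        /* setter: word 3 */
        assert(tp.flags3 & TG3_FLG3_PROTECTED_NVRAM); /* tester: word 3 */
        assert(tp.flags2 == 0);                       /* word 2 untouched */
        printf("flag pair consistent\n");
        return 0;
}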
@@ -12400,10 +12705,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { 12705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12401 u32 prod_id_asic_rev; 12706 u32 prod_id_asic_rev;
12402 12707
12403 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || 12708 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12404 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || 12709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || 12710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12407 pci_read_config_dword(tp->pdev, 12711 pci_read_config_dword(tp->pdev,
12408 TG3PCI_GEN2_PRODID_ASICREV, 12712 TG3PCI_GEN2_PRODID_ASICREV,
12409 &prod_id_asic_rev); 12713 &prod_id_asic_rev);
@@ -12586,6 +12890,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12586 tp->dev->features |= NETIF_F_IPV6_CSUM; 12890 tp->dev->features |= NETIF_F_IPV6_CSUM;
12587 } 12891 }
12588 12892
12893 /* Determine TSO capabilities */
12894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12895 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12896 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12898 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12899 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12900 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12902 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12903 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12904 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12906 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12907 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12909 tp->fw_needed = FIRMWARE_TG3TSO5;
12910 else
12911 tp->fw_needed = FIRMWARE_TG3TSO;
12912 }
12913
12914 tp->irq_max = 1;
12915
12589 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 12916 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12590 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 12917 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12591 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 12918 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
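
Note: the new "Determine TSO capabilities" block consolidates logic previously scattered across tg3_get_invariants() and tg3_init_one(): 5717 gets the new HW_TSO_3 engine, 5755+/5906 get HW_TSO_2, other 5750+ parts get HW_TSO_1 (with the TSO_BUG workaround lifted on 5750 C2 and later), and everything else falls back to firmware TSO where a firmware image exists. The ladder as a pure decision function; a sketch, not the driver's code:

#include <stdio.h>

enum tso_engine { TSO_HW_3, TSO_HW_2, TSO_HW_1, TSO_FW, TSO_NONE };

static enum tso_engine pick_tso(int is_5717, int is_5755_plus_or_5906,
                                int is_5750_plus, int fw_tso_possible)
{
        if (is_5717)
                return TSO_HW_3;
        if (is_5755_plus_or_5906)
                return TSO_HW_2;
        if (is_5750_plus)
                return TSO_HW_1;        /* TSO_BUG cleared on 5750 >= C2 */
        return fw_tso_possible ? TSO_FW : TSO_NONE;
}

int main(void)
{
        printf("5717 -> %d, legacy+fw -> %d\n",
               pick_tso(1, 0, 0, 0), pick_tso(0, 0, 0, 1));
        return 0;
}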
@@ -12597,25 +12924,22 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12597 12924
12598 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 12925 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12600 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12601 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12927 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12602 } else {
12603 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12604 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12605 ASIC_REV_5750 &&
12606 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12607 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12608 } 12928 }
12609 }
12610 12929
12611 tp->irq_max = 1; 12930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12931 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12932 tp->irq_max = TG3_IRQ_MAX_VECS;
12933 }
12934 }
12612 12935
12613#ifdef TG3_NAPI 12936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12614 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 12937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12615 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 12938 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12616 tp->irq_max = TG3_IRQ_MAX_VECS; 12939 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12940 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12941 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12617 } 12942 }
12618#endif
12619 12943
12620 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 12944 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12621 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 12945 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
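
Note: two things happen in the hunk above. MSI-X support leaves its experimental #ifdef TG3_NAPI guard, with 5717 advertising TG3_IRQ_MAX_VECS vectors and everything else staying at one; and the per-chip DMA quirks become explicit flags (SHORT_DMA_BUG on 5717/5906, the 4G-boundary and 40-bit-limit bugs on pre-5755 parts). A sketch of the vector budget; the vector count is an assumption, tg3.h defines it:

#include <stdio.h>

#define TG3_IRQ_MAX_VECS 5   /* assumed; see tg3.h */

static int irq_budget(int is_5717)
{
        /* 5717-class parts support MSI-X and get the full vector set;
         * every other chip stays single-vectored. */
        return is_5717 ? TG3_IRQ_MAX_VECS : 1;
}

int main(void)
{
        printf("5717: %d, others: %d\n", irq_budget(1), irq_budget(0));
        return 0;
}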
@@ -12926,11 +13250,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12927 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 13251 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12928 13252
12929 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12930 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12931 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12932 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12933
12934 err = tg3_mdio_init(tp); 13253 err = tg3_mdio_init(tp);
12935 if (err) 13254 if (err)
12936 return err; 13255 return err;
@@ -13220,6 +13539,11 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13220#endif 13539#endif
13221#endif 13540#endif
13222 13541
13542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13543 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13544 goto out;
13545 }
13546
13223 if (!goal) 13547 if (!goal)
13224 goto out; 13548 goto out;
13225 13549
@@ -13414,7 +13738,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13414{ 13738{
13415 dma_addr_t buf_dma; 13739 dma_addr_t buf_dma;
13416 u32 *buf, saved_dma_rwctrl; 13740 u32 *buf, saved_dma_rwctrl;
13417 int ret; 13741 int ret = 0;
13418 13742
13419 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 13743 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13420 if (!buf) { 13744 if (!buf) {
@@ -13427,6 +13751,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13427 13751
13428 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13752 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13429 13753
13754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13755 goto out;
13756
13430 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13757 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13431 /* DMA read watermark not used on PCIE */ 13758 /* DMA read watermark not used on PCIE */
13432 tp->dma_rwctrl |= 0x00180000; 13759 tp->dma_rwctrl |= 0x00180000;
@@ -13499,7 +13826,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13499 tg3_switch_clocks(tp); 13826 tg3_switch_clocks(tp);
13500#endif 13827#endif
13501 13828
13502 ret = 0;
13503 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13829 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13504 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 13830 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13505 goto out; 13831 goto out;
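
Note: the seemingly cosmetic "int ret = 0;" is load-bearing. The new 5717 branch jumps to out before the point where "ret = 0;" used to be assigned (removed a few lines down), so without the initializer the early exit would return an indeterminate value. The control flow, reduced to a sketch:

#include <stdio.h>

static int test_dma(int is_5717)
{
        int ret = 0;            /* was uninitialized before this patch */

        if (is_5717)
                goto out;       /* new early exit skips all tuning */

        /* ... DMA read/write watermark tuning; may set ret ... */

out:
        return ret;
}

int main(void)
{
        printf("5717 path returns %d\n", test_dma(1));
        return 0;
}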
@@ -13678,6 +14004,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
13678 case PHY_ID_BCM5756: return "5722/5756"; 14004 case PHY_ID_BCM5756: return "5722/5756";
13679 case PHY_ID_BCM5906: return "5906"; 14005 case PHY_ID_BCM5906: return "5906";
13680 case PHY_ID_BCM5761: return "5761"; 14006 case PHY_ID_BCM5761: return "5761";
14007 case PHY_ID_BCM5717: return "5717";
13681 case PHY_ID_BCM8002: return "8002/serdes"; 14008 case PHY_ID_BCM8002: return "8002/serdes";
13682 case 0: return "serdes"; 14009 case 0: return "serdes";
13683 default: return "unknown"; 14010 default: return "unknown";
@@ -13919,51 +14246,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13919 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14246 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13920 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14247 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13921 14248
13922 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
13923 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
13924 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
13925 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
13926 struct tg3_napi *tnapi = &tp->napi[i];
13927
13928 tnapi->tp = tp;
13929 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
13930
13931 tnapi->int_mbox = intmbx;
13932 if (i < 4)
13933 intmbx += 0x8;
13934 else
13935 intmbx += 0x4;
13936
13937 tnapi->consmbox = rcvmbx;
13938 tnapi->prodmbox = sndmbx;
13939
13940 if (i)
13941 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
13942 else
13943 tnapi->coal_now = HOSTCC_MODE_NOW;
13944
13945 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
13946 break;
13947
13948 /*
13949 * If we support MSIX, we'll be using RSS. If we're using
13950 * RSS, the first vector only handles link interrupts and the
13951 * remaining vectors handle rx and tx interrupts. Reuse the
13952 * mailbox values for the next iteration. The values we setup
13953 * above are still useful for the single vectored mode.
13954 */
13955 if (!i)
13956 continue;
13957
13958 rcvmbx += 0x8;
13959
13960 if (sndmbx & 0x4)
13961 sndmbx -= 0x4;
13962 else
13963 sndmbx += 0xc;
13964 }
13965
13966 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
13967 dev->ethtool_ops = &tg3_ethtool_ops; 14249 dev->ethtool_ops = &tg3_ethtool_ops;
13968 dev->watchdog_timeo = TG3_TX_TIMEOUT; 14250 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13969 dev->irq = pdev->irq; 14251 dev->irq = pdev->irq;
@@ -13975,8 +14257,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13975 goto err_out_iounmap; 14257 goto err_out_iounmap;
13976 } 14258 }
13977 14259
13978 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 14260 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
13979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 14261 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13980 dev->netdev_ops = &tg3_netdev_ops; 14262 dev->netdev_ops = &tg3_netdev_ops;
13981 else 14263 else
13982 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14264 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
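
Note the operator change in the ops-table selection: the old test ORed 5755_PLUS with 5906, the new one requires 5755_PLUS and not the 5717 A0 stepping. 5717 A0 parts therefore route transmits through the DMA-bug workaround ops, and 5906 also lands on that path, consistent with the SHORT_DMA_BUG flag set for it earlier in this patch. The truth table, sketched:

#include <stdio.h>

static const char *pick_netdev_ops(int is_5755_plus, int is_5717_a0)
{
        return (is_5755_plus && !is_5717_a0) ? "tg3_netdev_ops"
                                             : "tg3_netdev_ops_dma_bug";
}

int main(void)
{
        printf("5717 A0 -> %s\n", pick_netdev_ops(1, 1));
        printf("5755+   -> %s\n", pick_netdev_ops(1, 0));
        return 0;
}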
@@ -14023,46 +14305,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14023 14305
14024 tg3_init_bufmgr_config(tp); 14306 tg3_init_bufmgr_config(tp);
14025 14307
14026 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 14308 /* Selectively allow TSO based on operating conditions */
14027 tp->fw_needed = FIRMWARE_TG3; 14309 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14028 14310 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14029 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
14030 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 14311 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14031 } 14312 else {
14032 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 14313 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 14314 tp->fw_needed = NULL;
14034 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
14035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14036 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
14037 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
14038 } else {
14039 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
14040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14041 tp->fw_needed = FIRMWARE_TG3TSO5;
14042 else
14043 tp->fw_needed = FIRMWARE_TG3TSO;
14044 } 14315 }
14045 14316
14317 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14318 tp->fw_needed = FIRMWARE_TG3;
14319
14046 /* TSO is on by default on chips that support hardware TSO. 14320 /* TSO is on by default on chips that support hardware TSO.
14047 * Firmware TSO on older chips gives lower performance, so it 14321 * Firmware TSO on older chips gives lower performance, so it
14048 * is off by default, but can be enabled using ethtool. 14322 * is off by default, but can be enabled using ethtool.
14049 */ 14323 */
14050 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 14324 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14051 if (dev->features & NETIF_F_IP_CSUM) 14325 (dev->features & NETIF_F_IP_CSUM))
14052 dev->features |= NETIF_F_TSO; 14326 dev->features |= NETIF_F_TSO;
14053 if ((dev->features & NETIF_F_IPV6_CSUM) && 14327
14054 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) 14328 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14329 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14330 if (dev->features & NETIF_F_IPV6_CSUM)
14055 dev->features |= NETIF_F_TSO6; 14331 dev->features |= NETIF_F_TSO6;
14056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14332 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14057 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14334 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14058 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14335 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 14337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
14062 dev->features |= NETIF_F_TSO_ECN; 14338 dev->features |= NETIF_F_TSO_ECN;
14063 } 14339 }
14064 14340
14065
14066 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14341 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14067 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 14342 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14068 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 14343 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
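
Note: with the capability ladder moved into tg3_get_invariants(), the probe-time check reduces to "hardware TSO, or firmware TSO selected earlier and no ASF management firmware resident"; the else branch strips both TSO flags and drops the firmware request, while the 5701 A0 FIRMWARE_TG3 assignment now follows the test so it survives the reset. As a predicate:

#include <stdio.h>

static int tso_capable(int hw_tso, int fw_tso_selected, int asf_enabled)
{
        return hw_tso || (fw_tso_selected && !asf_enabled);
}

int main(void)
{
        printf("fw TSO with ASF: %d\n", tso_capable(0, 1, 1));  /* blocked */
        printf("hw TSO with ASF: %d\n", tso_capable(1, 0, 1));  /* allowed */
        return 0;
}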
@@ -14113,6 +14388,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14113 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 14388 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14114 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 14389 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14115 14390
14391 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14392 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14393 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14394 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14395 struct tg3_napi *tnapi = &tp->napi[i];
14396
14397 tnapi->tp = tp;
14398 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14399
14400 tnapi->int_mbox = intmbx;
14401 if (i < 4)
14402 intmbx += 0x8;
14403 else
14404 intmbx += 0x4;
14405
14406 tnapi->consmbox = rcvmbx;
14407 tnapi->prodmbox = sndmbx;
14408
14409 if (i) {
14410 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14411 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14412 } else {
14413 tnapi->coal_now = HOSTCC_MODE_NOW;
14414 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14415 }
14416
14417 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14418 break;
14419
14420 /*
14421 * If we support MSIX, we'll be using RSS. If we're using
14422 * RSS, the first vector only handles link interrupts and the
14423 * remaining vectors handle rx and tx interrupts. Reuse the
14424 * mailbox values for the next iteration. The values we set up
14425 * above are still useful for the single-vectored mode.
14426 */
14427 if (!i)
14428 continue;
14429
14430 rcvmbx += 0x8;
14431
14432 if (sndmbx & 0x4)
14433 sndmbx -= 0x4;
14434 else
14435 sndmbx += 0xc;
14436 }
14437
14116 tg3_init_coal(tp); 14438 tg3_init_coal(tp);
14117 14439
14118 pci_set_drvdata(pdev, dev); 14440 pci_set_drvdata(pdev, dev);
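
Note: the mailbox/NAPI setup loop removed earlier reappears here, after the flow-control defaults, with one functional change: each vector now registers its own poll handler (tg3_poll for vector 0, tg3_poll_msix for the rest) in place of the single netif_napi_add() that used to follow the loop. The mailbox address walk itself is unchanged; a standalone sketch of it, with base offsets assumed in place of the tg3.h values:

#include <stdio.h>

/* Assumed register bases; tg3.h owns the real values. */
#define MAILBOX_INTERRUPT_0        0x0200UL
#define MAILBOX_RCVRET_CON_IDX_0   0x0280UL
#define MAILBOX_SNDHOST_PROD_IDX_0 0x0300UL
#define TG3_64BIT_REG_LOW          0x04UL
#define TG3_IRQ_MAX_VECS           5     /* assumed */

int main(void)
{
        unsigned long intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        unsigned long rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        unsigned long sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        int i;

        for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
                printf("vec %d: int=%#lx cons=%#lx prod=%#lx\n",
                       i, intmbx, rcvmbx, sndmbx);

                intmbx += (i < 4) ? 0x8 : 0x4;
                if (!i)
                        continue;  /* vector 0 keeps the single-MSI values */
                rcvmbx += 0x8;
                if (sndmbx & 0x4)  /* send mailboxes interleave -4/+12 */
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }
        return 0;
}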
@@ -14131,13 +14453,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14131 tg3_bus_string(tp, str), 14453 tg3_bus_string(tp, str),
14132 dev->dev_addr); 14454 dev->dev_addr);
14133 14455
14134 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) 14456 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14457 struct phy_device *phydev;
14458 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14135 printk(KERN_INFO 14459 printk(KERN_INFO
14136 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14460 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14137 tp->dev->name, 14461 tp->dev->name, phydev->drv->name,
14138 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name, 14462 dev_name(&phydev->dev));
14139 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev)); 14463 } else
14140 else
14141 printk(KERN_INFO 14464 printk(KERN_INFO
14142 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", 14465 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14143 tp->dev->name, tg3_phy_string(tp), 14466 tp->dev->name, tg3_phy_string(tp),