Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--  drivers/net/e1000/e1000_main.c  683
1 file changed, 60 insertions, 623 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index c66dd4f9437c..cad6f65fc1e9 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k3-NAPI"
+#define DRV_VERSION "7.3.21-k5-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -131,7 +131,6 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
-static irqreturn_t e1000_intr_msi(int irq, void *data);
 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			       struct e1000_tx_ring *tx_ring);
 static int e1000_clean(struct napi_struct *napi, int budget);
@@ -258,25 +257,14 @@ module_exit(e1000_exit_module);
 
 static int e1000_request_irq(struct e1000_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	irq_handler_t handler = e1000_intr;
 	int irq_flags = IRQF_SHARED;
 	int err;
 
-	if (hw->mac_type >= e1000_82571) {
-		adapter->have_msi = !pci_enable_msi(adapter->pdev);
-		if (adapter->have_msi) {
-			handler = e1000_intr_msi;
-			irq_flags = 0;
-		}
-	}
-
 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 			  netdev);
 	if (err) {
-		if (adapter->have_msi)
-			pci_disable_msi(adapter->pdev);
 		DPRINTK(PROBE, ERR,
 			"Unable to allocate interrupt Error: %d\n", err);
 	}
@@ -289,9 +277,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 
 	free_irq(adapter->pdev->irq, netdev);
-
-	if (adapter->have_msi)
-		pci_disable_msi(adapter->pdev);
 }
 
 /**
@@ -345,76 +330,6 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 	}
 }
 
-/**
- * e1000_release_hw_control - release control of the h/w to f/w
- * @adapter: address of board private structure
- *
- * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that the
- * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the network i/f is closed.
- *
- **/
-
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
-{
-	u32 ctrl_ext;
-	u32 swsm;
-	struct e1000_hw *hw = &adapter->hw;
-
-	/* Let firmware taken over control of h/w */
-	switch (hw->mac_type) {
-	case e1000_82573:
-		swsm = er32(SWSM);
-		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
-		break;
-	case e1000_82571:
-	case e1000_82572:
-	case e1000_80003es2lan:
-	case e1000_ich8lan:
-		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
-		break;
-	default:
-		break;
-	}
-}
-
-/**
- * e1000_get_hw_control - get control of the h/w from f/w
- * @adapter: address of board private structure
- *
- * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the network i/f is open.
- *
- **/
-
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
-{
-	u32 ctrl_ext;
-	u32 swsm;
-	struct e1000_hw *hw = &adapter->hw;
-
-	/* Let firmware know the driver has taken over */
-	switch (hw->mac_type) {
-	case e1000_82573:
-		swsm = er32(SWSM);
-		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
-		break;
-	case e1000_82571:
-	case e1000_82572:
-	case e1000_80003es2lan:
-	case e1000_ich8lan:
-		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
-		break;
-	default:
-		break;
-	}
-}
-
 static void e1000_init_manageability(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
@@ -425,20 +340,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
 		/* disable hardware interception of ARP */
 		manc &= ~(E1000_MANC_ARP_EN);
 
-		/* enable receiving management packets to the host */
-		/* this will probably generate destination unreachable messages
-		 * from the host OS, but the packets will be handled on SMBUS */
-		if (hw->has_manc2h) {
-			u32 manc2h = er32(MANC2H);
-
-			manc |= E1000_MANC_EN_MNG2HOST;
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
-			manc2h |= E1000_MNG2HOST_PORT_623;
-			manc2h |= E1000_MNG2HOST_PORT_664;
-			ew32(MANC2H, manc2h);
-		}
-
 		ew32(MANC, manc);
 	}
 }
@@ -453,12 +354,6 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
 		/* re-enable hardware interception of ARP */
 		manc |= E1000_MANC_ARP_EN;
 
-		if (hw->has_manc2h)
-			manc &= ~E1000_MANC_EN_MNG2HOST;
-
-		/* don't explicitly have to mess with MANC2H since
-		 * MANC has an enable disable that gates MANC2H */
-
 		ew32(MANC, manc);
 	}
 }
@@ -563,15 +458,6 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
 				goto out;
 			break;
-		case e1000_82571:
-		case e1000_82572:
-		case e1000_82573:
-		case e1000_80003es2lan:
-		case e1000_ich8lan:
-			if (e1000_check_mng_mode(hw) ||
-			    e1000_check_phy_reset_block(hw))
-				goto out;
-			break;
 		default:
 			goto out;
 		}
@@ -671,16 +557,6 @@ void e1000_reset(struct e1000_adapter *adapter)
 		legacy_pba_adjust = true;
 		pba = E1000_PBA_30K;
 		break;
-	case e1000_82571:
-	case e1000_82572:
-	case e1000_80003es2lan:
-		pba = E1000_PBA_38K;
-		break;
-	case e1000_82573:
-		pba = E1000_PBA_20K;
-		break;
-	case e1000_ich8lan:
-		pba = E1000_PBA_8K;
 	case e1000_undefined:
 	case e1000_num_macs:
 		break;
@@ -744,16 +620,8 @@ void e1000_reset(struct e1000_adapter *adapter)
 
 			/* if short on rx space, rx wins and must trump tx
 			 * adjustment or use Early Receive if available */
-			if (pba < min_rx_space) {
-				switch (hw->mac_type) {
-				case e1000_82573:
-					/* ERT enabled in e1000_configure_rx */
-					break;
-				default:
-					pba = min_rx_space;
-					break;
-				}
-			}
+			if (pba < min_rx_space)
+				pba = min_rx_space;
 		}
 	}
 
@@ -789,7 +657,6 @@ void e1000_reset(struct e1000_adapter *adapter)
 
 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 	if (hw->mac_type >= e1000_82544 &&
-	    hw->mac_type <= e1000_82547_rev_2 &&
 	    hw->autoneg == 1 &&
 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 		u32 ctrl = er32(CTRL);
@@ -806,20 +673,6 @@ void e1000_reset(struct e1000_adapter *adapter)
 	e1000_reset_adaptive(hw);
 	e1000_phy_get_info(hw, &adapter->phy_info);
 
-	if (!adapter->smart_power_down &&
-	    (hw->mac_type == e1000_82571 ||
-	     hw->mac_type == e1000_82572)) {
-		u16 phy_data = 0;
-		/* speed up time to link by disabling smart power down, ignore
-		 * the return value of this function because there is nothing
-		 * different we would do if it failed */
-		e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-				   &phy_data);
-		phy_data &= ~IGP02E1000_PM_SPD;
-		e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
-				    phy_data);
-	}
-
 	e1000_release_manageability(adapter);
 }
 
@@ -1046,17 +899,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		goto err_sw_init;
 
 	err = -EIO;
-	/* Flash BAR mapping must happen after e1000_sw_init
-	 * because it depends on mac_type */
-	if ((hw->mac_type == e1000_ich8lan) &&
-	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
-		hw->flash_address = pci_ioremap_bar(pdev, 1);
-		if (!hw->flash_address)
-			goto err_flashmap;
-	}
-
-	if (e1000_check_phy_reset_block(hw))
-		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
 	if (hw->mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
@@ -1064,21 +906,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 				   NETIF_F_HW_VLAN_TX |
 				   NETIF_F_HW_VLAN_RX |
 				   NETIF_F_HW_VLAN_FILTER;
-		if (hw->mac_type == e1000_ich8lan)
-			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
 	}
 
 	if ((hw->mac_type >= e1000_82544) &&
 	    (hw->mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;
 
-	if (hw->mac_type > e1000_82547_rev_2)
-		netdev->features |= NETIF_F_TSO6;
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
 	netdev->vlan_features |= NETIF_F_TSO;
-	netdev->vlan_features |= NETIF_F_TSO6;
 	netdev->vlan_features |= NETIF_F_HW_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
@@ -1153,15 +990,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
 		break;
-	case e1000_ich8lan:
-		e1000_read_eeprom(hw,
-			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
-		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
-		break;
 	case e1000_82546:
 	case e1000_82546_rev_3:
-	case e1000_82571:
-	case e1000_80003es2lan:
 		if (er32(STATUS) & E1000_STATUS_FUNC_1){
 			e1000_read_eeprom(hw,
 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
@@ -1185,17 +1015,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		break;
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546GB_FIBER:
-	case E1000_DEV_ID_82571EB_FIBER:
 		/* Wake events only supported on port A for dual fiber
 		 * regardless of eeprom setting */
 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
 			adapter->eeprom_wol = 0;
 		break;
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
-	case E1000_DEV_ID_82571EB_QUAD_COPPER:
-	case E1000_DEV_ID_82571EB_QUAD_FIBER:
-	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
-	case E1000_DEV_ID_82571PT_QUAD_COPPER:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;
@@ -1213,39 +1038,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 	/* print bus type/speed/width info */
 	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
-		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
-		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
-		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
-		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+		((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
+		((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
 		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
 		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
 		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
-		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
-		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
-		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
-		 "32-bit"));
+		((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
 
 	printk("%pM\n", netdev->dev_addr);
 
-	if (hw->bus_type == e1000_bus_type_pci_express) {
-		DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
-			"longer be supported by this driver in the future.\n",
-			pdev->vendor, pdev->device);
-		DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
-			"driver instead.\n");
-	}
-
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
-	/* If the controller is 82573 and f/w is AMT, do not set
-	 * DRV_LOAD until the interface is up. For all other cases,
-	 * let the f/w know that the h/w is now under the control
-	 * of the driver. */
-	if (hw->mac_type != e1000_82573 ||
-	    !e1000_check_mng_mode(hw))
-		e1000_get_hw_control(adapter);
-
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
@@ -1260,14 +1064,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	return 0;
 
 err_register:
-	e1000_release_hw_control(adapter);
 err_eeprom:
-	if (!e1000_check_phy_reset_block(hw))
-		e1000_phy_hw_reset(hw);
+	e1000_phy_hw_reset(hw);
 
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
-err_flashmap:
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 err_sw_init:
@@ -1302,14 +1103,9 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 
 	e1000_release_manageability(adapter);
 
-	/* Release control of h/w to f/w. If f/w is AMT enabled, this
-	 * would have already happened in close and is redundant. */
-	e1000_release_hw_control(adapter);
-
 	unregister_netdev(netdev);
 
-	if (!e1000_check_phy_reset_block(hw))
-		e1000_phy_hw_reset(hw);
+	e1000_phy_hw_reset(hw);
 
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
@@ -1472,12 +1268,6 @@ static int e1000_open(struct net_device *netdev)
 		e1000_update_mng_vlan(adapter);
 	}
 
-	/* If AMT is enabled, let the firmware know that the network
-	 * interface is now open */
-	if (hw->mac_type == e1000_82573 &&
-	    e1000_check_mng_mode(hw))
-		e1000_get_hw_control(adapter);
-
 	/* before we allocate an interrupt, we must be ready to handle it.
 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 	 * as soon as we call pci_request_irq, so we have to setup our
@@ -1503,7 +1293,6 @@ static int e1000_open(struct net_device *netdev)
 	return E1000_SUCCESS;
 
 err_req_irq:
-	e1000_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000_free_all_rx_resources(adapter);
 err_setup_rx:
@@ -1548,12 +1337,6 @@ static int e1000_close(struct net_device *netdev)
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 	}
 
-	/* If AMT is enabled, let the firmware know that the network
-	 * interface is now closed */
-	if (hw->mac_type == e1000_82573 &&
-	    e1000_check_mng_mode(hw))
-		e1000_release_hw_control(adapter);
-
 	return 0;
 }
 
@@ -1692,7 +1475,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 {
 	u64 tdba;
 	struct e1000_hw *hw = &adapter->hw;
-	u32 tdlen, tctl, tipg, tarc;
+	u32 tdlen, tctl, tipg;
 	u32 ipgr1, ipgr2;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
@@ -1714,8 +1497,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 	}
 
 	/* Set the default values for the Tx Inter Packet Gap timer */
-	if (hw->mac_type <= e1000_82547_rev_2 &&
-	    (hw->media_type == e1000_media_type_fiber ||
+	if ((hw->media_type == e1000_media_type_fiber ||
 	     hw->media_type == e1000_media_type_internal_serdes))
 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
 	else
@@ -1728,10 +1510,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
 		break;
-	case e1000_80003es2lan:
-		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
-		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
-		break;
 	default:
 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
@@ -1754,21 +1532,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
-	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
-		tarc = er32(TARC0);
-		/* set the speed mode bit, we'll clear it if we're not at
-		 * gigabit link later */
-		tarc |= (1 << 21);
-		ew32(TARC0, tarc);
-	} else if (hw->mac_type == e1000_80003es2lan) {
-		tarc = er32(TARC0);
-		tarc |= 1;
-		ew32(TARC0, tarc);
-		tarc = er32(TARC1);
-		tarc |= 1;
-		ew32(TARC1, tarc);
-	}
-
 	e1000_config_collision_dist(hw);
 
 	/* Setup Transmit Descriptor Settings for eop descriptor */
@@ -1804,7 +1567,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 				    struct e1000_rx_ring *rxdr)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	int size, desc_len;
 
@@ -1817,10 +1579,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	}
 	memset(rxdr->buffer_info, 0, size);
 
-	if (hw->mac_type <= e1000_82547_rev_2)
-		desc_len = sizeof(struct e1000_rx_desc);
-	else
-		desc_len = sizeof(union e1000_rx_desc_packet_split);
+	desc_len = sizeof(struct e1000_rx_desc);
 
 	/* Round up to nearest 4K */
 
@@ -1977,7 +1736,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 {
 	u64 rdba;
 	struct e1000_hw *hw = &adapter->hw;
-	u32 rdlen, rctl, rxcsum, ctrl_ext;
+	u32 rdlen, rctl, rxcsum;
 
 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
 		rdlen = adapter->rx_ring[0].count *
@@ -2004,17 +1763,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 		ew32(ITR, 1000000000 / (adapter->itr * 256));
 	}
 
-	if (hw->mac_type >= e1000_82571) {
-		ctrl_ext = er32(CTRL_EXT);
-		/* Reset delay timers after every interrupt */
-		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
-		/* Auto-Mask interrupts upon ICR access */
-		ctrl_ext |= E1000_CTRL_EXT_IAME;
-		ew32(IAM, 0xffffffff);
-		ew32(CTRL_EXT, ctrl_ext);
-		E1000_WRITE_FLUSH();
-	}
-
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
 	switch (adapter->num_rx_queues) {
@@ -2329,22 +2077,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
 
 	e1000_rar_set(hw, hw->mac_addr, 0);
 
-	/* With 82571 controllers, LAA may be overwritten (with the default)
-	 * due to controller reset from the other port. */
-	if (hw->mac_type == e1000_82571) {
-		/* activate the work around */
-		hw->laa_is_present = 1;
-
-		/* Hold a copy of the LAA in RAR[14] This is done so that
-		 * between the time RAR[0] gets clobbered and the time it
-		 * gets fixed (in e1000_watchdog), the actual LAA is in one
-		 * of the RARs and no incoming packets directed to this port
-		 * are dropped. Eventaully the LAA will be in RAR[0] and
-		 * RAR[14] */
-		e1000_rar_set(hw, hw->mac_addr,
-			      E1000_RAR_ENTRIES - 1);
-	}
-
 	if (hw->mac_type == e1000_82542_rev2_0)
 		e1000_leave_82542_rst(adapter);
 
@@ -2371,9 +2103,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	u32 rctl;
 	u32 hash_value;
 	int i, rar_entries = E1000_RAR_ENTRIES;
-	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
-			E1000_NUM_MTA_REGISTERS_ICH8LAN :
-			E1000_NUM_MTA_REGISTERS;
+	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
 
 	if (!mcarray) {
@@ -2381,13 +2111,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 		return;
 	}
 
-	if (hw->mac_type == e1000_ich8lan)
-		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
-
-	/* reserve RAR[14] for LAA over-write work-around */
-	if (hw->mac_type == e1000_82571)
-		rar_entries--;
-
 	/* Check for Promiscuous and All Multicast modes */
 
 	rctl = er32(RCTL);
@@ -2396,15 +2119,13 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
 		rctl &= ~E1000_RCTL_VFE;
 	} else {
-		if (netdev->flags & IFF_ALLMULTI) {
+		if (netdev->flags & IFF_ALLMULTI)
 			rctl |= E1000_RCTL_MPE;
-		} else {
+		else
 			rctl &= ~E1000_RCTL_MPE;
-		}
-		if (adapter->hw.mac_type != e1000_ich8lan)
-			/* Enable VLAN filter if there is a VLAN */
-			if (adapter->vlgrp)
-				rctl |= E1000_RCTL_VFE;
+		/* Enable VLAN filter if there is a VLAN */
+		if (adapter->vlgrp)
+			rctl |= E1000_RCTL_VFE;
 	}
 
 	if (netdev->uc.count > rar_entries - 1) {
@@ -2427,7 +2148,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
 	 *
 	 * RAR 0 is used for the station MAC adddress
 	 * if there are not 14 addresses, go ahead and clear the filters
-	 * -- with 82571 controllers only 0-13 entries are filled here
 	 */
 	i = 1;
 	if (use_uc)
@@ -2538,22 +2258,8 @@ static void e1000_watchdog(unsigned long data)
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_tx_ring *txdr = adapter->tx_ring;
 	u32 link, tctl;
-	s32 ret_val;
-
-	ret_val = e1000_check_for_link(hw);
-	if ((ret_val == E1000_ERR_PHY) &&
-	    (hw->phy_type == e1000_phy_igp_3) &&
-	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
-		/* See e1000_kumeran_lock_loss_workaround() */
-		DPRINTK(LINK, INFO,
-			"Gigabit has been disabled, downgrading speed\n");
-	}
 
-	if (hw->mac_type == e1000_82573) {
-		e1000_enable_tx_pkt_filtering(hw);
-		if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id)
-			e1000_update_mng_vlan(adapter);
-	}
+	e1000_check_for_link(hw);
 
 	if ((hw->media_type == e1000_media_type_internal_serdes) &&
 	    !(er32(TXCW) & E1000_TXCW_ANE))
@@ -2598,52 +2304,15 @@ static void e1000_watchdog(unsigned long data)
 			break;
 		}
 
-		if ((hw->mac_type == e1000_82571 ||
-		     hw->mac_type == e1000_82572) &&
-		    !txb2b) {
-			u32 tarc0;
-			tarc0 = er32(TARC0);
-			tarc0 &= ~(1 << 21);
-			ew32(TARC0, tarc0);
-		}
-
-		/* disable TSO for pcie and 10/100 speeds, to avoid
-		 * some hardware issues */
-		if (!adapter->tso_force &&
-		    hw->bus_type == e1000_bus_type_pci_express){
-			switch (adapter->link_speed) {
-			case SPEED_10:
-			case SPEED_100:
-				DPRINTK(PROBE,INFO,
-					"10/100 speed: disabling TSO\n");
-				netdev->features &= ~NETIF_F_TSO;
-				netdev->features &= ~NETIF_F_TSO6;
-				break;
-			case SPEED_1000:
-				netdev->features |= NETIF_F_TSO;
-				netdev->features |= NETIF_F_TSO6;
-				break;
-			default:
-				/* oops */
-				break;
-			}
-		}
-
-		/* enable transmits in the hardware, need to do this
-		 * after setting TARC0 */
+		/* enable transmits in the hardware */
 		tctl = er32(TCTL);
 		tctl |= E1000_TCTL_EN;
 		ew32(TCTL, tctl);
 
 		netif_carrier_on(netdev);
-		mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
+		mod_timer(&adapter->phy_info_timer,
+			  round_jiffies(jiffies + 2 * HZ));
 		adapter->smartspeed = 0;
-	} else {
-		/* make sure the receive unit is started */
-		if (hw->rx_needs_kicking) {
-			u32 rctl = er32(RCTL);
-			ew32(RCTL, rctl | E1000_RCTL_EN);
-		}
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -2652,16 +2321,8 @@ static void e1000_watchdog(unsigned long data)
 			printk(KERN_INFO "e1000: %s NIC Link is Down\n",
 			       netdev->name);
 			netif_carrier_off(netdev);
-			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
-
-			/* 80003ES2LAN workaround--
-			 * For packet buffer work-around on link down event;
-			 * disable receives in the ISR and
-			 * reset device here in the watchdog
-			 */
-			if (hw->mac_type == e1000_80003es2lan)
-				/* reset device */
-				schedule_work(&adapter->reset_task);
+			mod_timer(&adapter->phy_info_timer,
+				  round_jiffies(jiffies + 2 * HZ));
 		}
 
 		e1000_smartspeed(adapter);
@@ -2700,11 +2361,6 @@ static void e1000_watchdog(unsigned long data)
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = true;
 
-	/* With 82571 controllers, LAA may be overwritten due to controller
-	 * reset from the other port. Set the appropriate LAA in RAR[0] */
-	if (hw->mac_type == e1000_82571 && hw->laa_is_present)
-		e1000_rar_set(hw, hw->mac_addr, 0);
-
 	/* Reset the timer */
 	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
 }
@@ -3186,41 +2842,6 @@ no_fifo_stall_required:
 	return 0;
 }
 
-#define MINIMUM_DHCP_PACKET_SIZE 282
-static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
-				    struct sk_buff *skb)
-{
-	struct e1000_hw *hw = &adapter->hw;
-	u16 length, offset;
-	if (vlan_tx_tag_present(skb)) {
-		if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) &&
-		    ( hw->mng_cookie.status &
-		      E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
-			return 0;
-	}
-	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
-		struct ethhdr *eth = (struct ethhdr *)skb->data;
-		if ((htons(ETH_P_IP) == eth->h_proto)) {
-			const struct iphdr *ip =
-				(struct iphdr *)((u8 *)skb->data+14);
-			if (IPPROTO_UDP == ip->protocol) {
-				struct udphdr *udp =
-					(struct udphdr *)((u8 *)ip +
-						(ip->ihl << 2));
-				if (ntohs(udp->dest) == 67) {
-					offset = (u8 *)udp + 8 - skb->data;
-					length = skb->len - offset;
-
-					return e1000_mng_write_dhcp_info(hw,
-							(u8 *)udp + 8,
-							length);
-				}
-			}
-		}
-	}
-	return 0;
-}
-
 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3279,11 +2900,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	/* 82571 and newer doesn't need the workaround that limited descriptor
-	 * length to 4kB */
-	if (hw->mac_type >= e1000_82571)
-		max_per_txd = 8192;
-
 	mss = skb_shinfo(skb)->gso_size;
 	/* The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
@@ -3296,9 +2912,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;
 
-		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
-		 * points to just header, pull a few bytes of payload from
-		 * frags into skb->data */
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		if (skb->data_len && hdr_len == len) {
 			switch (hw->mac_type) {
@@ -3313,10 +2926,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
 					break;
 				/* fall through */
-			case e1000_82571:
-			case e1000_82572:
-			case e1000_82573:
-			case e1000_ich8lan:
 				pull_size = min((unsigned int)4, skb->data_len);
 				if (!__pskb_pull_tail(skb, pull_size)) {
 					DPRINTK(DRV, ERR,
@@ -3361,11 +2970,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	if (adapter->pcix_82544)
 		count += nr_frags;
 
-
-	if (hw->tx_pkt_filtering &&
-	    (hw->mac_type == e1000_82573))
-		e1000_transfer_dhcp_info(adapter, skb);
-
 	/* need: count + 2 desc gap to keep tail from touching
 	 * head, otherwise try next time */
 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
@@ -3398,9 +3002,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
-	/* Old method was to assume IPv4 packet by default if TSO was enabled.
-	 * 82571 hardware supports TSO capabilities for IPv6 as well...
-	 * no longer assume, we must. */
 	if (likely(skb->protocol == htons(ETH_P_IP)))
 		tx_flags |= E1000_TX_FLAGS_IPV4;
 
@@ -3472,7 +3073,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
-	u16 eeprom_data = 0;
 
 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3483,39 +3083,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	/* Adapter-specific max frame size limits. */
 	switch (hw->mac_type) {
 	case e1000_undefined ... e1000_82542_rev2_1:
-	case e1000_ich8lan:
 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
 			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
 			return -EINVAL;
 		}
 		break;
-	case e1000_82573:
-		/* Jumbo Frames not supported if:
-		 * - this is not an 82573L device
-		 * - ASPM is enabled in any way (0x1A bits 3:2) */
-		e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1,
-				  &eeprom_data);
-		if ((hw->device_id != E1000_DEV_ID_82573L) ||
-		    (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
-			if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
-				DPRINTK(PROBE, ERR,
-					"Jumbo Frames not supported.\n");
-				return -EINVAL;
-			}
-			break;
-		}
-		/* ERT will be enabled later to enable wire speed receives */
-
-		/* fall through to get support */
-	case e1000_82571:
-	case e1000_82572:
-	case e1000_80003es2lan:
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
-		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
-			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
-			return -EINVAL;
-		}
-		break;
 	default:
 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
 		break;
@@ -3596,14 +3168,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.mprc += er32(MPRC);
 	adapter->stats.roc += er32(ROC);
 
-	if (hw->mac_type != e1000_ich8lan) {
-		adapter->stats.prc64 += er32(PRC64);
-		adapter->stats.prc127 += er32(PRC127);
-		adapter->stats.prc255 += er32(PRC255);
-		adapter->stats.prc511 += er32(PRC511);
-		adapter->stats.prc1023 += er32(PRC1023);
-		adapter->stats.prc1522 += er32(PRC1522);
-	}
+	adapter->stats.prc64 += er32(PRC64);
+	adapter->stats.prc127 += er32(PRC127);
+	adapter->stats.prc255 += er32(PRC255);
+	adapter->stats.prc511 += er32(PRC511);
+	adapter->stats.prc1023 += er32(PRC1023);
+	adapter->stats.prc1522 += er32(PRC1522);
 
 	adapter->stats.symerrs += er32(SYMERRS);
 	adapter->stats.mpc += er32(MPC);
@@ -3632,14 +3202,12 @@ void e1000_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.toth += er32(TOTH);
 	adapter->stats.tpr += er32(TPR);
 
-	if (hw->mac_type != e1000_ich8lan) {
-		adapter->stats.ptc64 += er32(PTC64);
-		adapter->stats.ptc127 += er32(PTC127);
-		adapter->stats.ptc255 += er32(PTC255);
-		adapter->stats.ptc511 += er32(PTC511);
-		adapter->stats.ptc1023 += er32(PTC1023);
-		adapter->stats.ptc1522 += er32(PTC1522);
-	}
+	adapter->stats.ptc64 += er32(PTC64);
+	adapter->stats.ptc127 += er32(PTC127);
+	adapter->stats.ptc255 += er32(PTC255);
+	adapter->stats.ptc511 += er32(PTC511);
+	adapter->stats.ptc1023 += er32(PTC1023);
+	adapter->stats.ptc1522 += er32(PTC1522);
 
 	adapter->stats.mptc += er32(MPTC);
 	adapter->stats.bptc += er32(BPTC);
@@ -3659,20 +3227,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->stats.tsctc += er32(TSCTC);
 		adapter->stats.tsctfc += er32(TSCTFC);
 	}
-	if (hw->mac_type > e1000_82547_rev_2) {
-		adapter->stats.iac += er32(IAC);
-		adapter->stats.icrxoc += er32(ICRXOC);
-
-		if (hw->mac_type != e1000_ich8lan) {
-			adapter->stats.icrxptc += er32(ICRXPTC);
-			adapter->stats.icrxatc += er32(ICRXATC);
-			adapter->stats.ictxptc += er32(ICTXPTC);
-			adapter->stats.ictxatc += er32(ICTXATC);
-			adapter->stats.ictxqec += er32(ICTXQEC);
-			adapter->stats.ictxqmtc += er32(ICTXQMTC);
-			adapter->stats.icrxdmtc += er32(ICRXDMTC);
-		}
-	}
 
 	/* Fill out the OS statistics structure */
 	adapter->net_stats.multicast = adapter->stats.mprc;
@@ -3731,49 +3285,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_intr_msi - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-
-static irqreturn_t e1000_intr_msi(int irq, void *data)
-{
-	struct net_device *netdev = data;
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-	u32 icr = er32(ICR);
-
-	/* in NAPI mode read ICR disables interrupts using IAM */
-
-	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
-		hw->get_link_status = 1;
-		/* 80003ES2LAN workaround-- For packet buffer work-around on
-		 * link down event; disable receives here in the ISR and reset
-		 * adapter in watchdog */
-		if (netif_carrier_ok(netdev) &&
-		    (hw->mac_type == e1000_80003es2lan)) {
-			/* disable receives */
-			u32 rctl = er32(RCTL);
-			ew32(RCTL, rctl & ~E1000_RCTL_EN);
-		}
-		/* guard against interrupt when we're going down */
-		if (!test_bit(__E1000_DOWN, &adapter->flags))
-			mod_timer(&adapter->watchdog_timer, jiffies + 1);
-	}
-
-	if (likely(napi_schedule_prep(&adapter->napi))) {
-		adapter->total_tx_bytes = 0;
-		adapter->total_tx_packets = 0;
-		adapter->total_rx_bytes = 0;
-		adapter->total_rx_packets = 0;
-		__napi_schedule(&adapter->napi);
-	} else
-		e1000_irq_enable(adapter);
-
-	return IRQ_HANDLED;
-}
-
-/**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure
@@ -3784,43 +3295,22 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 rctl, icr = er32(ICR);
+	u32 icr = er32(ICR);
 
 	if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags)))
 		return IRQ_NONE; /* Not our interrupt */
 
-	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
-	 * not set, then the adapter didn't send an interrupt */
-	if (unlikely(hw->mac_type >= e1000_82571 &&
-		     !(icr & E1000_ICR_INT_ASSERTED)))
-		return IRQ_NONE;
-
-	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
-	 * need for the IMC write */
-
 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
-		/* 80003ES2LAN workaround--
-		 * For packet buffer work-around on link down event;
-		 * disable receives here in the ISR and
-		 * reset adapter in watchdog
-		 */
-		if (netif_carrier_ok(netdev) &&
-		    (hw->mac_type == e1000_80003es2lan)) {
-			/* disable receives */
-			rctl = er32(RCTL);
-			ew32(RCTL, rctl & ~E1000_RCTL_EN);
-		}
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->flags))
 			mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
 
-	if (unlikely(hw->mac_type < e1000_82571)) {
-		/* disable interrupts, without the synchronize_irq bit */
-		ew32(IMC, ~0);
-		E1000_WRITE_FLUSH();
-	}
+	/* disable interrupts, without the synchronize_irq bit */
+	ew32(IMC, ~0);
+	E1000_WRITE_FLUSH();
+
 	if (likely(napi_schedule_prep(&adapter->napi))) {
 		adapter->total_tx_bytes = 0;
 		adapter->total_tx_packets = 0;
@@ -3999,25 +3489,13 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 		return;
 	}
 	/* TCP/UDP Checksum has not been calculated */
-	if (hw->mac_type <= e1000_82547_rev_2) {
-		if (!(status & E1000_RXD_STAT_TCPCS))
-			return;
-	} else {
-		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
-			return;
-	}
+	if (!(status & E1000_RXD_STAT_TCPCS))
+		return;
+
 	/* It must be a TCP or UDP packet with a valid checksum */
 	if (likely(status & E1000_RXD_STAT_TCPCS)) {
 		/* TCP checksum is good */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else if (hw->mac_type > e1000_82547_rev_2) {
-		/* IP fragment with UDP payload */
-		/* Hardware complements the payload checksum, so we undo it
-		 * and then put the value in host order for further stack use.
-		 */
-		__sum16 sum = (__force __sum16)htons(csum);
-		skb->csum = csum_unfold(~sum);
-		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 	adapter->hw_csum_good++;
 }
@@ -4850,33 +4328,28 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
 		ctrl |= E1000_CTRL_VME;
 		ew32(CTRL, ctrl);
 
-		if (adapter->hw.mac_type != e1000_ich8lan) {
-			/* enable VLAN receive filtering */
-			rctl = er32(RCTL);
-			rctl &= ~E1000_RCTL_CFIEN;
-			if (!(netdev->flags & IFF_PROMISC))
-				rctl |= E1000_RCTL_VFE;
-			ew32(RCTL, rctl);
-			e1000_update_mng_vlan(adapter);
-		}
+		/* enable VLAN receive filtering */
+		rctl = er32(RCTL);
+		rctl &= ~E1000_RCTL_CFIEN;
+		if (!(netdev->flags & IFF_PROMISC))
+			rctl |= E1000_RCTL_VFE;
+		ew32(RCTL, rctl);
+		e1000_update_mng_vlan(adapter);
 	} else {
 		/* disable VLAN tag insert/strip */
 		ctrl = er32(CTRL);
 		ctrl &= ~E1000_CTRL_VME;
 		ew32(CTRL, ctrl);
 
-		if (adapter->hw.mac_type != e1000_ich8lan) {
-			/* disable VLAN receive filtering */
-			rctl = er32(RCTL);
-			rctl &= ~E1000_RCTL_VFE;
-			ew32(RCTL, rctl);
+		/* disable VLAN receive filtering */
+		rctl = er32(RCTL);
+		rctl &= ~E1000_RCTL_VFE;
+		ew32(RCTL, rctl);
 
-			if (adapter->mng_vlan_id !=
-			    (u16)E1000_MNG_VLAN_NONE) {
-				e1000_vlan_rx_kill_vid(netdev,
-					adapter->mng_vlan_id);
-				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-			}
+		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+			e1000_vlan_rx_kill_vid(netdev,
+					       adapter->mng_vlan_id);
+			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 		}
 	}
 
@@ -4913,14 +4386,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	if (!test_bit(__E1000_DOWN, &adapter->flags))
 		e1000_irq_enable(adapter);
 
-	if ((hw->mng_cookie.status &
-	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
-	    (vid == adapter->mng_vlan_id)) {
-		/* release control to f/w */
-		e1000_release_hw_control(adapter);
-		return;
-	}
-
 	/* remove VID from filter table */
 	index = (vid >> 5) & 0x7F;
 	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
@@ -5031,16 +4496,13 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		}
 
 		if (hw->media_type == e1000_media_type_fiber ||
 		    hw->media_type == e1000_media_type_internal_serdes) {
 			/* keep the laser running in D3 */
 			ctrl_ext = er32(CTRL_EXT);
 			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
 			ew32(CTRL_EXT, ctrl_ext);
 		}
 
-		/* Allow time for pending master requests to run */
-		e1000_disable_pciex_master(hw);
-
 		ew32(WUC, E1000_WUC_PME_EN);
 		ew32(WUFC, wufc);
 	} else {
@@ -5056,16 +4518,9 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	if (adapter->en_mng_pt)
 		*enable_wake = true;
 
-	if (hw->phy_type == e1000_phy_igp_3)
-		e1000_phy_powerdown_workaround(hw);
-
 	if (netif_running(netdev))
 		e1000_free_irq(adapter);
 
-	/* Release control of h/w to f/w. If f/w is AMT enabled, this
-	 * would have already happened in close and is redundant. */
-	e1000_release_hw_control(adapter);
-
 	pci_disable_device(pdev);
 
 	return 0;
@@ -5131,14 +4586,6 @@ static int e1000_resume(struct pci_dev *pdev)
 
 	netif_device_attach(netdev);
 
-	/* If the controller is 82573 and f/w is AMT, do not set
-	 * DRV_LOAD until the interface is up. For all other cases,
-	 * let the f/w know that the h/w is now under the control
-	 * of the driver. */
-	if (hw->mac_type != e1000_82573 ||
-	    !e1000_check_mng_mode(hw))
-		e1000_get_hw_control(adapter);
-
 	return 0;
 }
 #endif
@@ -5243,7 +4690,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
 
 	e1000_init_manageability(adapter);
 
@@ -5255,15 +4701,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
 	}
 
 	netif_device_attach(netdev);
-
-	/* If the controller is 82573 and f/w is AMT, do not set
-	 * DRV_LOAD until the interface is up. For all other cases,
-	 * let the f/w know that the h/w is now under the control
-	 * of the driver. */
-	if (hw->mac_type != e1000_82573 ||
-	    !e1000_check_mng_mode(hw))
-		e1000_get_hw_control(adapter);
-
 }
 
 /* e1000_main.c */