Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--   drivers/net/igb/igb_main.c | 170 ++++++++++++++++++++++++++++++++--------
1 file changed, 135 insertions(+), 35 deletions(-)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 14363260612c..ba043c4e1ca2 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -51,7 +51,7 @@ char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
 				"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
 
 
 static const struct e1000_info *igb_info_tbl[] = {
@@ -59,6 +59,10 @@ static const struct e1000_info *igb_info_tbl[] = {
 };
 
 static struct pci_device_id igb_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -268,6 +272,10 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 {
 	u32 msixbm = 0;
 	struct e1000_hw *hw = &adapter->hw;
+	u32 ivar, index;
+
+	switch (hw->mac.type) {
+	case e1000_82575:
 	/* The 82575 assigns vectors using a bitmask, which matches the
 	   bitmask for the EICR/EIMS/EIMC registers.  To assign one
 	   or more queues to a vector, we write the appropriate bits
@@ -282,6 +290,47 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 			  E1000_EICR_TX_QUEUE0 << tx_queue;
 	}
 	array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		break;
+	case e1000_82576:
+		/* Kawela uses a table-based method for assigning vectors.
+		   Each queue has a single entry in the table to which we write
+		   a vector number along with a "valid" bit.  Sadly, the layout
+		   of the table is somewhat counterintuitive. */
+		if (rx_queue > IGB_N0_QUEUE) {
+			index = (rx_queue & 0x7);
+			ivar = array_rd32(E1000_IVAR0, index);
+			if (rx_queue < 8) {
+				/* vector goes into low byte of register */
+				ivar = ivar & 0xFFFFFF00;
+				ivar |= msix_vector | E1000_IVAR_VALID;
+			} else {
+				/* vector goes into third byte of register */
+				ivar = ivar & 0xFF00FFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+			}
+			adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+			array_wr32(E1000_IVAR0, index, ivar);
+		}
+		if (tx_queue > IGB_N0_QUEUE) {
+			index = (tx_queue & 0x7);
+			ivar = array_rd32(E1000_IVAR0, index);
+			if (tx_queue < 8) {
+				/* vector goes into second byte of register */
+				ivar = ivar & 0xFFFF00FF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
+			} else {
+				/* vector goes into high byte of register */
+				ivar = ivar & 0x00FFFFFF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+			}
+			adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+			array_wr32(E1000_IVAR0, index, ivar);
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
 }
 
 /**
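For reference, the IVAR programming added above packs four table entries into each 32-bit IVAR register: byte 0 carries Rx queues 0-7, byte 1 Tx queues 0-7, byte 2 Rx queues 8-15 and byte 3 Tx queues 8-15, each entry being the MSI-X vector number plus a valid flag. Below is a minimal stand-alone sketch of that byte-selection rule; apart from the 0x80 valid bit and the masks mirrored from the hunk, the names are made up for illustration.

	#include <stdint.h>

	#define IVAR_VALID 0x80	/* mirrors E1000_IVAR_VALID */

	/* Compute the new IVAR word for one Rx/Tx queue, as the driver does above:
	 * queues n and n+8 share IVAR[n & 7]; Rx uses bytes 0/2, Tx bytes 1/3. */
	static uint32_t ivar_set(uint32_t ivar, int queue, int vector, int is_tx)
	{
		int byte = (is_tx ? 1 : 0) + (queue < 8 ? 0 : 2);
		uint32_t shift = byte * 8;

		ivar &= ~(0xFFu << shift);                          /* clear the old entry */
		ivar |= ((uint32_t)(vector | IVAR_VALID)) << shift; /* vector + valid bit */
		return ivar;
	}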
@@ -297,6 +346,12 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->eims_enable_mask = 0;
+	if (hw->mac.type == e1000_82576)
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick.  And it will take days to debug. */
+		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
+		     E1000_GPIE_NSICR);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *tx_ring = &adapter->tx_ring[i];
@@ -322,6 +377,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 
 
 	/* set vector for other causes, i.e. link changes */
+	switch (hw->mac.type) {
+	case e1000_82575:
 	array_wr32(E1000_MSIXBM(0), vector++,
 		   E1000_EIMS_OTHER);
 
@@ -337,6 +394,19 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 	adapter->eims_enable_mask |= E1000_EIMS_OTHER;
 	adapter->eims_other = E1000_EIMS_OTHER;
 
+		break;
+
+	case e1000_82576:
+		tmp = (vector++ | E1000_IVAR_VALID) << 8;
+		wr32(E1000_IVAR_MISC, tmp);
+
+		adapter->eims_enable_mask = (1 << (vector)) - 1;
+		adapter->eims_other = 1 << (vector - 1);
+		break;
+	default:
+		/* do nothing, since nothing else supports MSI-X */
+		break;
+	} /* switch (hw->mac.type) */
 	wrfl();
 }
 
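The 82576 branch above derives both interrupt masks from the final vector count alone: after the post-increment, vector equals the total number of MSI-X vectors in use, so (1 << vector) - 1 enables one EIMS bit per vector and 1 << (vector - 1) isolates the last one, the misc/link vector. A small worked example with assumed queue counts (illustrative numbers, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		/* Assume 4 Tx + 4 Rx queue vectors were assigned first; the
		 * vector++ for the misc cause then leaves vector == 9. */
		unsigned int vector = 9;
		unsigned int eims_enable_mask = (1u << vector) - 1; /* 0x1FF: bits 0..8 */
		unsigned int eims_other = 1u << (vector - 1);       /* 0x100: misc/link only */

		printf("enable mask %#x, other %#x\n", eims_enable_mask, eims_other);
		return 0;
	}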
@@ -474,8 +544,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
 		adapter->num_rx_queues = 1;
 		igb_alloc_queues(adapter);
 	} else {
-		wr32(E1000_MSIXBM(0), (E1000_EICR_RX_QUEUE0 |
-				       E1000_EIMS_OTHER));
+		switch (hw->mac.type) {
+		case e1000_82575:
+			wr32(E1000_MSIXBM(0),
+			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
+			break;
+		case e1000_82576:
+			wr32(E1000_IVAR0, E1000_IVAR_VALID);
+			break;
+		default:
+			break;
+		}
 	}
 
 	if (adapter->msi_enabled) {
@@ -770,16 +849,23 @@ void igb_reinit_locked(struct igb_adapter *adapter)
 void igb_reset(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	struct e1000_fc_info *fc = &adapter->hw.fc;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_fc_info *fc = &hw->fc;
 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 	u16 hwm;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
 	 */
+	if (mac->type != e1000_82576) {
 	pba = E1000_PBA_34K;
+	}
+	else {
+	pba = E1000_PBA_64K;
+	}
 
-	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+	    (mac->type < e1000_82576)) {
 		/* adjust PBA for jumbo frames */
 		wr32(E1000_PBA, pba);
 
@@ -818,8 +904,8 @@ void igb_reset(struct igb_adapter *adapter)
 			if (pba < min_rx_space)
 				pba = min_rx_space;
 		}
+		wr32(E1000_PBA, pba);
 	}
-	wr32(E1000_PBA, pba);
 
 	/* flow control settings */
 	/* The high water mark must be low enough to fit one full frame
@@ -828,10 +914,15 @@ void igb_reset(struct igb_adapter *adapter)
 	 * - 90% of the Rx FIFO size, or
 	 * - the full Rx FIFO size minus one full frame */
 	hwm = min(((pba << 10) * 9 / 10),
-		  ((pba << 10) - adapter->max_frame_size));
+		  ((pba << 10) - 2 * adapter->max_frame_size));
 
-	fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
-	fc->low_water = fc->high_water - 8;
+	if (mac->type < e1000_82576) {
+		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
+		fc->low_water = fc->high_water - 8;
+	} else {
+		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
+		fc->low_water = fc->high_water - 16;
+	}
 	fc->pause_time = 0xFFFF;
 	fc->send_xon = 1;
 	fc->type = fc->original_type;
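Taken together with the PBA repartition above, the new high-water calculation can be sanity-checked with rough numbers. The figures below assume E1000_PBA_64K means a 64 KB Rx packet buffer and a roughly 1518-byte maximum frame at the default MTU; both are illustrative assumptions, not values taken from the patch.

	#include <stdio.h>

	int main(void)
	{
		unsigned int pba = 64;              /* assumed: E1000_PBA_64K, in KB */
		unsigned int max_frame_size = 1518; /* assumed: default-MTU frame */

		unsigned int ninety_pct = (pba << 10) * 9 / 10;             /* 58982 */
		unsigned int minus_two = (pba << 10) - 2 * max_frame_size;  /* 62500 */
		unsigned int hwm = ninety_pct < minus_two ? ninety_pct : minus_two;

		unsigned int high_water = hwm & 0xFFF0; /* 82576: 16-byte steps -> 58976 */
		unsigned int low_water = high_water - 16;                   /* 58960 */

		printf("hwm=%u high=%u low=%u\n", hwm, high_water, low_water);
		return 0;
	}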
@@ -1118,9 +1209,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	 * lan on a particular port */
 	switch (pdev->device) {
 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER:
 		adapter->eeprom_wol = 0;
 		break;
 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+	case E1000_DEV_ID_82576_FIBER:
+	case E1000_DEV_ID_82576_SERDES:
 		/* Wake events only supported on port A for dual fiber
 		 * regardless of eeprom setting */
 		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
@@ -1801,7 +1895,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
 		get_random_bytes(&random[0], 40);
 
-		shift = 6;
+		if (hw->mac.type >= e1000_82576)
+			shift = 0;
+		else
+			shift = 6;
 		for (j = 0; j < (32 * 4); j++) {
 			reta.bytes[j & 3] =
 				(j % adapter->num_rx_queues) << shift;
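The shift controls where the queue index sits inside each byte of the RSS redirection table: the 82575 apparently expects it in the upper bits of the byte (hence shift = 6), while the 82576 takes it right-aligned (shift = 0). A stand-alone sketch of how one 32-bit RETA word is filled under that rule, with illustrative names:

	#include <stdint.h>

	/* Fill one 4-byte RETA word the way the loop above does, spreading
	 * entries round-robin across num_rx_queues.  shift is 0 on 82576,
	 * 6 on 82575. */
	static uint32_t reta_word(int first_index, int num_rx_queues, int shift)
	{
		union { uint32_t dword; uint8_t bytes[4]; } reta = { 0 };
		for (int j = 0; j < 4; j++)
			reta.bytes[j] = ((first_index + j) % num_rx_queues) << shift;
		return reta.dword;
	}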
@@ -2127,7 +2224,7 @@ static void igb_set_multi(struct net_device *netdev)
 
 	if (!netdev->mc_count) {
 		/* nothing to program, so clear mc list */
-		igb_update_mc_addr_list(hw, NULL, 0, 1,
+		igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
 					mac->rar_entry_count);
 		return;
 	}
@@ -2145,7 +2242,8 @@ static void igb_set_multi(struct net_device *netdev)
 		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
 		mc_ptr = mc_ptr->next;
 	}
-	igb_update_mc_addr_list(hw, mta_list, i, 1, mac->rar_entry_count);
+	igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
+				      mac->rar_entry_count);
 	kfree(mta_list);
 }
 
@@ -3211,8 +3309,14 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
 
 	if (rx_ring->cpu != cpu) {
 		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
-		dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
-		dca_rxctrl |= dca_get_tag(cpu);
+		if (hw->mac.type == e1000_82576) {
+			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
+			dca_rxctrl |= dca_get_tag(cpu) <<
+				      E1000_DCA_RXCTRL_CPUID_SHIFT;
+		} else {
+			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
+			dca_rxctrl |= dca_get_tag(cpu);
+		}
 		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
 		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
 		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
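This hunk (and the matching Tx one below) is a plain mask-and-shift update: on the 82576 the DCA CPU ID field lives higher up in DCA_RXCTRL/DCA_TXCTRL, so the tag from dca_get_tag() has to be shifted into position instead of OR'd into the low bits as on the 82575. A generic sketch of that pattern, with placeholder field parameters rather than the real register layout:

	#include <stdint.h>

	/* Replace one bit-field inside a control register: clear it with its
	 * mask, then shift the new value to the field's offset, as both DCA
	 * hunks do with the CPU ID field. */
	static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
				  uint32_t value)
	{
		reg &= ~mask;                   /* e.g. ~CPUID_MASK_82576 */
		reg |= (value << shift) & mask; /* e.g. dca_get_tag(cpu) << CPUID_SHIFT */
		return reg;
	}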
@@ -3232,8 +3336,14 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
 
 	if (tx_ring->cpu != cpu) {
 		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
-		dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
-		dca_txctrl |= dca_get_tag(cpu);
+		if (hw->mac.type == e1000_82576) {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
+			dca_txctrl |= dca_get_tag(cpu) <<
+				      E1000_DCA_TXCTRL_CPUID_SHIFT;
+		} else {
+			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
+			dca_txctrl |= dca_get_tag(cpu);
+		}
 		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
 		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
 		tx_ring->cpu = cpu;
@@ -3572,7 +3682,7 @@ done_cleaning:
 			/* detected Tx unit hang */
 			dev_err(&adapter->pdev->dev,
 				"Detected Tx Unit Hang\n"
-				"  Tx Queue             <%lu>\n"
+				"  Tx Queue             <%d>\n"
 				"  TDH                  <%x>\n"
 				"  TDT                  <%x>\n"
 				"  next_to_use          <%x>\n"
@@ -3582,8 +3692,7 @@ done_cleaning:
 				"  time_stamp           <%lx>\n"
 				"  jiffies              <%lx>\n"
 				"  desc.status          <%x>\n",
-				(unsigned long)((tx_ring - adapter->tx_ring) /
-					sizeof(struct igb_ring)),
+				tx_ring->queue_index,
 				readl(adapter->hw.hw_addr + tx_ring->head),
 				readl(adapter->hw.hw_addr + tx_ring->tail),
 				tx_ring->next_to_use,
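The replaced expression was subtly wrong: pointer subtraction already yields an index in elements, so dividing by sizeof(struct igb_ring) again reported queue 0 for practically every ring, which is why the message now prints tx_ring->queue_index instead. A stand-alone illustration (the structure size here is a stand-in, not the real one):

	#include <stddef.h>

	struct igb_ring { char pad[192]; };	/* stand-in size for illustration */

	static int old_queue_index(struct igb_ring *ring, struct igb_ring *base)
	{
		/* Buggy: the difference is already an element count, so the
		 * extra divide collapses every small index to zero. */
		return (int)((ring - base) / sizeof(struct igb_ring));
	}

	static int new_queue_index(struct igb_ring *ring, struct igb_ring *base)
	{
		return (int)(ring - base);	/* what queue_index records */
	}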
@@ -4098,7 +4207,7 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u32 ctrl, ctrl_ext, rctl, status;
+	u32 ctrl, rctl, status;
 	u32 wufc = adapter->wol;
 #ifdef CONFIG_PM
 	int retval = 0;
@@ -4141,33 +4250,24 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
 			ctrl |= E1000_CTRL_ADVD3WUC;
 		wr32(E1000_CTRL, ctrl);
 
-		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
-		    adapter->hw.phy.media_type ==
-					e1000_media_type_internal_serdes) {
-			/* keep the laser running in D3 */
-			ctrl_ext = rd32(E1000_CTRL_EXT);
-			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
-			wr32(E1000_CTRL_EXT, ctrl_ext);
-		}
-
 		/* Allow time for pending master requests to run */
 		igb_disable_pcie_master(&adapter->hw);
 
 		wr32(E1000_WUC, E1000_WUC_PME_EN);
 		wr32(E1000_WUFC, wufc);
-		pci_enable_wake(pdev, PCI_D3hot, 1);
-		pci_enable_wake(pdev, PCI_D3cold, 1);
 	} else {
 		wr32(E1000_WUC, 0);
 		wr32(E1000_WUFC, 0);
-		pci_enable_wake(pdev, PCI_D3hot, 0);
-		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
-	/* make sure adapter isn't asleep if manageability is enabled */
-	if (adapter->en_mng_pt) {
+	/* make sure adapter isn't asleep if manageability/wol is enabled */
+	if (wufc || adapter->en_mng_pt) {
 		pci_enable_wake(pdev, PCI_D3hot, 1);
 		pci_enable_wake(pdev, PCI_D3cold, 1);
+	} else {
+		igb_shutdown_fiber_serdes_link_82575(hw);
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this