Diffstat (limited to 'drivers/net/ethernet/intel/e1000e/netdev.c')
-rw-r--r--   drivers/net/ethernet/intel/e1000e/netdev.c | 385
1 file changed, 238 insertions(+), 147 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d91933c4cdd..3e69386add04 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,30 +1,23 @@ | |||
1 | /******************************************************************************* | 1 | /* Intel PRO/1000 Linux driver |
2 | 2 | * Copyright(c) 1999 - 2014 Intel Corporation. | |
3 | Intel PRO/1000 Linux driver | 3 | * |
4 | Copyright(c) 1999 - 2013 Intel Corporation. | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | 5 | * under the terms and conditions of the GNU General Public License, | |
6 | This program is free software; you can redistribute it and/or modify it | 6 | * version 2, as published by the Free Software Foundation. |
7 | under the terms and conditions of the GNU General Public License, | 7 | * |
8 | version 2, as published by the Free Software Foundation. | 8 | * This program is distributed in the hope it will be useful, but WITHOUT |
9 | 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | This program is distributed in the hope it will be useful, but WITHOUT | 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 11 | * more details. |
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 12 | * |
13 | more details. | 13 | * The full GNU General Public License is included in this distribution in |
14 | 14 | * the file called "COPYING". | |
15 | You should have received a copy of the GNU General Public License along with | 15 | * |
16 | this program; if not, write to the Free Software Foundation, Inc., | 16 | * Contact Information: |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 17 | * Linux NICS <linux.nics@intel.com> |
18 | 18 | * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
19 | The full GNU General Public License is included in this distribution in | 19 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
20 | the file called "COPYING". | 20 | */ |
21 | |||
22 | Contact Information: | ||
23 | Linux NICS <linux.nics@intel.com> | ||
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | ||
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
26 | |||
27 | *******************************************************************************/ | ||
28 | 21 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
30 | 23 | ||
@@ -885,7 +878,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, | |||
885 | struct sk_buff *skb) | 878 | struct sk_buff *skb) |
886 | { | 879 | { |
887 | if (netdev->features & NETIF_F_RXHASH) | 880 | if (netdev->features & NETIF_F_RXHASH) |
888 | skb->rxhash = le32_to_cpu(rss); | 881 | skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3); |
889 | } | 882 | } |
890 | 883 | ||
891 | /** | 884 | /** |
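The rxhash hunk above tracks the core API change of this era: drivers no longer write skb->rxhash directly but report the hash together with its type via skb_set_hash(). A minimal sketch of the pattern, with an illustrative helper name; PKT_HASH_TYPE_L3 reflects that the reported RSS hash covers only the IP addresses (an L4-aware hash would use PKT_HASH_TYPE_L4):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: hand a hardware RSS hash to the stack, gated on NETIF_F_RXHASH. */
static void example_report_rss(struct net_device *netdev,
                               struct sk_buff *skb, __le32 rss)
{
    if (netdev->features & NETIF_F_RXHASH)
        skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}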
@@ -1097,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work) | |||
1097 | adapter->tx_hang_recheck = true; | 1090 | adapter->tx_hang_recheck = true; |
1098 | return; | 1091 | return; |
1099 | } | 1092 | } |
1100 | /* Real hang detected */ | ||
1101 | adapter->tx_hang_recheck = false; | 1093 | adapter->tx_hang_recheck = false; |
1094 | |||
1095 | if (er32(TDH(0)) == er32(TDT(0))) { | ||
1096 | e_dbg("false hang detected, ignoring\n"); | ||
1097 | return; | ||
1098 | } | ||
1099 | |||
1100 | /* Real hang detected */ | ||
1102 | netif_stop_queue(netdev); | 1101 | netif_stop_queue(netdev); |
1103 | 1102 | ||
1104 | e1e_rphy(hw, MII_BMSR, &phy_status); | 1103 | e1e_rphy(hw, MII_BMSR, &phy_status); |
@@ -1128,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work) | |||
1128 | eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), | 1127 | eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), |
1129 | phy_status, phy_1000t_status, phy_ext_status, pci_status); | 1128 | phy_status, phy_1000t_status, phy_ext_status, pci_status); |
1130 | 1129 | ||
1130 | e1000e_dump(adapter); | ||
1131 | |||
1131 | /* Suggest workaround for known h/w issue */ | 1132 | /* Suggest workaround for known h/w issue */ |
1132 | if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) | 1133 | if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) |
1133 | e_err("Try turning off Tx pause (flow control) via ethtool\n"); | 1134 | e_err("Try turning off Tx pause (flow control) via ethtool\n"); |
@@ -1147,9 +1148,6 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) | |||
1147 | tx_hwtstamp_work); | 1148 | tx_hwtstamp_work); |
1148 | struct e1000_hw *hw = &adapter->hw; | 1149 | struct e1000_hw *hw = &adapter->hw; |
1149 | 1150 | ||
1150 | if (!adapter->tx_hwtstamp_skb) | ||
1151 | return; | ||
1152 | |||
1153 | if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { | 1151 | if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { |
1154 | struct skb_shared_hwtstamps shhwtstamps; | 1152 | struct skb_shared_hwtstamps shhwtstamps; |
1155 | u64 txstmp; | 1153 | u64 txstmp; |
@@ -1162,6 +1160,12 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) | |||
1162 | skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); | 1160 | skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); |
1163 | dev_kfree_skb_any(adapter->tx_hwtstamp_skb); | 1161 | dev_kfree_skb_any(adapter->tx_hwtstamp_skb); |
1164 | adapter->tx_hwtstamp_skb = NULL; | 1162 | adapter->tx_hwtstamp_skb = NULL; |
1163 | } else if (time_after(jiffies, adapter->tx_hwtstamp_start | ||
1164 | + adapter->tx_timeout_factor * HZ)) { | ||
1165 | dev_kfree_skb_any(adapter->tx_hwtstamp_skb); | ||
1166 | adapter->tx_hwtstamp_skb = NULL; | ||
1167 | adapter->tx_hwtstamp_timeouts++; | ||
1168 | e_warn("clearing Tx timestamp hang\n"); | ||
1165 | } else { | 1169 | } else { |
1166 | /* reschedule to check later */ | 1170 | /* reschedule to check later */ |
1167 | schedule_work(&adapter->tx_hwtstamp_work); | 1171 | schedule_work(&adapter->tx_hwtstamp_work); |
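The added else-if above puts a deadline on the timestamp work item instead of letting it reschedule itself forever when the hardware never latches a Tx timestamp. time_after() is the wraparound-safe way to compare jiffies, and the deadline scales with tx_timeout_factor, which the driver derives from link speed elsewhere. A small sketch of the idiom, with an illustrative helper name:

#include <linux/jiffies.h>

/* Sketch: has more than (factor) seconds elapsed since start (in jiffies)?
 * time_after() handles jiffies wraparound correctly.
 */
static bool example_deadline_passed(unsigned long start, unsigned int factor)
{
    return time_after(jiffies, start + factor * HZ);
}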
@@ -1701,7 +1705,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) | |||
1701 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; | 1705 | adapter->flags2 &= ~FLAG2_IS_DISCARDING; |
1702 | 1706 | ||
1703 | writel(0, rx_ring->head); | 1707 | writel(0, rx_ring->head); |
1704 | if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | 1708 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
1705 | e1000e_update_rdt_wa(rx_ring, 0); | 1709 | e1000e_update_rdt_wa(rx_ring, 0); |
1706 | else | 1710 | else |
1707 | writel(0, rx_ring->tail); | 1711 | writel(0, rx_ring->tail); |
@@ -2038,13 +2042,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) | |||
2038 | msix_entry), | 2042 | msix_entry), |
2039 | GFP_KERNEL); | 2043 | GFP_KERNEL); |
2040 | if (adapter->msix_entries) { | 2044 | if (adapter->msix_entries) { |
2045 | struct e1000_adapter *a = adapter; | ||
2046 | |||
2041 | for (i = 0; i < adapter->num_vectors; i++) | 2047 | for (i = 0; i < adapter->num_vectors; i++) |
2042 | adapter->msix_entries[i].entry = i; | 2048 | adapter->msix_entries[i].entry = i; |
2043 | 2049 | ||
2044 | err = pci_enable_msix(adapter->pdev, | 2050 | err = pci_enable_msix_range(a->pdev, |
2045 | adapter->msix_entries, | 2051 | a->msix_entries, |
2046 | adapter->num_vectors); | 2052 | a->num_vectors, |
2047 | if (err == 0) | 2053 | a->num_vectors); |
2054 | if (err > 0) | ||
2048 | return; | 2055 | return; |
2049 | } | 2056 | } |
2050 | /* MSI-X failed, so fall through and try MSI */ | 2057 | /* MSI-X failed, so fall through and try MSI */ |
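The MSI-X hunk above moves from pci_enable_msix(), whose 0/positive/negative return convention required retry loops, to pci_enable_msix_range(), which either allocates between minvec and maxvec vectors or fails outright; since minvec == maxvec here, any positive return means the full vector count was granted. A hedged, self-contained sketch of the new call shape (helper name illustrative, error handling simplified):

#include <linux/pci.h>

/* Sketch: request exactly nvec MSI-X vectors or fail.
 * pci_enable_msix_range() returns the number of vectors allocated
 * (here necessarily nvec) on success, or a negative errno on failure.
 */
static int example_enable_msix(struct pci_dev *pdev,
                               struct msix_entry *entries, int nvec)
{
    int i, ret;

    for (i = 0; i < nvec; i++)
        entries[i].entry = i;

    ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
    return ret < 0 ? ret : 0;
}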
@@ -2402,7 +2409,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) | |||
2402 | tx_ring->next_to_clean = 0; | 2409 | tx_ring->next_to_clean = 0; |
2403 | 2410 | ||
2404 | writel(0, tx_ring->head); | 2411 | writel(0, tx_ring->head); |
2405 | if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) | 2412 | if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) |
2406 | e1000e_update_tdt_wa(tx_ring, 0); | 2413 | e1000e_update_tdt_wa(tx_ring, 0); |
2407 | else | 2414 | else |
2408 | writel(0, tx_ring->tail); | 2415 | writel(0, tx_ring->tail); |
@@ -2894,7 +2901,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
2894 | struct e1000_hw *hw = &adapter->hw; | 2901 | struct e1000_hw *hw = &adapter->hw; |
2895 | struct e1000_ring *tx_ring = adapter->tx_ring; | 2902 | struct e1000_ring *tx_ring = adapter->tx_ring; |
2896 | u64 tdba; | 2903 | u64 tdba; |
2897 | u32 tdlen, tarc; | 2904 | u32 tdlen, tctl, tarc; |
2898 | 2905 | ||
2899 | /* Setup the HW Tx Head and Tail descriptor pointers */ | 2906 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
2900 | tdba = tx_ring->dma; | 2907 | tdba = tx_ring->dma; |
@@ -2931,6 +2938,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
2931 | /* erratum work around: set txdctl the same for both queues */ | 2938 | /* erratum work around: set txdctl the same for both queues */ |
2932 | ew32(TXDCTL(1), er32(TXDCTL(0))); | 2939 | ew32(TXDCTL(1), er32(TXDCTL(0))); |
2933 | 2940 | ||
2941 | /* Program the Transmit Control Register */ | ||
2942 | tctl = er32(TCTL); | ||
2943 | tctl &= ~E1000_TCTL_CT; | ||
2944 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | ||
2945 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | ||
2946 | |||
2934 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { | 2947 | if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { |
2935 | tarc = er32(TARC(0)); | 2948 | tarc = er32(TARC(0)); |
2936 | /* set the speed mode bit, we'll clear it if we're not at | 2949 | /* set the speed mode bit, we'll clear it if we're not at |
@@ -2961,6 +2974,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) | |||
2961 | /* enable Report Status bit */ | 2974 | /* enable Report Status bit */ |
2962 | adapter->txd_cmd |= E1000_TXD_CMD_RS; | 2975 | adapter->txd_cmd |= E1000_TXD_CMD_RS; |
2963 | 2976 | ||
2977 | ew32(TCTL, tctl); | ||
2978 | |||
2964 | hw->mac.ops.config_collision_dist(hw); | 2979 | hw->mac.ops.config_collision_dist(hw); |
2965 | } | 2980 | } |
2966 | 2981 | ||
@@ -2976,11 +2991,21 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2976 | u32 rctl, rfctl; | 2991 | u32 rctl, rfctl; |
2977 | u32 pages = 0; | 2992 | u32 pages = 0; |
2978 | 2993 | ||
2979 | /* Workaround Si errata on PCHx - configure jumbo frame flow */ | 2994 | /* Workaround Si errata on PCHx - configure jumbo frame flow. |
2980 | if ((hw->mac.type >= e1000_pch2lan) && | 2995 | * If jumbo frames not set, program related MAC/PHY registers |
2981 | (adapter->netdev->mtu > ETH_DATA_LEN) && | 2996 | * to h/w defaults |
2982 | e1000_lv_jumbo_workaround_ich8lan(hw, true)) | 2997 | */ |
2983 | e_dbg("failed to enable jumbo frame workaround mode\n"); | 2998 | if (hw->mac.type >= e1000_pch2lan) { |
2999 | s32 ret_val; | ||
3000 | |||
3001 | if (adapter->netdev->mtu > ETH_DATA_LEN) | ||
3002 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); | ||
3003 | else | ||
3004 | ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); | ||
3005 | |||
3006 | if (ret_val) | ||
3007 | e_dbg("failed to enable|disable jumbo frame workaround mode\n"); | ||
3008 | } | ||
2984 | 3009 | ||
2985 | /* Program MC offset vector base */ | 3010 | /* Program MC offset vector base */ |
2986 | rctl = er32(RCTL); | 3011 | rctl = er32(RCTL); |
@@ -3331,6 +3356,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev) | |||
3331 | struct e1000_hw *hw = &adapter->hw; | 3356 | struct e1000_hw *hw = &adapter->hw; |
3332 | u32 rctl; | 3357 | u32 rctl; |
3333 | 3358 | ||
3359 | if (pm_runtime_suspended(netdev->dev.parent)) | ||
3360 | return; | ||
3361 | |||
3334 | /* Check for Promiscuous and All Multicast modes */ | 3362 | /* Check for Promiscuous and All Multicast modes */ |
3335 | rctl = er32(RCTL); | 3363 | rctl = er32(RCTL); |
3336 | 3364 | ||
@@ -3691,10 +3719,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter) | |||
3691 | */ | 3719 | */ |
3692 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | 3720 | static void e1000_power_down_phy(struct e1000_adapter *adapter) |
3693 | { | 3721 | { |
3694 | /* WoL is enabled */ | ||
3695 | if (adapter->wol) | ||
3696 | return; | ||
3697 | |||
3698 | if (adapter->hw.phy.ops.power_down) | 3722 | if (adapter->hw.phy.ops.power_down) |
3699 | adapter->hw.phy.ops.power_down(&adapter->hw); | 3723 | adapter->hw.phy.ops.power_down(&adapter->hw); |
3700 | } | 3724 | } |
@@ -3911,10 +3935,8 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
3911 | } | 3935 | } |
3912 | 3936 | ||
3913 | if (!netif_running(adapter->netdev) && | 3937 | if (!netif_running(adapter->netdev) && |
3914 | !test_bit(__E1000_TESTING, &adapter->state)) { | 3938 | !test_bit(__E1000_TESTING, &adapter->state)) |
3915 | e1000_power_down_phy(adapter); | 3939 | e1000_power_down_phy(adapter); |
3916 | return; | ||
3917 | } | ||
3918 | 3940 | ||
3919 | e1000_get_phy_info(hw); | 3941 | e1000_get_phy_info(hw); |
3920 | 3942 | ||
@@ -3981,7 +4003,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter) | |||
3981 | 4003 | ||
3982 | static void e1000e_update_stats(struct e1000_adapter *adapter); | 4004 | static void e1000e_update_stats(struct e1000_adapter *adapter); |
3983 | 4005 | ||
3984 | void e1000e_down(struct e1000_adapter *adapter) | 4006 | /** |
4007 | * e1000e_down - quiesce the device and optionally reset the hardware | ||
4008 | * @adapter: board private structure | ||
4009 | * @reset: boolean flag to reset the hardware or not | ||
4010 | */ | ||
4011 | void e1000e_down(struct e1000_adapter *adapter, bool reset) | ||
3985 | { | 4012 | { |
3986 | struct net_device *netdev = adapter->netdev; | 4013 | struct net_device *netdev = adapter->netdev; |
3987 | struct e1000_hw *hw = &adapter->hw; | 4014 | struct e1000_hw *hw = &adapter->hw; |
@@ -4035,12 +4062,8 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
4035 | e1000_lv_jumbo_workaround_ich8lan(hw, false)) | 4062 | e1000_lv_jumbo_workaround_ich8lan(hw, false)) |
4036 | e_dbg("failed to disable jumbo frame workaround mode\n"); | 4063 | e_dbg("failed to disable jumbo frame workaround mode\n"); |
4037 | 4064 | ||
4038 | if (!pci_channel_offline(adapter->pdev)) | 4065 | if (reset && !pci_channel_offline(adapter->pdev)) |
4039 | e1000e_reset(adapter); | 4066 | e1000e_reset(adapter); |
4040 | |||
4041 | /* TODO: for power management, we could drop the link and | ||
4042 | * pci_disable_device here. | ||
4043 | */ | ||
4044 | } | 4067 | } |
4045 | 4068 | ||
4046 | void e1000e_reinit_locked(struct e1000_adapter *adapter) | 4069 | void e1000e_reinit_locked(struct e1000_adapter *adapter) |
@@ -4048,7 +4071,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) | |||
4048 | might_sleep(); | 4071 | might_sleep(); |
4049 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | 4072 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) |
4050 | usleep_range(1000, 2000); | 4073 | usleep_range(1000, 2000); |
4051 | e1000e_down(adapter); | 4074 | e1000e_down(adapter, true); |
4052 | e1000e_up(adapter); | 4075 | e1000e_up(adapter); |
4053 | clear_bit(__E1000_RESETTING, &adapter->state); | 4076 | clear_bit(__E1000_RESETTING, &adapter->state); |
4054 | } | 4077 | } |
@@ -4326,7 +4349,6 @@ static int e1000_open(struct net_device *netdev) | |||
4326 | adapter->tx_hang_recheck = false; | 4349 | adapter->tx_hang_recheck = false; |
4327 | netif_start_queue(netdev); | 4350 | netif_start_queue(netdev); |
4328 | 4351 | ||
4329 | adapter->idle_check = true; | ||
4330 | hw->mac.get_link_status = true; | 4352 | hw->mac.get_link_status = true; |
4331 | pm_runtime_put(&pdev->dev); | 4353 | pm_runtime_put(&pdev->dev); |
4332 | 4354 | ||
@@ -4376,14 +4398,15 @@ static int e1000_close(struct net_device *netdev) | |||
4376 | pm_runtime_get_sync(&pdev->dev); | 4398 | pm_runtime_get_sync(&pdev->dev); |
4377 | 4399 | ||
4378 | if (!test_bit(__E1000_DOWN, &adapter->state)) { | 4400 | if (!test_bit(__E1000_DOWN, &adapter->state)) { |
4379 | e1000e_down(adapter); | 4401 | e1000e_down(adapter, true); |
4380 | e1000_free_irq(adapter); | 4402 | e1000_free_irq(adapter); |
4403 | |||
4404 | /* Link status message must follow this format */ | ||
4405 | pr_info("%s NIC Link is Down\n", adapter->netdev->name); | ||
4381 | } | 4406 | } |
4382 | 4407 | ||
4383 | napi_disable(&adapter->napi); | 4408 | napi_disable(&adapter->napi); |
4384 | 4409 | ||
4385 | e1000_power_down_phy(adapter); | ||
4386 | |||
4387 | e1000e_free_tx_resources(adapter->tx_ring); | 4410 | e1000e_free_tx_resources(adapter->tx_ring); |
4388 | e1000e_free_rx_resources(adapter->rx_ring); | 4411 | e1000e_free_rx_resources(adapter->rx_ring); |
4389 | 4412 | ||
@@ -4460,11 +4483,16 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
4460 | struct e1000_adapter *adapter = container_of(work, | 4483 | struct e1000_adapter *adapter = container_of(work, |
4461 | struct e1000_adapter, | 4484 | struct e1000_adapter, |
4462 | update_phy_task); | 4485 | update_phy_task); |
4486 | struct e1000_hw *hw = &adapter->hw; | ||
4463 | 4487 | ||
4464 | if (test_bit(__E1000_DOWN, &adapter->state)) | 4488 | if (test_bit(__E1000_DOWN, &adapter->state)) |
4465 | return; | 4489 | return; |
4466 | 4490 | ||
4467 | e1000_get_phy_info(&adapter->hw); | 4491 | e1000_get_phy_info(hw); |
4492 | |||
4493 | /* Enable EEE on 82579 after link up */ | ||
4494 | if (hw->phy.type == e1000_phy_82579) | ||
4495 | e1000_set_eee_pchlan(hw); | ||
4468 | } | 4496 | } |
4469 | 4497 | ||
4470 | /** | 4498 | /** |
@@ -4799,6 +4827,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) | |||
4799 | 4827 | ||
4800 | if (adapter->phy_hang_count > 1) { | 4828 | if (adapter->phy_hang_count > 1) { |
4801 | adapter->phy_hang_count = 0; | 4829 | adapter->phy_hang_count = 0; |
4830 | e_dbg("PHY appears hung - resetting\n"); | ||
4802 | schedule_work(&adapter->reset_task); | 4831 | schedule_work(&adapter->reset_task); |
4803 | } | 4832 | } |
4804 | } | 4833 | } |
@@ -4957,15 +4986,11 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
4957 | mod_timer(&adapter->phy_info_timer, | 4986 | mod_timer(&adapter->phy_info_timer, |
4958 | round_jiffies(jiffies + 2 * HZ)); | 4987 | round_jiffies(jiffies + 2 * HZ)); |
4959 | 4988 | ||
4960 | /* The link is lost so the controller stops DMA. | 4989 | /* 8000ES2LAN requires a Rx packet buffer work-around |
4961 | * If there is queued Tx work that cannot be done | 4990 | * on link down event; reset the controller to flush |
4962 | * or if on an 8000ES2LAN which requires a Rx packet | 4991 | * the Rx packet buffer. |
4963 | * buffer work-around on link down event, reset the | ||
4964 | * controller to flush the Tx/Rx packet buffers. | ||
4965 | * (Do the reset outside of interrupt context). | ||
4966 | */ | 4992 | */ |
4967 | if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || | 4993 | if (adapter->flags & FLAG_RX_NEEDS_RESTART) |
4968 | (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) | ||
4969 | adapter->flags |= FLAG_RESTART_NOW; | 4994 | adapter->flags |= FLAG_RESTART_NOW; |
4970 | else | 4995 | else |
4971 | pm_schedule_suspend(netdev->dev.parent, | 4996 | pm_schedule_suspend(netdev->dev.parent, |
@@ -4988,6 +5013,15 @@ link_up: | |||
4988 | adapter->gotc_old = adapter->stats.gotc; | 5013 | adapter->gotc_old = adapter->stats.gotc; |
4989 | spin_unlock(&adapter->stats64_lock); | 5014 | spin_unlock(&adapter->stats64_lock); |
4990 | 5015 | ||
5016 | /* If the link is lost the controller stops DMA, but | ||
5017 | * if there is queued Tx work it cannot be done. So | ||
5018 | * reset the controller to flush the Tx packet buffers. | ||
5019 | */ | ||
5020 | if (!netif_carrier_ok(netdev) && | ||
5021 | (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) | ||
5022 | adapter->flags |= FLAG_RESTART_NOW; | ||
5023 | |||
5024 | /* If reset is necessary, do it outside of interrupt context. */ | ||
4991 | if (adapter->flags & FLAG_RESTART_NOW) { | 5025 | if (adapter->flags & FLAG_RESTART_NOW) { |
4992 | schedule_work(&adapter->reset_task); | 5026 | schedule_work(&adapter->reset_task); |
4993 | /* return immediately since reset is imminent */ | 5027 | /* return immediately since reset is imminent */ |
@@ -5066,16 +5100,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
5066 | u32 cmd_length = 0; | 5100 | u32 cmd_length = 0; |
5067 | u16 ipcse = 0, mss; | 5101 | u16 ipcse = 0, mss; |
5068 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | 5102 | u8 ipcss, ipcso, tucss, tucso, hdr_len; |
5103 | int err; | ||
5069 | 5104 | ||
5070 | if (!skb_is_gso(skb)) | 5105 | if (!skb_is_gso(skb)) |
5071 | return 0; | 5106 | return 0; |
5072 | 5107 | ||
5073 | if (skb_header_cloned(skb)) { | 5108 | err = skb_cow_head(skb, 0); |
5074 | int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | 5109 | if (err < 0) |
5075 | 5110 | return err; | |
5076 | if (err) | ||
5077 | return err; | ||
5078 | } | ||
5079 | 5111 | ||
5080 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 5112 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
5081 | mss = skb_shinfo(skb)->gso_size; | 5113 | mss = skb_shinfo(skb)->gso_size; |
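The TSO hunk above replaces the open-coded skb_header_cloned()/pskb_expand_head() pair with skb_cow_head(), which performs the same "make the headers private before editing them" step in one call. A minimal sketch of the idiom under that assumption; the helper name is illustrative:

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Sketch: ensure writable headers before filling in TSO context fields.
 * skb_cow_head() returns 0 on success or a negative errno.
 */
static int example_prepare_tso(struct sk_buff *skb, u8 *hdr_len)
{
    int err;

    if (!skb_is_gso(skb))
        return 0;

    err = skb_cow_head(skb, 0);
    if (err < 0)
        return err;

    *hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
    return 1;                       /* GSO frame, headers now private */
}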
@@ -5546,6 +5578,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5546 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 5578 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
5547 | tx_flags |= E1000_TX_FLAGS_HWTSTAMP; | 5579 | tx_flags |= E1000_TX_FLAGS_HWTSTAMP; |
5548 | adapter->tx_hwtstamp_skb = skb_get(skb); | 5580 | adapter->tx_hwtstamp_skb = skb_get(skb); |
5581 | adapter->tx_hwtstamp_start = jiffies; | ||
5549 | schedule_work(&adapter->tx_hwtstamp_work); | 5582 | schedule_work(&adapter->tx_hwtstamp_work); |
5550 | } else { | 5583 | } else { |
5551 | skb_tx_timestamp(skb); | 5584 | skb_tx_timestamp(skb); |
@@ -5654,7 +5687,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, | |||
5654 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | 5687 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) |
5655 | { | 5688 | { |
5656 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5689 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5657 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 5690 | int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN; |
5658 | 5691 | ||
5659 | /* Jumbo frame support */ | 5692 | /* Jumbo frame support */ |
5660 | if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && | 5693 | if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && |
@@ -5684,8 +5717,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
5684 | adapter->max_frame_size = max_frame; | 5717 | adapter->max_frame_size = max_frame; |
5685 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); | 5718 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
5686 | netdev->mtu = new_mtu; | 5719 | netdev->mtu = new_mtu; |
5720 | |||
5721 | pm_runtime_get_sync(netdev->dev.parent); | ||
5722 | |||
5687 | if (netif_running(netdev)) | 5723 | if (netif_running(netdev)) |
5688 | e1000e_down(adapter); | 5724 | e1000e_down(adapter, true); |
5689 | 5725 | ||
5690 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | 5726 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
5691 | * means we reserve 2 more, this pushes us to allocate from the next | 5727 | * means we reserve 2 more, this pushes us to allocate from the next |
@@ -5711,6 +5747,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
5711 | else | 5747 | else |
5712 | e1000e_reset(adapter); | 5748 | e1000e_reset(adapter); |
5713 | 5749 | ||
5750 | pm_runtime_put_sync(netdev->dev.parent); | ||
5751 | |||
5714 | clear_bit(__E1000_RESETTING, &adapter->state); | 5752 | clear_bit(__E1000_RESETTING, &adapter->state); |
5715 | 5753 | ||
5716 | return 0; | 5754 | return 0; |
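e1000_change_mtu() now brackets the ring teardown/rebuild with pm_runtime_get_sync()/pm_runtime_put_sync(), guaranteeing the device is runtime-resumed, and stays resumed, while it is being reconfigured. The generic bracketing pattern, sketched with an illustrative function; dev would be the PCI parent device:

#include <linux/pm_runtime.h>

/* Sketch: hold a runtime-PM reference across a reconfiguration. */
static void example_reconfigure(struct device *dev)
{
    pm_runtime_get_sync(dev);       /* resume now if runtime-suspended */

    /* ... tear down rings, change sizes, bring the device back up ... */

    pm_runtime_put_sync(dev);       /* drop reference; allow suspend again */
}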
@@ -5852,7 +5890,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |||
5852 | static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | 5890 | static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) |
5853 | { | 5891 | { |
5854 | struct e1000_hw *hw = &adapter->hw; | 5892 | struct e1000_hw *hw = &adapter->hw; |
5855 | u32 i, mac_reg; | 5893 | u32 i, mac_reg, wuc; |
5856 | u16 phy_reg, wuc_enable; | 5894 | u16 phy_reg, wuc_enable; |
5857 | int retval; | 5895 | int retval; |
5858 | 5896 | ||
@@ -5899,13 +5937,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) | |||
5899 | phy_reg |= BM_RCTL_RFCE; | 5937 | phy_reg |= BM_RCTL_RFCE; |
5900 | hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); | 5938 | hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); |
5901 | 5939 | ||
5940 | wuc = E1000_WUC_PME_EN; | ||
5941 | if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) | ||
5942 | wuc |= E1000_WUC_APME; | ||
5943 | |||
5902 | /* enable PHY wakeup in MAC register */ | 5944 | /* enable PHY wakeup in MAC register */ |
5903 | ew32(WUFC, wufc); | 5945 | ew32(WUFC, wufc); |
5904 | ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); | 5946 | ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | |
5947 | E1000_WUC_PME_STATUS | wuc)); | ||
5905 | 5948 | ||
5906 | /* configure and enable PHY wakeup in PHY registers */ | 5949 | /* configure and enable PHY wakeup in PHY registers */ |
5907 | hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); | 5950 | hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); |
5908 | hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); | 5951 | hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); |
5909 | 5952 | ||
5910 | /* activate PHY wakeup */ | 5953 | /* activate PHY wakeup */ |
5911 | wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; | 5954 | wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; |
@@ -5918,15 +5961,10 @@ release: | |||
5918 | return retval; | 5961 | return retval; |
5919 | } | 5962 | } |
5920 | 5963 | ||
5921 | static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) | 5964 | static int e1000e_pm_freeze(struct device *dev) |
5922 | { | 5965 | { |
5923 | struct net_device *netdev = pci_get_drvdata(pdev); | 5966 | struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); |
5924 | struct e1000_adapter *adapter = netdev_priv(netdev); | 5967 | struct e1000_adapter *adapter = netdev_priv(netdev); |
5925 | struct e1000_hw *hw = &adapter->hw; | ||
5926 | u32 ctrl, ctrl_ext, rctl, status; | ||
5927 | /* Runtime suspend should only enable wakeup for link changes */ | ||
5928 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; | ||
5929 | int retval = 0; | ||
5930 | 5968 | ||
5931 | netif_device_detach(netdev); | 5969 | netif_device_detach(netdev); |
5932 | 5970 | ||
@@ -5937,11 +5975,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) | |||
5937 | usleep_range(10000, 20000); | 5975 | usleep_range(10000, 20000); |
5938 | 5976 | ||
5939 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | 5977 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); |
5940 | e1000e_down(adapter); | 5978 | |
5979 | /* Quiesce the device without resetting the hardware */ | ||
5980 | e1000e_down(adapter, false); | ||
5941 | e1000_free_irq(adapter); | 5981 | e1000_free_irq(adapter); |
5942 | } | 5982 | } |
5943 | e1000e_reset_interrupt_capability(adapter); | 5983 | e1000e_reset_interrupt_capability(adapter); |
5944 | 5984 | ||
5985 | /* Allow time for pending master requests to run */ | ||
5986 | e1000e_disable_pcie_master(&adapter->hw); | ||
5987 | |||
5988 | return 0; | ||
5989 | } | ||
5990 | |||
5991 | static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) | ||
5992 | { | ||
5993 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
5994 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
5995 | struct e1000_hw *hw = &adapter->hw; | ||
5996 | u32 ctrl, ctrl_ext, rctl, status; | ||
5997 | /* Runtime suspend should only enable wakeup for link changes */ | ||
5998 | u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; | ||
5999 | int retval = 0; | ||
6000 | |||
5945 | status = er32(STATUS); | 6001 | status = er32(STATUS); |
5946 | if (status & E1000_STATUS_LU) | 6002 | if (status & E1000_STATUS_LU) |
5947 | wufc &= ~E1000_WUFC_LNKC; | 6003 | wufc &= ~E1000_WUFC_LNKC; |
@@ -5972,12 +6028,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) | |||
5972 | ew32(CTRL_EXT, ctrl_ext); | 6028 | ew32(CTRL_EXT, ctrl_ext); |
5973 | } | 6029 | } |
5974 | 6030 | ||
6031 | if (!runtime) | ||
6032 | e1000e_power_up_phy(adapter); | ||
6033 | |||
5975 | if (adapter->flags & FLAG_IS_ICH) | 6034 | if (adapter->flags & FLAG_IS_ICH) |
5976 | e1000_suspend_workarounds_ich8lan(&adapter->hw); | 6035 | e1000_suspend_workarounds_ich8lan(&adapter->hw); |
5977 | 6036 | ||
5978 | /* Allow time for pending master requests to run */ | ||
5979 | e1000e_disable_pcie_master(&adapter->hw); | ||
5980 | |||
5981 | if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { | 6037 | if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { |
5982 | /* enable wakeup by the PHY */ | 6038 | /* enable wakeup by the PHY */ |
5983 | retval = e1000_init_phy_wakeup(adapter, wufc); | 6039 | retval = e1000_init_phy_wakeup(adapter, wufc); |
@@ -5991,10 +6047,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) | |||
5991 | } else { | 6047 | } else { |
5992 | ew32(WUC, 0); | 6048 | ew32(WUC, 0); |
5993 | ew32(WUFC, 0); | 6049 | ew32(WUFC, 0); |
6050 | |||
6051 | e1000_power_down_phy(adapter); | ||
5994 | } | 6052 | } |
5995 | 6053 | ||
5996 | if (adapter->hw.phy.type == e1000_phy_igp_3) | 6054 | if (adapter->hw.phy.type == e1000_phy_igp_3) { |
5997 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); | 6055 | e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); |
6056 | } else if (hw->mac.type == e1000_pch_lpt) { | ||
6057 | if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) | ||
6058 | /* ULP does not support wake from unicast, multicast | ||
6059 | * or broadcast. | ||
6060 | */ | ||
6061 | retval = e1000_enable_ulp_lpt_lp(hw, !runtime); | ||
6062 | |||
6063 | if (retval) | ||
6064 | return retval; | ||
6065 | } | ||
6066 | |||
5998 | 6067 | ||
5999 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | 6068 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
6000 | * would have already happened in close and is redundant. | 6069 | * would have already happened in close and is redundant. |
@@ -6102,18 +6171,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) | |||
6102 | } | 6171 | } |
6103 | 6172 | ||
6104 | #ifdef CONFIG_PM | 6173 | #ifdef CONFIG_PM |
6105 | static bool e1000e_pm_ready(struct e1000_adapter *adapter) | ||
6106 | { | ||
6107 | return !!adapter->tx_ring->buffer_info; | ||
6108 | } | ||
6109 | |||
6110 | static int __e1000_resume(struct pci_dev *pdev) | 6174 | static int __e1000_resume(struct pci_dev *pdev) |
6111 | { | 6175 | { |
6112 | struct net_device *netdev = pci_get_drvdata(pdev); | 6176 | struct net_device *netdev = pci_get_drvdata(pdev); |
6113 | struct e1000_adapter *adapter = netdev_priv(netdev); | 6177 | struct e1000_adapter *adapter = netdev_priv(netdev); |
6114 | struct e1000_hw *hw = &adapter->hw; | 6178 | struct e1000_hw *hw = &adapter->hw; |
6115 | u16 aspm_disable_flag = 0; | 6179 | u16 aspm_disable_flag = 0; |
6116 | u32 err; | ||
6117 | 6180 | ||
6118 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) | 6181 | if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) |
6119 | aspm_disable_flag = PCIE_LINK_STATE_L0S; | 6182 | aspm_disable_flag = PCIE_LINK_STATE_L0S; |
@@ -6124,13 +6187,6 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
6124 | 6187 | ||
6125 | pci_set_master(pdev); | 6188 | pci_set_master(pdev); |
6126 | 6189 | ||
6127 | e1000e_set_interrupt_capability(adapter); | ||
6128 | if (netif_running(netdev)) { | ||
6129 | err = e1000_request_irq(adapter); | ||
6130 | if (err) | ||
6131 | return err; | ||
6132 | } | ||
6133 | |||
6134 | if (hw->mac.type >= e1000_pch2lan) | 6190 | if (hw->mac.type >= e1000_pch2lan) |
6135 | e1000_resume_workarounds_pchlan(&adapter->hw); | 6191 | e1000_resume_workarounds_pchlan(&adapter->hw); |
6136 | 6192 | ||
@@ -6169,11 +6225,6 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
6169 | 6225 | ||
6170 | e1000_init_manageability_pt(adapter); | 6226 | e1000_init_manageability_pt(adapter); |
6171 | 6227 | ||
6172 | if (netif_running(netdev)) | ||
6173 | e1000e_up(adapter); | ||
6174 | |||
6175 | netif_device_attach(netdev); | ||
6176 | |||
6177 | /* If the controller has AMT, do not set DRV_LOAD until the interface | 6228 | /* If the controller has AMT, do not set DRV_LOAD until the interface |
6178 | * is up. For all other cases, let the f/w know that the h/w is now | 6229 | * is up. For all other cases, let the f/w know that the h/w is now |
6179 | * under the control of the driver. | 6230 | * under the control of the driver. |
@@ -6185,74 +6236,110 @@ static int __e1000_resume(struct pci_dev *pdev) | |||
6185 | } | 6236 | } |
6186 | 6237 | ||
6187 | #ifdef CONFIG_PM_SLEEP | 6238 | #ifdef CONFIG_PM_SLEEP |
6188 | static int e1000_suspend(struct device *dev) | 6239 | static int e1000e_pm_thaw(struct device *dev) |
6240 | { | ||
6241 | struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); | ||
6242 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
6243 | |||
6244 | e1000e_set_interrupt_capability(adapter); | ||
6245 | if (netif_running(netdev)) { | ||
6246 | u32 err = e1000_request_irq(adapter); | ||
6247 | |||
6248 | if (err) | ||
6249 | return err; | ||
6250 | |||
6251 | e1000e_up(adapter); | ||
6252 | } | ||
6253 | |||
6254 | netif_device_attach(netdev); | ||
6255 | |||
6256 | return 0; | ||
6257 | } | ||
6258 | |||
6259 | static int e1000e_pm_suspend(struct device *dev) | ||
6189 | { | 6260 | { |
6190 | struct pci_dev *pdev = to_pci_dev(dev); | 6261 | struct pci_dev *pdev = to_pci_dev(dev); |
6191 | 6262 | ||
6263 | e1000e_pm_freeze(dev); | ||
6264 | |||
6192 | return __e1000_shutdown(pdev, false); | 6265 | return __e1000_shutdown(pdev, false); |
6193 | } | 6266 | } |
6194 | 6267 | ||
6195 | static int e1000_resume(struct device *dev) | 6268 | static int e1000e_pm_resume(struct device *dev) |
6196 | { | 6269 | { |
6197 | struct pci_dev *pdev = to_pci_dev(dev); | 6270 | struct pci_dev *pdev = to_pci_dev(dev); |
6198 | struct net_device *netdev = pci_get_drvdata(pdev); | 6271 | int rc; |
6199 | struct e1000_adapter *adapter = netdev_priv(netdev); | ||
6200 | 6272 | ||
6201 | if (e1000e_pm_ready(adapter)) | 6273 | rc = __e1000_resume(pdev); |
6202 | adapter->idle_check = true; | 6274 | if (rc) |
6275 | return rc; | ||
6203 | 6276 | ||
6204 | return __e1000_resume(pdev); | 6277 | return e1000e_pm_thaw(dev); |
6205 | } | 6278 | } |
6206 | #endif /* CONFIG_PM_SLEEP */ | 6279 | #endif /* CONFIG_PM_SLEEP */ |
6207 | 6280 | ||
6208 | #ifdef CONFIG_PM_RUNTIME | 6281 | #ifdef CONFIG_PM_RUNTIME |
6209 | static int e1000_runtime_suspend(struct device *dev) | 6282 | static int e1000e_pm_runtime_idle(struct device *dev) |
6210 | { | 6283 | { |
6211 | struct pci_dev *pdev = to_pci_dev(dev); | 6284 | struct pci_dev *pdev = to_pci_dev(dev); |
6212 | struct net_device *netdev = pci_get_drvdata(pdev); | 6285 | struct net_device *netdev = pci_get_drvdata(pdev); |
6213 | struct e1000_adapter *adapter = netdev_priv(netdev); | 6286 | struct e1000_adapter *adapter = netdev_priv(netdev); |
6214 | 6287 | ||
6215 | if (!e1000e_pm_ready(adapter)) | 6288 | if (!e1000e_has_link(adapter)) |
6216 | return 0; | 6289 | pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); |
6217 | 6290 | ||
6218 | return __e1000_shutdown(pdev, true); | 6291 | return -EBUSY; |
6219 | } | 6292 | } |
6220 | 6293 | ||
6221 | static int e1000_idle(struct device *dev) | 6294 | static int e1000e_pm_runtime_resume(struct device *dev) |
6222 | { | 6295 | { |
6223 | struct pci_dev *pdev = to_pci_dev(dev); | 6296 | struct pci_dev *pdev = to_pci_dev(dev); |
6224 | struct net_device *netdev = pci_get_drvdata(pdev); | 6297 | struct net_device *netdev = pci_get_drvdata(pdev); |
6225 | struct e1000_adapter *adapter = netdev_priv(netdev); | 6298 | struct e1000_adapter *adapter = netdev_priv(netdev); |
6299 | int rc; | ||
6226 | 6300 | ||
6227 | if (!e1000e_pm_ready(adapter)) | 6301 | rc = __e1000_resume(pdev); |
6228 | return 0; | 6302 | if (rc) |
6303 | return rc; | ||
6229 | 6304 | ||
6230 | if (adapter->idle_check) { | 6305 | if (netdev->flags & IFF_UP) |
6231 | adapter->idle_check = false; | 6306 | rc = e1000e_up(adapter); |
6232 | if (!e1000e_has_link(adapter)) | ||
6233 | pm_schedule_suspend(dev, MSEC_PER_SEC); | ||
6234 | } | ||
6235 | 6307 | ||
6236 | return -EBUSY; | 6308 | return rc; |
6237 | } | 6309 | } |
6238 | 6310 | ||
6239 | static int e1000_runtime_resume(struct device *dev) | 6311 | static int e1000e_pm_runtime_suspend(struct device *dev) |
6240 | { | 6312 | { |
6241 | struct pci_dev *pdev = to_pci_dev(dev); | 6313 | struct pci_dev *pdev = to_pci_dev(dev); |
6242 | struct net_device *netdev = pci_get_drvdata(pdev); | 6314 | struct net_device *netdev = pci_get_drvdata(pdev); |
6243 | struct e1000_adapter *adapter = netdev_priv(netdev); | 6315 | struct e1000_adapter *adapter = netdev_priv(netdev); |
6244 | 6316 | ||
6245 | if (!e1000e_pm_ready(adapter)) | 6317 | if (netdev->flags & IFF_UP) { |
6246 | return 0; | 6318 | int count = E1000_CHECK_RESET_COUNT; |
6319 | |||
6320 | while (test_bit(__E1000_RESETTING, &adapter->state) && count--) | ||
6321 | usleep_range(10000, 20000); | ||
6322 | |||
6323 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); | ||
6324 | |||
6325 | /* Down the device without resetting the hardware */ | ||
6326 | e1000e_down(adapter, false); | ||
6327 | } | ||
6247 | 6328 | ||
6248 | adapter->idle_check = !dev->power.runtime_auto; | 6329 | if (__e1000_shutdown(pdev, true)) { |
6249 | return __e1000_resume(pdev); | 6330 | e1000e_pm_runtime_resume(dev); |
6331 | return -EBUSY; | ||
6332 | } | ||
6333 | |||
6334 | return 0; | ||
6250 | } | 6335 | } |
6251 | #endif /* CONFIG_PM_RUNTIME */ | 6336 | #endif /* CONFIG_PM_RUNTIME */ |
6252 | #endif /* CONFIG_PM */ | 6337 | #endif /* CONFIG_PM */ |
6253 | 6338 | ||
6254 | static void e1000_shutdown(struct pci_dev *pdev) | 6339 | static void e1000_shutdown(struct pci_dev *pdev) |
6255 | { | 6340 | { |
6341 | e1000e_pm_freeze(&pdev->dev); | ||
6342 | |||
6256 | __e1000_shutdown(pdev, false); | 6343 | __e1000_shutdown(pdev, false); |
6257 | } | 6344 | } |
6258 | 6345 | ||
@@ -6338,7 +6425,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |||
6338 | return PCI_ERS_RESULT_DISCONNECT; | 6425 | return PCI_ERS_RESULT_DISCONNECT; |
6339 | 6426 | ||
6340 | if (netif_running(netdev)) | 6427 | if (netif_running(netdev)) |
6341 | e1000e_down(adapter); | 6428 | e1000e_down(adapter, true); |
6342 | pci_disable_device(pdev); | 6429 | pci_disable_device(pdev); |
6343 | 6430 | ||
6344 | /* Request a slot slot reset. */ | 6431 | /* Request a slot slot reset. */ |
@@ -6350,7 +6437,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |||
6350 | * @pdev: Pointer to PCI device | 6437 | * @pdev: Pointer to PCI device |
6351 | * | 6438 | * |
6352 | * Restart the card from scratch, as if from a cold-boot. Implementation | 6439 | * Restart the card from scratch, as if from a cold-boot. Implementation |
6353 | * resembles the first-half of the e1000_resume routine. | 6440 | * resembles the first-half of the e1000e_pm_resume routine. |
6354 | */ | 6441 | */ |
6355 | static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | 6442 | static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) |
6356 | { | 6443 | { |
@@ -6397,7 +6484,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |||
6397 | * | 6484 | * |
6398 | * This callback is called when the error recovery driver tells us that | 6485 | * This callback is called when the error recovery driver tells us that |
6399 | * its OK to resume normal operation. Implementation resembles the | 6486 | * its OK to resume normal operation. Implementation resembles the |
6400 | * second-half of the e1000_resume routine. | 6487 | * second-half of the e1000e_pm_resume routine. |
6401 | */ | 6488 | */ |
6402 | static void e1000_io_resume(struct pci_dev *pdev) | 6489 | static void e1000_io_resume(struct pci_dev *pdev) |
6403 | { | 6490 | { |
@@ -6902,9 +6989,6 @@ static void e1000_remove(struct pci_dev *pdev) | |||
6902 | } | 6989 | } |
6903 | } | 6990 | } |
6904 | 6991 | ||
6905 | if (!(netdev->flags & IFF_UP)) | ||
6906 | e1000_power_down_phy(adapter); | ||
6907 | |||
6908 | /* Don't lie to e1000_close() down the road. */ | 6992 | /* Don't lie to e1000_close() down the road. */ |
6909 | if (!down) | 6993 | if (!down) |
6910 | clear_bit(__E1000_DOWN, &adapter->state); | 6994 | clear_bit(__E1000_DOWN, &adapter->state); |
@@ -7026,9 +7110,16 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { | |||
7026 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); | 7110 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); |
7027 | 7111 | ||
7028 | static const struct dev_pm_ops e1000_pm_ops = { | 7112 | static const struct dev_pm_ops e1000_pm_ops = { |
7029 | SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) | 7113 | #ifdef CONFIG_PM_SLEEP |
7030 | SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, | 7114 | .suspend = e1000e_pm_suspend, |
7031 | e1000_idle) | 7115 | .resume = e1000e_pm_resume, |
7116 | .freeze = e1000e_pm_freeze, | ||
7117 | .thaw = e1000e_pm_thaw, | ||
7118 | .poweroff = e1000e_pm_suspend, | ||
7119 | .restore = e1000e_pm_resume, | ||
7120 | #endif | ||
7121 | SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, | ||
7122 | e1000e_pm_runtime_idle) | ||
7032 | }; | 7123 | }; |
7033 | 7124 | ||
7034 | /* PCI Device API Driver */ | 7125 | /* PCI Device API Driver */ |
@@ -7055,7 +7146,7 @@ static int __init e1000_init_module(void) | |||
7055 | int ret; | 7146 | int ret; |
7056 | pr_info("Intel(R) PRO/1000 Network Driver - %s\n", | 7147 | pr_info("Intel(R) PRO/1000 Network Driver - %s\n", |
7057 | e1000e_driver_version); | 7148 | e1000e_driver_version); |
7058 | pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n"); | 7149 | pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n"); |
7059 | ret = pci_register_driver(&e1000_driver); | 7150 | ret = pci_register_driver(&e1000_driver); |
7060 | 7151 | ||
7061 | return ret; | 7152 | return ret; |
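The dev_pm_ops rework shown in this diff separates the callbacks by intent: freeze/thaw quiesce and re-arm the driver without touching wake-up state, suspend/resume additionally program WoL and the PHY, and the runtime-PM trio replaces the old idle_check flag with an idle callback that defers suspend while link is up. A generic, hedged sketch of wiring such a table (all example_* names are placeholders, bodies reduced to stubs):

#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_freeze(struct device *dev)          { return 0; } /* quiesce, no reset */
static int example_thaw(struct device *dev)            { return 0; } /* re-arm IRQs, bring up */
static int example_suspend(struct device *dev)         { return 0; } /* freeze + program wake-up */
static int example_resume(struct device *dev)          { return 0; } /* restore + thaw */
static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev)  { return 0; }

static int example_runtime_idle(struct device *dev)
{
    /* Not idle enough to suspend now; ask the PM core to retry in 5 s. */
    pm_schedule_suspend(dev, 5000);
    return -EBUSY;
}

static const struct dev_pm_ops example_pm_ops = {
#ifdef CONFIG_PM_SLEEP
    .suspend  = example_suspend,
    .resume   = example_resume,
    .freeze   = example_freeze,
    .thaw     = example_thaw,
    .poweroff = example_suspend,
    .restore  = example_resume,
#endif
    SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume,
                       example_runtime_idle)
};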