Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--  drivers/net/e1000e/e1000.h   |  2
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 76
-rw-r--r--  drivers/net/e1000e/netdev.c  | 35
-rw-r--r--  drivers/net/e1000e/phy.c     | 85
4 files changed, 98 insertions(+), 100 deletions(-)
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index aec378e7441d..318bdb28a7cd 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -421,6 +421,7 @@ struct e1000_info {
 /* CRC Stripping defines */
 #define FLAG2_CRC_STRIPPING	(1 << 0)
 #define FLAG2_HAS_PHY_WAKEUP	(1 << 1)
+#define FLAG2_IS_DISCARDING	(1 << 2)
 
 #define E1000_RX_DESC_PS(R, i) \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -583,7 +584,6 @@ extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
 					 u16 data);
-extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 061cd100aac2..54d03a0ce3ce 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -138,6 +138,10 @@
 #define E1000_NVM_K1_CONFIG	0x1B	/* NVM K1 Config Word */
 #define E1000_NVM_K1_ENABLE	0x1	/* NVM Enable K1 bit */
 
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL	PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW	0x0400
+
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -219,6 +223,7 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -270,7 +275,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
 	phy->id = e1000_phy_unknown;
-	e1000e_get_phy_id(hw);
+	ret_val = e1000e_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+	if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+		/*
+		 * In case the PHY needs to be in mdio slow mode (eg. 82577),
+		 * set slow mode and try to get the PHY id again.
+		 */
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+	}
 	phy->type = e1000e_get_phy_type_from_id(phy->id);
 
 	switch (phy->type) {
@@ -292,6 +311,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 		break;
 	}
 
+out:
 	return ret_val;
 }
 
@@ -1076,16 +1096,44 @@ out:
 
 
 /**
+ * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= HV_KMRN_MDIO_SLOW;
+
+	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+	return ret_val;
+}
+
+/**
  * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
  * done after every PHY reset.
  **/
 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 {
 	s32 ret_val = 0;
+	u16 phy_data;
 
 	if (hw->mac.type != e1000_pchlan)
 		return ret_val;
 
+	/* Set MDIO slow mode before any other MDIO access */
+	if (hw->phy.type == e1000_phy_82577) {
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+	}
+
 	if (((hw->phy.type == e1000_phy_82577) &&
 	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
 	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
@@ -1118,16 +1166,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 
 	hw->phy.addr = 1;
 	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	hw->phy.ops.release(hw);
 	if (ret_val)
 		goto out;
-	hw->phy.ops.release(hw);
 
 	/*
 	 * Configure the K1 Si workaround during phy reset assuming there is
 	 * link so that it disables K1 if link is in 1Gbps.
 	 */
 	ret_val = e1000_k1_gig_workaround_hv(hw, true);
+	if (ret_val)
+		goto out;
 
+	/* Workaround for link disconnects on a busy hub in half duplex */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+	ret_val = hw->phy.ops.read_reg_locked(hw,
+					      PHY_REG(BM_PORT_CTRL_PAGE, 17),
+					      &phy_data);
+	if (ret_val)
+		goto release;
+	ret_val = hw->phy.ops.write_reg_locked(hw,
+					       PHY_REG(BM_PORT_CTRL_PAGE, 17),
+					       phy_data & 0x00FF);
+release:
+	hw->phy.ops.release(hw);
 out:
 	return ret_val;
 }
@@ -1184,6 +1248,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 	/* Allow time for h/w to get to a quiescent state after reset */
 	mdelay(10);
 
+	/* Perform any necessary post-reset workarounds */
 	if (hw->mac.type == e1000_pchlan) {
 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
 		if (ret_val)
@@ -2484,6 +2549,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	if (!ret_val)
 		e1000_release_swflag_ich8lan(hw);
 
+	/* Perform any necessary post-reset workarounds */
+	if (hw->mac.type == e1000_pchlan)
+		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+
 	if (ctrl & E1000_CTRL_PHY_RST)
 		ret_val = hw->phy.ops.get_cfg_done(hw);
 
@@ -2528,9 +2597,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	kab |= E1000_KABGTXD_BGSQLBIAS;
 	ew32(KABGTXD, kab);
 
-	if (hw->mac.type == e1000_pchlan)
-		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
 out:
 	return ret_val;
 }
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3d57ca5482f4..5d7a760194d4 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -450,13 +450,23 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 
 		length = le16_to_cpu(rx_desc->length);
 
-		/* !EOP means multiple descriptors were used to store a single
-		 * packet, also make sure the frame isn't just CRC only */
-		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+		/*
+		 * !EOP means multiple descriptors were used to store a single
+		 * packet, if that's the case we need to toss it. In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as it is by
+		 * definition only a frame fragment
+		 */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 			/* All receives must fit into a single buffer */
 			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
+			if (status & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
@@ -745,10 +755,16 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				 PCI_DMA_FROMDEVICE);
 		buffer_info->dma = 0;
 
-		if (!(staterr & E1000_RXD_STAT_EOP)) {
+		/* see !EOP comment in other rx routine */
+		if (!(staterr & E1000_RXD_STAT_EOP))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 			e_dbg("Packet Split buffers didn't pick up the full "
 			      "packet\n");
 			dev_kfree_skb_irq(skb);
+			if (staterr & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
 
@@ -1118,6 +1134,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
+	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
 	writel(0, adapter->hw.hw_addr + rx_ring->head);
 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
@@ -3952,13 +3969,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 dma_error:
 	dev_err(&pdev->dev, "TX DMA map failed\n");
 	buffer_info->dma = 0;
-	count--;
-
-	while (count >= 0) {
+	if (count)
 		count--;
-		i--;
-		if (i < 0)
+
+	while (count--) {
+		if (i==0)
 			i += tx_ring->count;
+		i--;
 		buffer_info = &tx_ring->buffer_info[i];
 		e1000_put_txbuf(adapter, buffer_info);;
 	}
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 55a2c0acfee7..7f3ceb9dad6a 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -152,32 +152,9 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
 		if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
 			goto out;
 
-		/*
-		 * If the PHY ID is still unknown, we may have an 82577
-		 * without link. We will try again after setting Slow MDIC
-		 * mode. No harm in trying again in this case since the PHY
-		 * ID is unknown at this point anyway.
-		 */
-		ret_val = phy->ops.acquire(hw);
-		if (ret_val)
-			goto out;
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
-		if (ret_val)
-			goto out;
-		phy->ops.release(hw);
-
 		retry_count++;
 	}
 out:
-	/* Revert to MDIO fast mode, if applicable */
-	if (retry_count) {
-		ret_val = phy->ops.acquire(hw);
-		if (ret_val)
-			return ret_val;
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
-		phy->ops.release(hw);
-	}
-
 	return ret_val;
 }
 
@@ -2791,38 +2768,6 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
 }
 
 /**
- * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
- * @hw: pointer to the HW structure
- * @slow: true for slow mode, false for normal mode
- *
- * Assumes semaphore already acquired.
- **/
-s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
-{
-	s32 ret_val = 0;
-	u16 data = 0;
-
-	/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
-	hw->phy.addr = 1;
-	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
-					(BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
-	if (ret_val)
-		goto out;
-
-	ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1,
-					    (0x2180 | (slow << 10)));
-	if (ret_val)
-		goto out;
-
-	/* dummy read when reverting to fast mode - throw away result */
-	if (!slow)
-		ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
-
-out:
-	return ret_val;
-}
-
-/**
  * __e1000_read_phy_reg_hv - Read HV PHY register
  * @hw: pointer to the HW structure
  * @offset: register offset to be read
@@ -2839,7 +2784,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
-	bool in_slow_mode = false;
 
 	if (!locked) {
 		ret_val = hw->phy.ops.acquire(hw);
@@ -2847,16 +2791,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 			return ret_val;
 	}
 
-	/* Workaround failure in MDIO access while cable is disconnected */
-	if ((hw->phy.type == e1000_phy_82577) &&
-	    !(er32(STATUS) & E1000_STATUS_LU)) {
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
-		if (ret_val)
-			goto out;
-
-		in_slow_mode = true;
-	}
-
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -2893,10 +2827,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
 					  data);
 out:
-	/* Revert to MDIO fast mode, if applicable */
-	if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
-		ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
 	if (!locked)
 		hw->phy.ops.release(hw);
 
@@ -2948,7 +2878,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 	s32 ret_val;
 	u16 page = BM_PHY_REG_PAGE(offset);
 	u16 reg = BM_PHY_REG_NUM(offset);
-	bool in_slow_mode = false;
 
 	if (!locked) {
 		ret_val = hw->phy.ops.acquire(hw);
@@ -2956,16 +2885,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 			return ret_val;
 	}
 
-	/* Workaround failure in MDIO access while cable is disconnected */
-	if ((hw->phy.type == e1000_phy_82577) &&
-	    !(er32(STATUS) & E1000_STATUS_LU)) {
-		ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
-		if (ret_val)
-			goto out;
-
-		in_slow_mode = true;
-	}
-
 	/* Page 800 works differently than the rest so it has its own func */
 	if (page == BM_WUC_PAGE) {
 		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@@ -3019,10 +2938,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 					    data);
 
 out:
-	/* Revert to MDIO fast mode, if applicable */
-	if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
-		ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
-
 	if (!locked)
 		hw->phy.ops.release(hw);
 