author     Bruce Allan <bruce.w.allan@intel.com>    2008-04-29 12:16:05 -0400
committer  Jeff Garzik <jgarzik@redhat.com>         2008-05-06 12:04:14 -0400
commit     97ac8caee238d2a81c23661916f7acd3a22c85fe
tree       52723d8582162e862c78fecb5da2d4d13f7a9579
parent     e284e5c6601cbb16e48854be26aa57a8fa844e35
e1000e: Add support for BM PHYs on ICH9
This patch adds support for the BM PHY, a new PHY model used on
ICH9-based implementations.
This new PHY exposes issues in the ICH9 silicon when receiving jumbo
frames large enough to use more than a certain portion of the Rx FIFO,
which unfortunately breaks packet-split jumbo receives. For this
reason we re-introduce (for the affected adapters only) the jumbo
single-skb receive routine, so that people who wish to use jumbo
frames on these ICH9 platforms can still do so.
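As a rough sketch (illustrative only; the names mirror
e1000_configure_rx() in the netdev.c hunks below, where the real logic
lives), the receive path selection ends up as:

	/*
	 * Packet split stays disabled on ICH parts (rx_ps_pages is
	 * forced to 0), so a jumbo MTU falls through to the single-skb,
	 * page-based receive path.
	 */
	if (adapter->rx_ps_pages) {
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
	} else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}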
Part of this problem has to do with CPU sleep states; to make sure
all the wake-up timings are correct, we force them with the recently
merged pm_qos infrastructure written by Mark Gross
(see http://lkml.org/lkml/2007/10/4/400).
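As a minimal sketch of that pm_qos usage (the calls and the 55
microsecond latency value mirror what this patch adds to netdev.c
below; the wrapper functions here are illustrative only, not part of
the patch):

	#include <linux/pm_qos_params.h>

	static void example_init(void)
	{
		/* module load: register with no latency constraint */
		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "e1000e",
				       PM_QOS_DEFAULT_VALUE);
	}

	static void example_configure_rx(int jumbo_with_early_receive)
	{
		if (jumbo_with_early_receive)
			/* cap CPU wake-up latency at 55 usec */
			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
						  "e1000e", 55);
		else
			pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
						  "e1000e",
						  PM_QOS_DEFAULT_VALUE);
	}

	static void example_exit(void)
	{
		/* module unload: drop the requirement */
		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "e1000e");
	}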
To make the code a bit easier to read we introduce a FLAG_IS_ICH flag
so that we do not need to repeat MAC type checks throughout the code.
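For illustration only (do_non_ich_setup() is a hypothetical
placeholder, not a function in this driver), the flag turns checks
like

	if ((hw->mac.type != e1000_ich8lan) &&
	    (hw->mac.type != e1000_ich9lan))
		do_non_ich_setup();

into

	if (!(adapter->flags & FLAG_IS_ICH))
		do_non_ich_setup();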
Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
 drivers/net/e1000e/defines.h |  10
 drivers/net/e1000e/e1000.h   |   7
 drivers/net/e1000e/ethtool.c |  39
 drivers/net/e1000e/hw.h      |  22
 drivers/net/e1000e/ich8lan.c |  83
 drivers/net/e1000e/netdev.c  | 330
 drivers/net/e1000e/phy.c     | 278
 7 files changed, 748 insertions(+), 21 deletions(-)
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 2a53875cddbf..f823b8ba5785 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -648,6 +648,8 @@
 #define IFE_E_PHY_ID         0x02A80330
 #define IFE_PLUS_E_PHY_ID    0x02A80320
 #define IFE_C_E_PHY_ID       0x02A80310
+#define BME1000_E_PHY_ID     0x01410CB0
+#define BME1000_E_PHY_ID_R2  0x01410CB1
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL   0x10  /* PHY Specific Control Register */
@@ -701,6 +703,14 @@
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
 
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT  0x0800 /* 1 = enable downshift */
+
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+                            ((reg) & MAX_PHY_REG_ADDRESS))
+
 /*
  * Bits...
  * 15-5: page
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 38bfd0d261fe..d3bc6f8101fa 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -127,7 +127,7 @@ struct e1000_buffer {
 		/* arrays of page information for packet split */
 		struct e1000_ps_page *ps_pages;
 	};
-
+	struct page *page;
 };
 
 struct e1000_ring {
@@ -304,6 +304,7 @@ struct e1000_info {
 #define FLAG_HAS_CTRLEXT_ON_LOAD        (1 << 5)
 #define FLAG_HAS_SWSM_ON_LOAD           (1 << 6)
 #define FLAG_HAS_JUMBO_FRAMES           (1 << 7)
+#define FLAG_IS_ICH                     (1 << 9)
 #define FLAG_HAS_SMART_POWER_DOWN       (1 << 11)
 #define FLAG_IS_QUAD_PORT_A             (1 << 12)
 #define FLAG_IS_QUAD_PORT               (1 << 13)
@@ -386,6 +387,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
						 bool state);
 extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
 extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
 
 extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
 extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -443,6 +445,9 @@ extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
 extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index ce045acce63e..2bb6da057c40 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -803,8 +803,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 	/* restore previous status */
 	ew32(STATUS, before);
 
-	if ((mac->type != e1000_ich8lan) &&
-	    (mac->type != e1000_ich9lan)) {
+	if (!(adapter->flags & FLAG_IS_ICH)) {
 		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
 		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
 		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
@@ -824,15 +823,13 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 
 	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
 
-	before = (((mac->type == e1000_ich8lan) ||
-		   (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
+	before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE);
 	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
 	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
 
 	REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
 	REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
-	if ((mac->type != e1000_ich8lan) &&
-	    (mac->type != e1000_ich9lan))
+	if (!(adapter->flags & FLAG_IS_ICH))
 		REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
 	REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
 	REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
@@ -911,9 +908,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
 
 	/* Test each interrupt */
 	for (i = 0; i < 10; i++) {
-
-		if (((adapter->hw.mac.type == e1000_ich8lan) ||
-		     (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
+		if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
 			continue;
 
 		/* Interrupt to test */
@@ -1184,6 +1179,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_reg = 0;
 	u32 stat_reg = 0;
+	u16 phy_reg = 0;
 
 	hw->mac.autoneg = 0;
 
@@ -1211,6 +1207,28 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
 		break;
+	case e1000_phy_bm:
+		/* Set Default MAC Interface speed to 1GB */
+		e1e_rphy(hw, PHY_REG(2, 21), &phy_reg);
+		phy_reg &= ~0x0007;
+		phy_reg |= 0x006;
+		e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
+		/* Assert SW reset for above settings to take effect */
+		e1000e_commit_phy(hw);
+		mdelay(1);
+		/* Force Full Duplex */
+		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
+		/* Set Link Up (in force link) */
+		e1e_rphy(hw, PHY_REG(776, 16), &phy_reg);
+		e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040);
+		/* Force Link */
+		e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
+		e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040);
+		/* Set Early Link Enable */
+		e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+		e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400);
+		/* fall through */
 	default:
 		/* force 1000, set loopback */
 		e1e_wphy(hw, PHY_CONTROL, 0x4140);
@@ -1224,8 +1242,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
		     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
		     E1000_CTRL_FD);	  /* Force Duplex to FULL */
 
-	if ((adapter->hw.mac.type == e1000_ich8lan) ||
-	    (adapter->hw.mac.type == e1000_ich9lan))
+	if (adapter->flags & FLAG_IS_ICH)
 		ctrl_reg |= E1000_CTRL_SLU;	/* Set Link Up */
 	}
 
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index a930e6d9cf02..74f263acb172 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -216,6 +216,21 @@ enum e1e_registers {
 #define IGP01E1000_PHY_LINK_HEALTH     0x13 /* PHY Link Health */
 #define IGP02E1000_PHY_POWER_MGMT      0x19 /* Power Management */
 #define IGP01E1000_PHY_PAGE_SELECT     0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT             22   /* Page Select for BM */
+#define IGP_PAGE_SHIFT                 5
+#define PHY_REG_MASK                   0x1F
+
+#define BM_WUC_PAGE                    800
+#define BM_WUC_ADDRESS_OPCODE          0x11
+#define BM_WUC_DATA_OPCODE             0x12
+#define BM_WUC_ENABLE_PAGE             769
+#define BM_WUC_ENABLE_REG              17
+#define BM_WUC_ENABLE_BIT              (1 << 2)
+#define BM_WUC_HOST_WU_BIT             (1 << 4)
+
+#define BM_WUC   PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC  PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS   PHY_REG(BM_WUC_PAGE, 3)
 
 #define IGP01E1000_PHY_PCS_INIT_REG    0x00B4
 #define IGP01E1000_PHY_POLARITY_MASK   0x0078
@@ -331,10 +346,16 @@ enum e1e_registers {
 #define E1000_DEV_ID_ICH8_IFE_G        0x10C5
 #define E1000_DEV_ID_ICH8_IGP_M        0x104D
 #define E1000_DEV_ID_ICH9_IGP_AMT      0x10BD
+#define E1000_DEV_ID_ICH9_IGP_M_AMT    0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M        0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_V      0x10CB
 #define E1000_DEV_ID_ICH9_IGP_C        0x294C
 #define E1000_DEV_ID_ICH9_IFE          0x10C0
 #define E1000_DEV_ID_ICH9_IFE_GT       0x10C3
 #define E1000_DEV_ID_ICH9_IFE_G        0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM     0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF     0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V      0x10CE
 
 #define E1000_FUNC_1 1
 
@@ -378,6 +399,7 @@ enum e1000_phy_type {
 	e1000_phy_gg82563,
 	e1000_phy_igp_3,
 	e1000_phy_ife,
+	e1000_phy_bm,
 };
 
 enum e1000_bus_width {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 768485dbb2c6..9e38452a738c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -38,6 +38,12 @@
  * 82566DM Gigabit Network Connection
  * 82566MC Gigabit Network Connection
  * 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82562GT-3 10/100 Network Connection
  */
 
 #include <linux/netdevice.h>
@@ -198,6 +204,19 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 	phy->addr = 1;
 	phy->reset_delay_us = 100;
 
+	/*
+	 * We may need to do this twice - once for IGP and if that fails,
+	 * we'll set BM func pointers and try again
+	 */
+	ret_val = e1000e_determine_phy_address(hw);
+	if (ret_val) {
+		hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
+		hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
+		ret_val = e1000e_determine_phy_address(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
 	phy->id = 0;
 	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
 	       (i++ < 100)) {
@@ -219,6 +238,13 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 		phy->type = e1000_phy_ife;
 		phy->autoneg_mask = E1000_ALL_NOT_GIG;
 		break;
+	case BME1000_E_PHY_ID:
+		phy->type = e1000_phy_bm;
+		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
+		hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
+		hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
+		break;
 	default:
 		return -E1000_ERR_PHY;
 		break;
@@ -664,6 +690,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
 		return e1000_get_phy_info_ife_ich8lan(hw);
 		break;
 	case e1000_phy_igp_3:
+	case e1000_phy_bm:
 		return e1000e_get_phy_info_igp(hw);
 		break;
 	default:
@@ -728,7 +755,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
 	s32 ret_val = 0;
 	u16 data;
 
-	if (phy->type != e1000_phy_igp_3)
+	if (phy->type == e1000_phy_ife)
 		return ret_val;
 
 	phy_ctrl = er32(PHY_CTRL);
@@ -1918,8 +1945,35 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 		ret_val = e1000e_copper_link_setup_igp(hw);
 		if (ret_val)
 			return ret_val;
+	} else if (hw->phy.type == e1000_phy_bm) {
+		ret_val = e1000e_copper_link_setup_m88(hw);
+		if (ret_val)
+			return ret_val;
 	}
 
+	if (hw->phy.type == e1000_phy_ife) {
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+		switch (hw->phy.mdix) {
+		case 1:
+			reg_data &= ~IFE_PMC_FORCE_MDIX;
+			break;
+		case 2:
+			reg_data |= IFE_PMC_FORCE_MDIX;
+			break;
+		case 0:
+		default:
+			reg_data |= IFE_PMC_AUTO_MDIX;
+			break;
+		}
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+		if (ret_val)
+			return ret_val;
+	}
 	return e1000e_setup_copper_link(hw);
 }
 
@@ -2127,6 +2181,31 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000e_disable_gig_wol_ich8lan - disable gig during WoL
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
+ * to a lower speed.
+ *
+ * Should only be called for ICH9 devices.
+ **/
+void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
+{
+	u32 phy_ctrl;
+
+	if (hw->mac.type == e1000_ich9lan) {
+		phy_ctrl = er32(PHY_CTRL);
+		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
+			    E1000_PHY_CTRL_GBE_DISABLE;
+		ew32(PHY_CTRL, phy_ctrl);
+	}
+
+	return;
+}
+
+/**
  * e1000_cleanup_led_ich8lan - Restore the default LED operation
  * @hw: pointer to the HW structure
  *
@@ -2247,6 +2326,7 @@ static struct e1000_nvm_operations ich8_nvm_ops = {
 struct e1000_info e1000_ich8_info = {
 	.mac = e1000_ich8lan,
 	.flags = FLAG_HAS_WOL
+		 | FLAG_IS_ICH
		 | FLAG_RX_CSUM_ENABLED
		 | FLAG_HAS_CTRLEXT_ON_LOAD
		 | FLAG_HAS_AMT
@@ -2262,6 +2342,7 @@ struct e1000_info e1000_ich8_info = {
 struct e1000_info e1000_ich9_info = {
 	.mac = e1000_ich9lan,
 	.flags = FLAG_HAS_JUMBO_FRAMES
+		 | FLAG_IS_ICH
		 | FLAG_HAS_WOL
		 | FLAG_RX_CSUM_ENABLED
		 | FLAG_HAS_CTRLEXT_ON_LOAD
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8991ab8911e2..8cbb40f3a506 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -43,10 +43,11 @@ | |||
43 | #include <linux/if_vlan.h> | 43 | #include <linux/if_vlan.h> |
44 | #include <linux/cpu.h> | 44 | #include <linux/cpu.h> |
45 | #include <linux/smp.h> | 45 | #include <linux/smp.h> |
46 | #include <linux/pm_qos_params.h> | ||
46 | 47 | ||
47 | #include "e1000.h" | 48 | #include "e1000.h" |
48 | 49 | ||
49 | #define DRV_VERSION "0.2.1" | 50 | #define DRV_VERSION "0.3.3.3-k2" |
50 | char e1000e_driver_name[] = "e1000e"; | 51 | char e1000e_driver_name[] = "e1000e"; |
51 | const char e1000e_driver_version[] = DRV_VERSION; | 52 | const char e1000e_driver_version[] = DRV_VERSION; |
52 | 53 | ||
@@ -341,6 +342,89 @@ no_buffers: | |||
341 | } | 342 | } |
342 | 343 | ||
343 | /** | 344 | /** |
345 | * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers | ||
346 | * @adapter: address of board private structure | ||
347 | * @rx_ring: pointer to receive ring structure | ||
348 | * @cleaned_count: number of buffers to allocate this pass | ||
349 | **/ | ||
350 | |||
351 | static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, | ||
352 | int cleaned_count) | ||
353 | { | ||
354 | struct net_device *netdev = adapter->netdev; | ||
355 | struct pci_dev *pdev = adapter->pdev; | ||
356 | struct e1000_rx_desc *rx_desc; | ||
357 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
358 | struct e1000_buffer *buffer_info; | ||
359 | struct sk_buff *skb; | ||
360 | unsigned int i; | ||
361 | unsigned int bufsz = 256 - | ||
362 | 16 /* for skb_reserve */ - | ||
363 | NET_IP_ALIGN; | ||
364 | |||
365 | i = rx_ring->next_to_use; | ||
366 | buffer_info = &rx_ring->buffer_info[i]; | ||
367 | |||
368 | while (cleaned_count--) { | ||
369 | skb = buffer_info->skb; | ||
370 | if (skb) { | ||
371 | skb_trim(skb, 0); | ||
372 | goto check_page; | ||
373 | } | ||
374 | |||
375 | skb = netdev_alloc_skb(netdev, bufsz); | ||
376 | if (unlikely(!skb)) { | ||
377 | /* Better luck next round */ | ||
378 | adapter->alloc_rx_buff_failed++; | ||
379 | break; | ||
380 | } | ||
381 | |||
382 | /* Make buffer alignment 2 beyond a 16 byte boundary | ||
383 | * this will result in a 16 byte aligned IP header after | ||
384 | * the 14 byte MAC header is removed | ||
385 | */ | ||
386 | skb_reserve(skb, NET_IP_ALIGN); | ||
387 | |||
388 | buffer_info->skb = skb; | ||
389 | check_page: | ||
390 | /* allocate a new page if necessary */ | ||
391 | if (!buffer_info->page) { | ||
392 | buffer_info->page = alloc_page(GFP_ATOMIC); | ||
393 | if (unlikely(!buffer_info->page)) { | ||
394 | adapter->alloc_rx_buff_failed++; | ||
395 | break; | ||
396 | } | ||
397 | } | ||
398 | |||
399 | if (!buffer_info->dma) | ||
400 | buffer_info->dma = pci_map_page(pdev, | ||
401 | buffer_info->page, 0, | ||
402 | PAGE_SIZE, | ||
403 | PCI_DMA_FROMDEVICE); | ||
404 | |||
405 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
406 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
407 | |||
408 | if (unlikely(++i == rx_ring->count)) | ||
409 | i = 0; | ||
410 | buffer_info = &rx_ring->buffer_info[i]; | ||
411 | } | ||
412 | |||
413 | if (likely(rx_ring->next_to_use != i)) { | ||
414 | rx_ring->next_to_use = i; | ||
415 | if (unlikely(i-- == 0)) | ||
416 | i = (rx_ring->count - 1); | ||
417 | |||
418 | /* Force memory writes to complete before letting h/w | ||
419 | * know there are new descriptors to fetch. (Only | ||
420 | * applicable for weak-ordered memory model archs, | ||
421 | * such as IA-64). */ | ||
422 | wmb(); | ||
423 | writel(i, adapter->hw.hw_addr + rx_ring->tail); | ||
424 | } | ||
425 | } | ||
426 | |||
427 | /** | ||
344 | * e1000_clean_rx_irq - Send received data up the network stack; legacy | 428 | * e1000_clean_rx_irq - Send received data up the network stack; legacy |
345 | * @adapter: board private structure | 429 | * @adapter: board private structure |
346 | * | 430 | * |
@@ -783,6 +867,186 @@ next_desc: | |||
783 | } | 867 | } |
784 | 868 | ||
785 | /** | 869 | /** |
870 | * e1000_consume_page - helper function | ||
871 | **/ | ||
872 | static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, | ||
873 | u16 length) | ||
874 | { | ||
875 | bi->page = NULL; | ||
876 | skb->len += length; | ||
877 | skb->data_len += length; | ||
878 | skb->truesize += length; | ||
879 | } | ||
880 | |||
881 | /** | ||
882 | * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy | ||
883 | * @adapter: board private structure | ||
884 | * | ||
885 | * the return value indicates whether actual cleaning was done, there | ||
886 | * is no guarantee that everything was cleaned | ||
887 | **/ | ||
888 | |||
889 | static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, | ||
890 | int *work_done, int work_to_do) | ||
891 | { | ||
892 | struct net_device *netdev = adapter->netdev; | ||
893 | struct pci_dev *pdev = adapter->pdev; | ||
894 | struct e1000_ring *rx_ring = adapter->rx_ring; | ||
895 | struct e1000_rx_desc *rx_desc, *next_rxd; | ||
896 | struct e1000_buffer *buffer_info, *next_buffer; | ||
897 | u32 length; | ||
898 | unsigned int i; | ||
899 | int cleaned_count = 0; | ||
900 | bool cleaned = false; | ||
901 | unsigned int total_rx_bytes=0, total_rx_packets=0; | ||
902 | |||
903 | i = rx_ring->next_to_clean; | ||
904 | rx_desc = E1000_RX_DESC(*rx_ring, i); | ||
905 | buffer_info = &rx_ring->buffer_info[i]; | ||
906 | |||
907 | while (rx_desc->status & E1000_RXD_STAT_DD) { | ||
908 | struct sk_buff *skb; | ||
909 | u8 status; | ||
910 | |||
911 | if (*work_done >= work_to_do) | ||
912 | break; | ||
913 | (*work_done)++; | ||
914 | |||
915 | status = rx_desc->status; | ||
916 | skb = buffer_info->skb; | ||
917 | buffer_info->skb = NULL; | ||
918 | |||
919 | ++i; | ||
920 | if (i == rx_ring->count) | ||
921 | i = 0; | ||
922 | next_rxd = E1000_RX_DESC(*rx_ring, i); | ||
923 | prefetch(next_rxd); | ||
924 | |||
925 | next_buffer = &rx_ring->buffer_info[i]; | ||
926 | |||
927 | cleaned = true; | ||
928 | cleaned_count++; | ||
929 | pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE, | ||
930 | PCI_DMA_FROMDEVICE); | ||
931 | buffer_info->dma = 0; | ||
932 | |||
933 | length = le16_to_cpu(rx_desc->length); | ||
934 | |||
935 | /* errors is only valid for DD + EOP descriptors */ | ||
936 | if (unlikely((status & E1000_RXD_STAT_EOP) && | ||
937 | (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { | ||
938 | /* recycle both page and skb */ | ||
939 | buffer_info->skb = skb; | ||
940 | /* an error means any chain goes out the window | ||
941 | * too */ | ||
942 | if (rx_ring->rx_skb_top) | ||
943 | dev_kfree_skb(rx_ring->rx_skb_top); | ||
944 | rx_ring->rx_skb_top = NULL; | ||
945 | goto next_desc; | ||
946 | } | ||
947 | |||
948 | #define rxtop rx_ring->rx_skb_top | ||
949 | if (!(status & E1000_RXD_STAT_EOP)) { | ||
950 | /* this descriptor is only the beginning (or middle) */ | ||
951 | if (!rxtop) { | ||
952 | /* this is the beginning of a chain */ | ||
953 | rxtop = skb; | ||
954 | skb_fill_page_desc(rxtop, 0, buffer_info->page, | ||
955 | 0, length); | ||
956 | } else { | ||
957 | /* this is the middle of a chain */ | ||
958 | skb_fill_page_desc(rxtop, | ||
959 | skb_shinfo(rxtop)->nr_frags, | ||
960 | buffer_info->page, 0, length); | ||
961 | /* re-use the skb, only consumed the page */ | ||
962 | buffer_info->skb = skb; | ||
963 | } | ||
964 | e1000_consume_page(buffer_info, rxtop, length); | ||
965 | goto next_desc; | ||
966 | } else { | ||
967 | if (rxtop) { | ||
968 | /* end of the chain */ | ||
969 | skb_fill_page_desc(rxtop, | ||
970 | skb_shinfo(rxtop)->nr_frags, | ||
971 | buffer_info->page, 0, length); | ||
972 | /* re-use the current skb, we only consumed the | ||
973 | * page */ | ||
974 | buffer_info->skb = skb; | ||
975 | skb = rxtop; | ||
976 | rxtop = NULL; | ||
977 | e1000_consume_page(buffer_info, skb, length); | ||
978 | } else { | ||
979 | /* no chain, got EOP, this buf is the packet | ||
980 | * copybreak to save the put_page/alloc_page */ | ||
981 | if (length <= copybreak && | ||
982 | skb_tailroom(skb) >= length) { | ||
983 | u8 *vaddr; | ||
984 | vaddr = kmap_atomic(buffer_info->page, | ||
985 | KM_SKB_DATA_SOFTIRQ); | ||
986 | memcpy(skb_tail_pointer(skb), vaddr, | ||
987 | length); | ||
988 | kunmap_atomic(vaddr, | ||
989 | KM_SKB_DATA_SOFTIRQ); | ||
990 | /* re-use the page, so don't erase | ||
991 | * buffer_info->page */ | ||
992 | skb_put(skb, length); | ||
993 | } else { | ||
994 | skb_fill_page_desc(skb, 0, | ||
995 | buffer_info->page, 0, | ||
996 | length); | ||
997 | e1000_consume_page(buffer_info, skb, | ||
998 | length); | ||
999 | } | ||
1000 | } | ||
1001 | } | ||
1002 | |||
1003 | /* Receive Checksum Offload XXX recompute due to CRC strip? */ | ||
1004 | e1000_rx_checksum(adapter, | ||
1005 | (u32)(status) | | ||
1006 | ((u32)(rx_desc->errors) << 24), | ||
1007 | le16_to_cpu(rx_desc->csum), skb); | ||
1008 | |||
1009 | /* probably a little skewed due to removing CRC */ | ||
1010 | total_rx_bytes += skb->len; | ||
1011 | total_rx_packets++; | ||
1012 | |||
1013 | /* eth type trans needs skb->data to point to something */ | ||
1014 | if (!pskb_may_pull(skb, ETH_HLEN)) { | ||
1015 | ndev_err(netdev, "pskb_may_pull failed.\n"); | ||
1016 | dev_kfree_skb(skb); | ||
1017 | goto next_desc; | ||
1018 | } | ||
1019 | |||
1020 | e1000_receive_skb(adapter, netdev, skb, status, | ||
1021 | rx_desc->special); | ||
1022 | |||
1023 | next_desc: | ||
1024 | rx_desc->status = 0; | ||
1025 | |||
1026 | /* return some buffers to hardware, one at a time is too slow */ | ||
1027 | if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { | ||
1028 | adapter->alloc_rx_buf(adapter, cleaned_count); | ||
1029 | cleaned_count = 0; | ||
1030 | } | ||
1031 | |||
1032 | /* use prefetched values */ | ||
1033 | rx_desc = next_rxd; | ||
1034 | buffer_info = next_buffer; | ||
1035 | } | ||
1036 | rx_ring->next_to_clean = i; | ||
1037 | |||
1038 | cleaned_count = e1000_desc_unused(rx_ring); | ||
1039 | if (cleaned_count) | ||
1040 | adapter->alloc_rx_buf(adapter, cleaned_count); | ||
1041 | |||
1042 | adapter->total_rx_bytes += total_rx_bytes; | ||
1043 | adapter->total_rx_packets += total_rx_packets; | ||
1044 | adapter->net_stats.rx_bytes += total_rx_bytes; | ||
1045 | adapter->net_stats.rx_packets += total_rx_packets; | ||
1046 | return cleaned; | ||
1047 | } | ||
1048 | |||
1049 | /** | ||
786 | * e1000_clean_rx_ring - Free Rx Buffers per Queue | 1050 | * e1000_clean_rx_ring - Free Rx Buffers per Queue |
787 | * @adapter: board private structure | 1051 | * @adapter: board private structure |
788 | **/ | 1052 | **/ |
@@ -802,6 +1066,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
802 | pci_unmap_single(pdev, buffer_info->dma, | 1066 | pci_unmap_single(pdev, buffer_info->dma, |
803 | adapter->rx_buffer_len, | 1067 | adapter->rx_buffer_len, |
804 | PCI_DMA_FROMDEVICE); | 1068 | PCI_DMA_FROMDEVICE); |
1069 | else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) | ||
1070 | pci_unmap_page(pdev, buffer_info->dma, | ||
1071 | PAGE_SIZE, | ||
1072 | PCI_DMA_FROMDEVICE); | ||
805 | else if (adapter->clean_rx == e1000_clean_rx_irq_ps) | 1073 | else if (adapter->clean_rx == e1000_clean_rx_irq_ps) |
806 | pci_unmap_single(pdev, buffer_info->dma, | 1074 | pci_unmap_single(pdev, buffer_info->dma, |
807 | adapter->rx_ps_bsize0, | 1075 | adapter->rx_ps_bsize0, |
@@ -809,6 +1077,11 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
809 | buffer_info->dma = 0; | 1077 | buffer_info->dma = 0; |
810 | } | 1078 | } |
811 | 1079 | ||
1080 | if (buffer_info->page) { | ||
1081 | put_page(buffer_info->page); | ||
1082 | buffer_info->page = NULL; | ||
1083 | } | ||
1084 | |||
812 | if (buffer_info->skb) { | 1085 | if (buffer_info->skb) { |
813 | dev_kfree_skb(buffer_info->skb); | 1086 | dev_kfree_skb(buffer_info->skb); |
814 | buffer_info->skb = NULL; | 1087 | buffer_info->skb = NULL; |
@@ -1755,10 +2028,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
1755 | * a lot of memory, since we allocate 3 pages at all times | 2028 | * a lot of memory, since we allocate 3 pages at all times |
1756 | * per packet. | 2029 | * per packet. |
1757 | */ | 2030 | */ |
1758 | adapter->rx_ps_pages = 0; | ||
1759 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); | 2031 | pages = PAGE_USE_COUNT(adapter->netdev->mtu); |
1760 | if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) | 2032 | if (!(adapter->flags & FLAG_IS_ICH) && (pages <= 3) && |
2033 | (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) | ||
1761 | adapter->rx_ps_pages = pages; | 2034 | adapter->rx_ps_pages = pages; |
2035 | else | ||
2036 | adapter->rx_ps_pages = 0; | ||
1762 | 2037 | ||
1763 | if (adapter->rx_ps_pages) { | 2038 | if (adapter->rx_ps_pages) { |
1764 | /* Configure extra packet-split registers */ | 2039 | /* Configure extra packet-split registers */ |
@@ -1819,9 +2094,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1819 | sizeof(union e1000_rx_desc_packet_split); | 2094 | sizeof(union e1000_rx_desc_packet_split); |
1820 | adapter->clean_rx = e1000_clean_rx_irq_ps; | 2095 | adapter->clean_rx = e1000_clean_rx_irq_ps; |
1821 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; | 2096 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; |
2097 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { | ||
2098 | rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); | ||
2099 | adapter->clean_rx = e1000_clean_jumbo_rx_irq; | ||
2100 | adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; | ||
1822 | } else { | 2101 | } else { |
1823 | rdlen = rx_ring->count * | 2102 | rdlen = rx_ring->count * sizeof(struct e1000_rx_desc); |
1824 | sizeof(struct e1000_rx_desc); | ||
1825 | adapter->clean_rx = e1000_clean_rx_irq; | 2103 | adapter->clean_rx = e1000_clean_rx_irq; |
1826 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers; | 2104 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers; |
1827 | } | 2105 | } |
@@ -1885,8 +2163,21 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
1885 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 | 2163 | * units), e.g. using jumbo frames when setting to E1000_ERT_2048 |
1886 | */ | 2164 | */ |
1887 | if ((adapter->flags & FLAG_HAS_ERT) && | 2165 | if ((adapter->flags & FLAG_HAS_ERT) && |
1888 | (adapter->netdev->mtu > ETH_DATA_LEN)) | 2166 | (adapter->netdev->mtu > ETH_DATA_LEN)) { |
1889 | ew32(ERT, E1000_ERT_2048); | 2167 | u32 rxdctl = er32(RXDCTL(0)); |
2168 | ew32(RXDCTL(0), rxdctl | 0x3); | ||
2169 | ew32(ERT, E1000_ERT_2048 | (1 << 13)); | ||
2170 | /* | ||
2171 | * With jumbo frames and early-receive enabled, excessive | ||
2172 | * C4->C2 latencies result in dropped transactions. | ||
2173 | */ | ||
2174 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2175 | e1000e_driver_name, 55); | ||
2176 | } else { | ||
2177 | pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, | ||
2178 | e1000e_driver_name, | ||
2179 | PM_QOS_DEFAULT_VALUE); | ||
2180 | } | ||
1890 | 2181 | ||
1891 | /* Enable Receives */ | 2182 | /* Enable Receives */ |
1892 | ew32(RCTL, rctl); | 2183 | ew32(RCTL, rctl); |
@@ -2155,6 +2446,14 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2155 | 2446 | ||
2156 | /* Allow time for pending master requests to run */ | 2447 | /* Allow time for pending master requests to run */ |
2157 | mac->ops.reset_hw(hw); | 2448 | mac->ops.reset_hw(hw); |
2449 | |||
2450 | /* | ||
2451 | * For parts with AMT enabled, let the firmware know | ||
2452 | * that the network interface is in control | ||
2453 | */ | ||
2454 | if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) | ||
2455 | e1000_get_hw_control(adapter); | ||
2456 | |||
2158 | ew32(WUC, 0); | 2457 | ew32(WUC, 0); |
2159 | 2458 | ||
2160 | if (mac->ops.init_hw(hw)) | 2459 | if (mac->ops.init_hw(hw)) |
@@ -3469,6 +3768,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3469 | * means we reserve 2 more, this pushes us to allocate from the next | 3768 | * means we reserve 2 more, this pushes us to allocate from the next |
3470 | * larger slab size. | 3769 | * larger slab size. |
3471 | * i.e. RXBUFFER_2048 --> size-4096 slab | 3770 | * i.e. RXBUFFER_2048 --> size-4096 slab |
3771 | * However with the new *_jumbo_rx* routines, jumbo receives will use | ||
3772 | * fragmented skbs | ||
3472 | */ | 3773 | */ |
3473 | 3774 | ||
3474 | if (max_frame <= 256) | 3775 | if (max_frame <= 256) |
@@ -3626,6 +3927,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3626 | ew32(CTRL_EXT, ctrl_ext); | 3927 | ew32(CTRL_EXT, ctrl_ext); |
3627 | } | 3928 | } |
3628 | 3929 | ||
3930 | if (adapter->flags & FLAG_IS_ICH) | ||
3931 | e1000e_disable_gig_wol_ich8lan(&adapter->hw); | ||
3932 | |||
3629 | /* Allow time for pending master requests to run */ | 3933 | /* Allow time for pending master requests to run */ |
3630 | e1000e_disable_pcie_master(&adapter->hw); | 3934 | e1000e_disable_pcie_master(&adapter->hw); |
3631 | 3935 | ||
@@ -4292,6 +4596,13 @@ static struct pci_device_id e1000_pci_tbl[] = { | |||
4292 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, | 4596 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, |
4293 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, | 4597 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, |
4294 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, | 4598 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, |
4599 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, | ||
4600 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, | ||
4601 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, | ||
4602 | |||
4603 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, | ||
4604 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, | ||
4605 | { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, | ||
4295 | 4606 | ||
4296 | { } /* terminate list */ | 4607 | { } /* terminate list */ |
4297 | }; | 4608 | }; |
@@ -4326,7 +4637,9 @@ static int __init e1000_init_module(void) | |||
4326 | printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", | 4637 | printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", |
4327 | e1000e_driver_name); | 4638 | e1000e_driver_name); |
4328 | ret = pci_register_driver(&e1000_driver); | 4639 | ret = pci_register_driver(&e1000_driver); |
4329 | 4640 | pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, | |
4641 | PM_QOS_DEFAULT_VALUE); | ||
4642 | |||
4330 | return ret; | 4643 | return ret; |
4331 | } | 4644 | } |
4332 | module_init(e1000_init_module); | 4645 | module_init(e1000_init_module); |
@@ -4340,6 +4653,7 @@ module_init(e1000_init_module); | |||
4340 | static void __exit e1000_exit_module(void) | 4653 | static void __exit e1000_exit_module(void) |
4341 | { | 4654 | { |
4342 | pci_unregister_driver(&e1000_driver); | 4655 | pci_unregister_driver(&e1000_driver); |
4656 | pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name); | ||
4343 | } | 4657 | } |
4344 | module_exit(e1000_exit_module); | 4658 | module_exit(e1000_exit_module); |
4345 | 4659 | ||
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index e102332a6bee..b133dcf0e950 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -34,6 +34,9 @@ static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); | |||
34 | static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); | 34 | static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); |
35 | static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); | 35 | static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); |
36 | static s32 e1000_wait_autoneg(struct e1000_hw *hw); | 36 | static s32 e1000_wait_autoneg(struct e1000_hw *hw); |
37 | static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg); | ||
38 | static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | ||
39 | u16 *data, bool read); | ||
37 | 40 | ||
38 | /* Cable length tables */ | 41 | /* Cable length tables */ |
39 | static const u16 e1000_m88_cable_length_table[] = | 42 | static const u16 e1000_m88_cable_length_table[] = |
@@ -465,6 +468,10 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) | |||
465 | if (phy->disable_polarity_correction == 1) | 468 | if (phy->disable_polarity_correction == 1) |
466 | phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; | 469 | phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; |
467 | 470 | ||
471 | /* Enable downshift on BM (disabled by default) */ | ||
472 | if (phy->type == e1000_phy_bm) | ||
473 | phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; | ||
474 | |||
468 | ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); | 475 | ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); |
469 | if (ret_val) | 476 | if (ret_val) |
470 | return ret_val; | 477 | return ret_val; |
@@ -1776,6 +1783,10 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) | |||
1776 | case IFE_C_E_PHY_ID: | 1783 | case IFE_C_E_PHY_ID: |
1777 | phy_type = e1000_phy_ife; | 1784 | phy_type = e1000_phy_ife; |
1778 | break; | 1785 | break; |
1786 | case BME1000_E_PHY_ID: | ||
1787 | case BME1000_E_PHY_ID_R2: | ||
1788 | phy_type = e1000_phy_bm; | ||
1789 | break; | ||
1779 | default: | 1790 | default: |
1780 | phy_type = e1000_phy_unknown; | 1791 | phy_type = e1000_phy_unknown; |
1781 | break; | 1792 | break; |
@@ -1784,6 +1795,273 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) | |||
1784 | } | 1795 | } |
1785 | 1796 | ||
1786 | /** | 1797 | /** |
1798 | * e1000e_determine_phy_address - Determines PHY address. | ||
1799 | * @hw: pointer to the HW structure | ||
1800 | * | ||
1801 | * This uses a trial and error method to loop through possible PHY | ||
1802 | * addresses. It tests each by reading the PHY ID registers and | ||
1803 | * checking for a match. | ||
1804 | **/ | ||
1805 | s32 e1000e_determine_phy_address(struct e1000_hw *hw) | ||
1806 | { | ||
1807 | s32 ret_val = -E1000_ERR_PHY_TYPE; | ||
1808 | u32 phy_addr= 0; | ||
1809 | u32 i = 0; | ||
1810 | enum e1000_phy_type phy_type = e1000_phy_unknown; | ||
1811 | |||
1812 | do { | ||
1813 | for (phy_addr = 0; phy_addr < 4; phy_addr++) { | ||
1814 | hw->phy.addr = phy_addr; | ||
1815 | e1000e_get_phy_id(hw); | ||
1816 | phy_type = e1000e_get_phy_type_from_id(hw->phy.id); | ||
1817 | |||
1818 | /* | ||
1819 | * If phy_type is valid, break - we found our | ||
1820 | * PHY address | ||
1821 | */ | ||
1822 | if (phy_type != e1000_phy_unknown) { | ||
1823 | ret_val = 0; | ||
1824 | break; | ||
1825 | } | ||
1826 | } | ||
1827 | i++; | ||
1828 | } while ((ret_val != 0) && (i < 100)); | ||
1829 | |||
1830 | return ret_val; | ||
1831 | } | ||
1832 | |||
1833 | /** | ||
1834 | * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address | ||
1835 | * @page: page to access | ||
1836 | * | ||
1837 | * Returns the phy address for the page requested. | ||
1838 | **/ | ||
1839 | static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) | ||
1840 | { | ||
1841 | u32 phy_addr = 2; | ||
1842 | |||
1843 | if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) | ||
1844 | phy_addr = 1; | ||
1845 | |||
1846 | return phy_addr; | ||
1847 | } | ||
1848 | |||
1849 | /** | ||
1850 | * e1000e_write_phy_reg_bm - Write BM PHY register | ||
1851 | * @hw: pointer to the HW structure | ||
1852 | * @offset: register offset to write to | ||
1853 | * @data: data to write at register offset | ||
1854 | * | ||
1855 | * Acquires semaphore, if necessary, then writes the data to PHY register | ||
1856 | * at the offset. Release any acquired semaphores before exiting. | ||
1857 | **/ | ||
1858 | s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | ||
1859 | { | ||
1860 | s32 ret_val; | ||
1861 | u32 page_select = 0; | ||
1862 | u32 page = offset >> IGP_PAGE_SHIFT; | ||
1863 | u32 page_shift = 0; | ||
1864 | |||
1865 | /* Page 800 works differently than the rest so it has its own func */ | ||
1866 | if (page == BM_WUC_PAGE) { | ||
1867 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | ||
1868 | false); | ||
1869 | goto out; | ||
1870 | } | ||
1871 | |||
1872 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
1873 | if (ret_val) | ||
1874 | goto out; | ||
1875 | |||
1876 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | ||
1877 | |||
1878 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
1879 | /* | ||
1880 | * Page select is register 31 for phy address 1 and 22 for | ||
1881 | * phy address 2 and 3. Page select is shifted only for | ||
1882 | * phy address 1. | ||
1883 | */ | ||
1884 | if (hw->phy.addr == 1) { | ||
1885 | page_shift = IGP_PAGE_SHIFT; | ||
1886 | page_select = IGP01E1000_PHY_PAGE_SELECT; | ||
1887 | } else { | ||
1888 | page_shift = 0; | ||
1889 | page_select = BM_PHY_PAGE_SELECT; | ||
1890 | } | ||
1891 | |||
1892 | /* Page is shifted left, PHY expects (page x 32) */ | ||
1893 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | ||
1894 | (page << page_shift)); | ||
1895 | if (ret_val) { | ||
1896 | hw->phy.ops.release_phy(hw); | ||
1897 | goto out; | ||
1898 | } | ||
1899 | } | ||
1900 | |||
1901 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
1902 | data); | ||
1903 | |||
1904 | hw->phy.ops.release_phy(hw); | ||
1905 | |||
1906 | out: | ||
1907 | return ret_val; | ||
1908 | } | ||
1909 | |||
1910 | /** | ||
1911 | * e1000e_read_phy_reg_bm - Read BM PHY register | ||
1912 | * @hw: pointer to the HW structure | ||
1913 | * @offset: register offset to be read | ||
1914 | * @data: pointer to the read data | ||
1915 | * | ||
1916 | * Acquires semaphore, if necessary, then reads the PHY register at offset | ||
1917 | * and storing the retrieved information in data. Release any acquired | ||
1918 | * semaphores before exiting. | ||
1919 | **/ | ||
1920 | s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | ||
1921 | { | ||
1922 | s32 ret_val; | ||
1923 | u32 page_select = 0; | ||
1924 | u32 page = offset >> IGP_PAGE_SHIFT; | ||
1925 | u32 page_shift = 0; | ||
1926 | |||
1927 | /* Page 800 works differently than the rest so it has its own func */ | ||
1928 | if (page == BM_WUC_PAGE) { | ||
1929 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | ||
1930 | true); | ||
1931 | goto out; | ||
1932 | } | ||
1933 | |||
1934 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
1935 | if (ret_val) | ||
1936 | goto out; | ||
1937 | |||
1938 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | ||
1939 | |||
1940 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | ||
1941 | /* | ||
1942 | * Page select is register 31 for phy address 1 and 22 for | ||
1943 | * phy address 2 and 3. Page select is shifted only for | ||
1944 | * phy address 1. | ||
1945 | */ | ||
1946 | if (hw->phy.addr == 1) { | ||
1947 | page_shift = IGP_PAGE_SHIFT; | ||
1948 | page_select = IGP01E1000_PHY_PAGE_SELECT; | ||
1949 | } else { | ||
1950 | page_shift = 0; | ||
1951 | page_select = BM_PHY_PAGE_SELECT; | ||
1952 | } | ||
1953 | |||
1954 | /* Page is shifted left, PHY expects (page x 32) */ | ||
1955 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | ||
1956 | (page << page_shift)); | ||
1957 | if (ret_val) { | ||
1958 | hw->phy.ops.release_phy(hw); | ||
1959 | goto out; | ||
1960 | } | ||
1961 | } | ||
1962 | |||
1963 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | ||
1964 | data); | ||
1965 | hw->phy.ops.release_phy(hw); | ||
1966 | |||
1967 | out: | ||
1968 | return ret_val; | ||
1969 | } | ||
1970 | |||
1971 | /** | ||
1972 | * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register | ||
1973 | * @hw: pointer to the HW structure | ||
1974 | * @offset: register offset to be read or written | ||
1975 | * @data: pointer to the data to read or write | ||
1976 | * @read: determines if operation is read or write | ||
1977 | * | ||
1978 | * Acquires semaphore, if necessary, then reads the PHY register at offset | ||
1979 | * and storing the retrieved information in data. Release any acquired | ||
1980 | * semaphores before exiting. Note that procedure to read the wakeup | ||
1981 | * registers are different. It works as such: | ||
1982 | * 1) Set page 769, register 17, bit 2 = 1 | ||
1983 | * 2) Set page to 800 for host (801 if we were manageability) | ||
1984 | * 3) Write the address using the address opcode (0x11) | ||
1985 | * 4) Read or write the data using the data opcode (0x12) | ||
1986 | * 5) Restore 769_17.2 to its original value | ||
1987 | **/ | ||
1988 | static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | ||
1989 | u16 *data, bool read) | ||
1990 | { | ||
1991 | s32 ret_val; | ||
1992 | u16 reg = ((u16)offset) & PHY_REG_MASK; | ||
1993 | u16 phy_reg = 0; | ||
1994 | u8 phy_acquired = 1; | ||
1995 | |||
1996 | |||
1997 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
1998 | if (ret_val) { | ||
1999 | phy_acquired = 0; | ||
2000 | goto out; | ||
2001 | } | ||
2002 | |||
2003 | /* All operations in this function are phy address 1 */ | ||
2004 | hw->phy.addr = 1; | ||
2005 | |||
2006 | /* Set page 769 */ | ||
2007 | e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | ||
2008 | (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); | ||
2009 | |||
2010 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &phy_reg); | ||
2011 | if (ret_val) | ||
2012 | goto out; | ||
2013 | |||
2014 | /* First clear bit 4 to avoid a power state change */ | ||
2015 | phy_reg &= ~(BM_WUC_HOST_WU_BIT); | ||
2016 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | ||
2017 | if (ret_val) | ||
2018 | goto out; | ||
2019 | |||
2020 | /* Write bit 2 = 1, and clear bit 4 to 769_17 */ | ||
2021 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, | ||
2022 | phy_reg | BM_WUC_ENABLE_BIT); | ||
2023 | if (ret_val) | ||
2024 | goto out; | ||
2025 | |||
2026 | /* Select page 800 */ | ||
2027 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | ||
2028 | (BM_WUC_PAGE << IGP_PAGE_SHIFT)); | ||
2029 | |||
2030 | /* Write the page 800 offset value using opcode 0x11 */ | ||
2031 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); | ||
2032 | if (ret_val) | ||
2033 | goto out; | ||
2034 | |||
2035 | if (read) { | ||
2036 | /* Read the page 800 value using opcode 0x12 */ | ||
2037 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | ||
2038 | data); | ||
2039 | } else { | ||
2040 | /* Read the page 800 value using opcode 0x12 */ | ||
2041 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, | ||
2042 | *data); | ||
2043 | } | ||
2044 | |||
2045 | if (ret_val) | ||
2046 | goto out; | ||
2047 | |||
2048 | /* | ||
2049 | * Restore 769_17.2 to its original value | ||
2050 | * Set page 769 | ||
2051 | */ | ||
2052 | e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | ||
2053 | (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT)); | ||
2054 | |||
2055 | /* Clear 769_17.2 */ | ||
2056 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | ||
2057 | |||
2058 | out: | ||
2059 | if (phy_acquired == 1) | ||
2060 | hw->phy.ops.release_phy(hw); | ||
2061 | return ret_val; | ||
2062 | } | ||
2063 | |||
2064 | /** | ||
1787 | * e1000e_commit_phy - Soft PHY reset | 2065 | * e1000e_commit_phy - Soft PHY reset |
1788 | * @hw: pointer to the HW structure | 2066 | * @hw: pointer to the HW structure |
1789 | * | 2067 | * |