Diffstat (limited to 'drivers/net/e1000e')
 -rw-r--r--  drivers/net/e1000e/defines.h |   2
 -rw-r--r--  drivers/net/e1000e/e1000.h   |  28
 -rw-r--r--  drivers/net/e1000e/ethtool.c |  12
 -rw-r--r--  drivers/net/e1000e/hw.h      |   3
 -rw-r--r--  drivers/net/e1000e/ich8lan.c | 630
 -rw-r--r--  drivers/net/e1000e/netdev.c  |  50
 -rw-r--r--  drivers/net/e1000e/phy.c     | 540
7 files changed, 932 insertions, 333 deletions
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index c0f185beb8bc..1190167a8b3d 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -76,6 +76,7 @@
 /* Extended Device Control */
 #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
 #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
 #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
 #define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -347,6 +348,7 @@
 /* Extended Configuration Control and Size */
 #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
 #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008
 #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000
 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 981936c1fb46..3e187b0e4203 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -141,6 +141,22 @@ struct e1000_info;
 #define HV_TNCRS_UPPER		PHY_REG(778, 29) /* Transmit with no CRS */
 #define HV_TNCRS_LOWER		PHY_REG(778, 30)
 
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS			17
+#define BM_CS_STATUS_LINK_UP		0x0400
+#define BM_CS_STATUS_RESOLVED		0x0800
+#define BM_CS_STATUS_SPEED_MASK		0xC000
+#define BM_CS_STATUS_SPEED_1000		0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS			26
+#define HV_M_STATUS_AUTONEG_COMPLETE	0x1000
+#define HV_M_STATUS_SPEED_MASK		0x0300
+#define HV_M_STATUS_SPEED_1000		0x0200
+#define HV_M_STATUS_LINK_UP		0x0040
+
 enum e1000_boards {
 	board_82571,
 	board_82572,
@@ -519,9 +535,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
 extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
 extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
 extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					  u16 *data);
 extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
 extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
 extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					   u16 data);
 extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
 extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
 extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
@@ -538,7 +558,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
 extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
 extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+					u16 data);
 extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+				       u16 *data);
 extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
 				       u32 usec_interval, bool *success);
 extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
@@ -546,7 +570,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 extern s32 e1000e_check_downshift(struct e1000_hw *hw);
 extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					u16 *data);
 extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					 u16 data);
 extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
 extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
 extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 1bf4d2a5d34f..e82638ecae88 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -327,10 +327,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 
 		hw->fc.current_mode = hw->fc.requested_mode;
 
-		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
-			  hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
+		if (hw->phy.media_type == e1000_media_type_fiber) {
+			retval = hw->mac.ops.setup_link(hw);
+			/* implicit goto out */
+		} else {
+			retval = e1000e_force_mac_fc(hw);
+			if (retval)
+				goto out;
+			e1000e_set_fc_watermarks(hw);
+		}
 	}
 
+out:
 	clear_bit(__E1000_RESETTING, &adapter->state);
 	return retval;
 }
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index fd44d9f90769..aaea41ef794d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -764,11 +764,13 @@ struct e1000_phy_operations {
 	s32  (*get_cable_length)(struct e1000_hw *);
 	s32  (*get_phy_info)(struct e1000_hw *);
 	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
 	void (*release_phy)(struct e1000_hw *);
 	s32  (*reset_phy)(struct e1000_hw *);
 	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
 	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
 	s32  (*cfg_on_link_up)(struct e1000_hw *);
 };
 
@@ -901,6 +903,7 @@ struct e1000_shadow_ram {
 struct e1000_dev_spec_ich8lan {
 	bool kmrn_lock_loss_workaround_enabled;
 	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+	bool nvm_k1_enabled;
 };
 
 struct e1000_hw {
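A note on the new *_locked hooks added to e1000_phy_operations above: the existing read_phy_reg/write_phy_reg entry points take and drop the software flag around every single access, while the *_locked variants assume the caller already holds it. The sketch below is illustrative only -- example_locked_sequence() is not part of the patch -- and it assumes the HV_OEM_BITS defines introduced in the ich8lan.c hunks further down. It mirrors the pattern those hunks use in e1000_k1_gig_workaround_hv() and e1000_oem_bits_config_ich8lan(): acquire the flag once, perform several locked accesses, release once.

static s32 example_locked_sequence(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 oem_reg;

	/* take the PHY/CSR software flag once for the whole sequence */
	ret_val = hw->phy.ops.acquire_phy(hw);
	if (ret_val)
		return ret_val;

	/* read-modify-write HV_OEM_BITS without re-acquiring the flag */
	ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg);
	if (!ret_val) {
		oem_reg |= HV_OEM_BITS_RESTART_AN;
		ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS,
							   oem_reg);
	}

	hw->phy.ops.release_phy(hw);
	return ret_val;
}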
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 99df2abf82a9..eff3f4783655 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -122,6 +122,27 @@
 
 #define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */
 
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR		PHY_REG(768, 26)
+#define HV_SMB_ADDR_PEC_EN	0x0200
+#define HV_SMB_ADDR_VALID	0x0080
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP			0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK	0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT	17
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS		PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU	0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS	0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN	0x0400 /* Restart Auto-negotiation */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1  /* NVM Enable K1 bit */
+
 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
@@ -200,6 +221,10 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -242,7 +267,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 
 	phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
 	phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
+	phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
+	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
+	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
 	phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
+	phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
 	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
 	phy->id = e1000_phy_unknown;
@@ -303,6 +332,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
 		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
+		phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
 		break;
 	case IFE_E_PHY_ID:
 	case IFE_PLUS_E_PHY_ID:
@@ -469,14 +500,6 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		goto out;
 	}
 
-	if (hw->mac.type == e1000_pchlan) {
-		ret_val = e1000e_write_kmrn_reg(hw,
-						E1000_KMRNCTRLSTA_K1_CONFIG,
-						E1000_KMRNCTRLSTA_K1_ENABLE);
-		if (ret_val)
-			goto out;
-	}
-
 	/*
 	 * First we want to see if the MII Status Register reports
 	 * link. If so, then we want to get the current speed/duplex
@@ -486,6 +509,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
+	if (hw->mac.type == e1000_pchlan) {
+		ret_val = e1000_k1_gig_workaround_hv(hw, link);
+		if (ret_val)
+			goto out;
+	}
+
 	if (!link)
 		goto out; /* No link detected */
 
@@ -568,12 +597,39 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 static DEFINE_MUTEX(nvm_mutex);
 
 /**
+ * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_lock(&nvm_mutex);
+
+	return 0;
+}
+
+/**
+ * e1000_release_nvm_ich8lan - Release NVM mutex
+ * @hw: pointer to the HW structure
+ *
+ * Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_unlock(&nvm_mutex);
+
+	return;
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
  * e1000_acquire_swflag_ich8lan - Acquire software control flag
  * @hw: pointer to the HW structure
  *
- * Acquires the software control flag for performing NVM and PHY
- * operations. This is a function pointer entry point only called by
- * read/write routines for the PHY and NVM parts.
+ * Acquires the software control flag for performing PHY and select
+ * MAC CSR accesses.
  **/
 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -582,7 +638,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 	might_sleep();
 
-	mutex_lock(&nvm_mutex);
+	mutex_lock(&swflag_mutex);
 
 	while (timeout) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -599,7 +655,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 		goto out;
 	}
 
-	timeout = PHY_CFG_TIMEOUT * 2;
+	timeout = SW_FLAG_TIMEOUT;
 
 	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
@@ -623,7 +679,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 
 out:
 	if (ret_val)
-		mutex_unlock(&nvm_mutex);
+		mutex_unlock(&swflag_mutex);
 
 	return ret_val;
 }
@@ -632,9 +688,8 @@ out:
 * e1000_release_swflag_ich8lan - Release software control flag
 * @hw: pointer to the HW structure
 *
- * Releases the software control flag for performing NVM and PHY operations.
- * This is a function pointer entry point only called by read/write
- * routines for the PHY and NVM parts.
+ * Releases the software control flag for performing PHY and select
+ * MAC CSR accesses.
 **/
 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 {
@@ -644,7 +699,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
 
-	mutex_unlock(&nvm_mutex);
+	mutex_unlock(&swflag_mutex);
+
+	return;
 }
 
 /**
@@ -752,6 +809,327 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ *
+ * SW should configure the LCD from the NVM extended configuration region
+ * as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms. This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
+	    (hw->mac.type == e1000_pchlan)) {
+		struct e1000_adapter *adapter = hw->adapter;
+
+		/* Check if SW needs to configure the PHY */
+		if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
+		    (hw->mac.type == e1000_pchlan))
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		else
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+
+		data = er32(FEXTNVM);
+		if (!(data & sw_cfg_mask))
+			goto out;
+
+		/* Wait for basic configuration completes before proceeding */
+		e1000_lan_init_done_ich8lan(hw);
+
+		/*
+		 * Make sure HW does not configure LCD from PHY
+		 * extended configuration before SW configuration
+		 */
+		data = er32(EXTCNF_CTRL);
+		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+			goto out;
+
+		cnf_size = er32(EXTCNF_SIZE);
+		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+		if (!cnf_size)
+			goto out;
+
+		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+		if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+		    (hw->mac.type == e1000_pchlan)) {
+			/*
+			 * HW configures the SMBus address and LEDs when the
+			 * OEM and LCD Write Enable bits are set in the NVM.
+			 * When both NVM bits are cleared, SW will configure
+			 * them instead.
+			 */
+			data = er32(STRAP);
+			data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+			reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+			reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+			ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+								reg_data);
+			if (ret_val)
+				goto out;
+
+			data = er32(LEDCTL);
+			ret_val = e1000_write_phy_reg_hv_locked(hw,
+								HV_LED_CONFIG,
+								(u16)data);
+			if (ret_val)
+				goto out;
+		}
+		/* Configure LCD from extended configuration region. */
+
+		/* cnf_base_addr is in DWORD */
+		word_addr = (u16)(cnf_base_addr << 1);
+
+		for (i = 0; i < cnf_size; i++) {
+			ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+						 &reg_data);
+			if (ret_val)
+				goto out;
+
+			ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+						 1, &reg_addr);
+			if (ret_val)
+				goto out;
+
+			/* Save off the PHY page for future writes. */
+			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+				phy_page = reg_data;
+				continue;
+			}
+
+			reg_addr &= PHY_REG_MASK;
+			reg_addr |= phy_page;
+
+			ret_val = phy->ops.write_phy_reg_locked(hw,
+								(u32)reg_addr,
+								reg_data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	hw->phy.ops.release_phy(hw);
+	return ret_val;
+}
+
+/**
+ * e1000_k1_gig_workaround_hv - K1 Si workaround
+ * @hw: pointer to the HW structure
+ * @link: link up bool flag
+ *
+ * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ * from a lower speed. This workaround disables K1 whenever link is at 1Gig
+ * If link is down, the function will restore the default K1 setting located
+ * in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+	if (hw->mac.type != e1000_pchlan)
+		goto out;
+
+	/* Wrap the whole flow with the sw flag */
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+	if (link) {
+		if (hw->phy.type == e1000_phy_82578) {
+			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
+								  BM_CS_STATUS,
+								  &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= BM_CS_STATUS_LINK_UP |
+				      BM_CS_STATUS_RESOLVED |
+				      BM_CS_STATUS_SPEED_MASK;
+
+			if (status_reg == (BM_CS_STATUS_LINK_UP |
+					   BM_CS_STATUS_RESOLVED |
+					   BM_CS_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		if (hw->phy.type == e1000_phy_82577) {
+			ret_val = hw->phy.ops.read_phy_reg_locked(hw,
+								  HV_M_STATUS,
+								  &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= HV_M_STATUS_LINK_UP |
+				      HV_M_STATUS_AUTONEG_COMPLETE |
+				      HV_M_STATUS_SPEED_MASK;
+
+			if (status_reg == (HV_M_STATUS_LINK_UP |
+					   HV_M_STATUS_AUTONEG_COMPLETE |
+					   HV_M_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		/* Link stall fix for link up */
+		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+							   0x0100);
+		if (ret_val)
+			goto release;
+
+	} else {
+		/* Link stall fix for link down */
+		ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19),
+							   0x4100);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+	hw->phy.ops.release_phy(hw);
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_configure_k1_ich8lan - Configure K1 power state
+ * @hw: pointer to the HW structure
+ * @enable: K1 state to configure
+ *
+ * Configure the K1 power state based on the provided parameter.
+ * Assumes semaphore already acquired.
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+	s32 ret_val = 0;
+	u32 ctrl_reg = 0;
+	u32 ctrl_ext = 0;
+	u32 reg = 0;
+	u16 kmrn_reg = 0;
+
+	ret_val = e1000e_read_kmrn_reg_locked(hw,
+					      E1000_KMRNCTRLSTA_K1_CONFIG,
+					      &kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	if (k1_enable)
+		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+	else
+		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+	ret_val = e1000e_write_kmrn_reg_locked(hw,
+					       E1000_KMRNCTRLSTA_K1_CONFIG,
+					       kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	udelay(20);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_reg = er32(CTRL);
+
+	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+	reg |= E1000_CTRL_FRCSPD;
+	ew32(CTRL, reg);
+
+	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+	udelay(20);
+	ew32(CTRL, ctrl_reg);
+	ew32(CTRL_EXT, ctrl_ext);
+	udelay(20);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
+ * @hw: pointer to the HW structure
+ * @d0_state: boolean if entering d0 or d3 device state
+ *
+ * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
+ * in NVM determines whether HW should configure LPLU and Gbe Disable.
+ **/
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+	s32 ret_val = 0;
+	u32 mac_reg;
+	u16 oem_reg;
+
+	if (hw->mac.type != e1000_pchlan)
+		return ret_val;
+
+	ret_val = hw->phy.ops.acquire_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	mac_reg = er32(EXTCNF_CTRL);
+	if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+		goto out;
+
+	mac_reg = er32(FEXTNVM);
+	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+		goto out;
+
+	mac_reg = er32(PHY_CTRL);
+
+	ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+	if (d0_state) {
+		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+	} else {
+		if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+	}
+	/* Restart auto-neg to activate the bits */
+	if (!e1000_check_reset_block(hw))
+		oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	hw->phy.ops.release_phy(hw);
+
+	return ret_val;
+}
+
+
+/**
 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
 * done after every PHY reset.
 **/
@@ -791,10 +1169,20 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
 	ret_val = hw->phy.ops.acquire_phy(hw);
 	if (ret_val)
 		return ret_val;
+
 	hw->phy.addr = 1;
-	e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	if (ret_val)
+		goto out;
 	hw->phy.ops.release_phy(hw);
 
+	/*
+	 * Configure the K1 Si workaround during phy reset assuming there is
+	 * link so that it disables K1 if link is in 1Gbps.
+	 */
+	ret_val = e1000_k1_gig_workaround_hv(hw, true);
+
+out:
 	return ret_val;
 }
 
@@ -840,11 +1228,8 @@
 **/
 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
-	u32 i;
-	u32 data, cnf_size, cnf_base_addr, sw_cfg_mask;
-	s32 ret_val;
-	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+	s32 ret_val = 0;
+	u16 reg;
 
 	ret_val = e1000e_phy_hw_reset_generic(hw);
 	if (ret_val)
@@ -859,81 +1244,20 @@
 			return ret_val;
 	}
 
-	/*
-	 * Initialize the PHY from the NVM on ICH platforms. This
-	 * is needed due to an issue where the NVM configuration is
-	 * not properly autoloaded after power transitions.
-	 * Therefore, after each PHY reset, we will load the
-	 * configuration data out of the NVM manually.
-	 */
-	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
-		struct e1000_adapter *adapter = hw->adapter;
-
-		/* Check if SW needs configure the PHY */
-		if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
-		    (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M))
-			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
-		else
-			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
-
-		data = er32(FEXTNVM);
-		if (!(data & sw_cfg_mask))
-			return 0;
-
-		/* Wait for basic configuration completes before proceeding */
-		e1000_lan_init_done_ich8lan(hw);
-
-		/*
-		 * Make sure HW does not configure LCD from PHY
-		 * extended configuration before SW configuration
-		 */
-		data = er32(EXTCNF_CTRL);
-		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
-			return 0;
-
-		cnf_size = er32(EXTCNF_SIZE);
-		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
-		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
-		if (!cnf_size)
-			return 0;
-
-		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
-		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
-		/* Configure LCD from extended configuration region. */
-
-		/* cnf_base_addr is in DWORD */
-		word_addr = (u16)(cnf_base_addr << 1);
-
-		for (i = 0; i < cnf_size; i++) {
-			ret_val = e1000_read_nvm(hw,
-						 (word_addr + i * 2),
-						 1,
-						 &reg_data);
-			if (ret_val)
-				return ret_val;
-
-			ret_val = e1000_read_nvm(hw,
-						 (word_addr + i * 2 + 1),
-						 1,
-						 &reg_addr);
-			if (ret_val)
-				return ret_val;
-
-			/* Save off the PHY page for future writes. */
-			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
-				phy_page = reg_data;
-				continue;
-			}
+	/* Dummy read to clear the phy wakeup bit after lcd reset */
+	if (hw->mac.type == e1000_pchlan)
+		e1e_rphy(hw, BM_WUC, &reg);
 
-			reg_addr |= phy_page;
+	/* Configure the LCD with the extended configuration region in NVM */
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		goto out;
 
-			ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data);
-			if (ret_val)
-				return ret_val;
-		}
-	}
+	/* Configure the LCD with the OEM bits in NVM */
+	if (hw->mac.type == e1000_pchlan)
+		ret_val = e1000_oem_bits_config_ich8lan(hw, true);
 
+out:
 	return 0;
 }
 
@@ -1054,6 +1378,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag. For PCH, if OEM write
+ * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
+ * the phy speed. This function will manually set the LPLU bit and restart
+ * auto-neg as hw would do. D3 and D0 LPLU will call the same function
+ * since it configures the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+	s32 ret_val = 0;
+	u16 oem_reg;
+
+	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	if (active)
+		oem_reg |= HV_OEM_BITS_LPLU;
+	else
+		oem_reg &= ~HV_OEM_BITS_LPLU;
+
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	return ret_val;
+}
+
+/**
 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
 * @hw: pointer to the HW structure
 * @active: TRUE to enable LPLU, FALSE to disable
@@ -1314,12 +1670,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
 	    (words == 0)) {
 		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
-		return -E1000_ERR_NVM;
+		ret_val = -E1000_ERR_NVM;
+		goto out;
 	}
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
 	if (ret_val) {
@@ -1345,7 +1700,7 @@
 		}
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 out:
 	if (ret_val)
@@ -1603,11 +1958,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 		return -E1000_ERR_NVM;
 	}
 
+	nvm->ops.acquire_nvm(hw);
+
 	for (i = 0; i < words; i++) {
 		dev_spec->shadow_ram[offset+i].modified = 1;
 		dev_spec->shadow_ram[offset+i].value = data[i];
 	}
 
+	nvm->ops.release_nvm(hw);
+
 	return 0;
 }
 
@@ -1637,9 +1996,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	if (nvm->type != e1000_nvm_flash_sw)
 		goto out;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		goto out;
+	nvm->ops.acquire_nvm(hw);
 
 	/*
 	 * We're writing to the opposite bank so if we're on bank 1,
@@ -1657,7 +2014,7 @@
 		old_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	} else {
@@ -1665,7 +2022,7 @@
 		new_bank_offset = 0;
 		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
 		if (ret_val) {
-			e1000_release_swflag_ich8lan(hw);
+			nvm->ops.release_nvm(hw);
 			goto out;
 		}
 	}
@@ -1723,7 +2080,7 @@
 	if (ret_val) {
 		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
 		hw_dbg(hw, "Flash commit failed.\n");
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1736,7 +2093,7 @@
 	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
 	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 	data &= 0xBFFF;
@@ -1744,7 +2101,7 @@
 						  act_offset * 2 + 1,
 						  (u8)(data >> 8));
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1757,7 +2114,7 @@
 	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
 	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
 	if (ret_val) {
-		e1000_release_swflag_ich8lan(hw);
+		nvm->ops.release_nvm(hw);
 		goto out;
 	}
 
@@ -1767,7 +2124,7 @@
 		dev_spec->shadow_ram[i].value = 0xFFFF;
 	}
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 
 	/*
 	 * Reload the EEPROM, or else modifications will not appear
@@ -1831,14 +2188,12 @@
 **/
 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
 {
+	struct e1000_nvm_info *nvm = &hw->nvm;
 	union ich8_flash_protected_range pr0;
 	union ich8_hws_flash_status hsfsts;
 	u32 gfpreg;
-	s32 ret_val;
 
-	ret_val = e1000_acquire_swflag_ich8lan(hw);
-	if (ret_val)
-		return;
+	nvm->ops.acquire_nvm(hw);
 
 	gfpreg = er32flash(ICH_FLASH_GFPREG);
 
@@ -1859,7 +2214,7 @@
 	hsfsts.hsf_status.flockdn = true;
 	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 
-	e1000_release_swflag_ich8lan(hw);
+	nvm->ops.release_nvm(hw);
 }
 
 /**
@@ -2229,6 +2584,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
 **/
 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u16 reg;
 	u32 ctrl, icr, kab;
 	s32 ret_val;
 
@@ -2263,6 +2620,18 @@
 		ew32(PBS, E1000_PBS_16K);
 	}
 
+	if (hw->mac.type == e1000_pchlan) {
+		/* Save the NVM K1 bit setting*/
+		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+		if (ret_val)
+			return ret_val;
+
+		if (reg & E1000_NVM_K1_ENABLE)
+			dev_spec->nvm_k1_enabled = true;
+		else
+			dev_spec->nvm_k1_enabled = false;
+	}
+
 	ctrl = er32(CTRL);
 
 	if (!e1000_check_reset_block(hw)) {
@@ -2304,7 +2673,19 @@
 			hw_dbg(hw, "Auto Read Done did not complete\n");
 		}
 	}
+	/* Dummy read to clear the phy wakeup bit after lcd reset */
+	if (hw->mac.type == e1000_pchlan)
+		e1e_rphy(hw, BM_WUC, &reg);
 
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type == e1000_pchlan) {
+		ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+		if (ret_val)
+			goto out;
+	}
 	/*
 	 * For PCH, this write will make sure that any noise
 	 * will be detected as a CRC error and be dropped rather than show up
@@ -2323,6 +2704,7 @@
 	if (hw->mac.type == e1000_pchlan)
 		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
 
+out:
 	return ret_val;
 }
 
@@ -2627,14 +3009,6 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
 	if (ret_val)
 		return ret_val;
 
-	if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) {
-		ret_val = e1000e_write_kmrn_reg(hw,
-						E1000_KMRNCTRLSTA_K1_CONFIG,
-						E1000_KMRNCTRLSTA_K1_DISABLE);
-		if (ret_val)
-			return ret_val;
-	}
-
 	if ((hw->mac.type == e1000_ich8lan) &&
 	    (hw->phy.type == e1000_phy_igp_3) &&
 	    (*speed == SPEED_1000)) {
@@ -2843,9 +3217,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
 			    E1000_PHY_CTRL_GBE_DISABLE;
 		ew32(PHY_CTRL, phy_ctrl);
 
-		/* Workaround SWFLAG unexpectedly set during S0->Sx */
 		if (hw->mac.type == e1000_pchlan)
-			udelay(500);
+			e1000_phy_hw_reset_ich8lan(hw);
 	default:
 		break;
 	}
@@ -3113,9 +3486,9 @@ static struct e1000_phy_operations ich8_phy_ops = {
 };
 
 static struct e1000_nvm_operations ich8_nvm_ops = {
-	.acquire_nvm = e1000_acquire_swflag_ich8lan,
+	.acquire_nvm = e1000_acquire_nvm_ich8lan,
 	.read_nvm = e1000_read_nvm_ich8lan,
-	.release_nvm = e1000_release_swflag_ich8lan,
+	.release_nvm = e1000_release_nvm_ich8lan,
 	.update_nvm = e1000_update_nvm_checksum_ich8lan,
 	.valid_led_default = e1000_valid_led_default_ich8lan,
 	.validate_nvm = e1000_validate_nvm_checksum_ich8lan,
@@ -3186,6 +3559,7 @@ struct e1000_info e1000_pch_info = {
 		 | FLAG_HAS_AMT
 		 | FLAG_HAS_FLASH
 		 | FLAG_HAS_JUMBO_FRAMES
+		 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
 		 | FLAG_APME_IN_WUC,
 	.pba = 26,
 	.max_hw_frame_size = 4096,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 0687c6aa4e46..fad8f9ea0043 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2769,25 +2769,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	/*
 	 * flow control settings
 	 *
-	 * The high water mark must be low enough to fit two full frame
+	 * The high water mark must be low enough to fit one full frame
 	 * (or the size used for early receive) above it in the Rx FIFO.
 	 * Set it to the lower of:
 	 * - 90% of the Rx FIFO size, and
 	 * - the full Rx FIFO size minus the early receive size (for parts
 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
-	 * - the full Rx FIFO size minus two full frames
+	 * - the full Rx FIFO size minus one full frame
 	 */
-	if ((adapter->flags & FLAG_HAS_ERT) &&
-	    (adapter->netdev->mtu > ETH_DATA_LEN))
-		hwm = min(((pba << 10) * 9 / 10),
-			  ((pba << 10) - (E1000_ERT_2048 << 3)));
-	else
-		hwm = min(((pba << 10) * 9 / 10),
-			  ((pba << 10) - (2 * adapter->max_frame_size)));
+	if (hw->mac.type == e1000_pchlan) {
+		/*
+		 * Workaround PCH LOM adapter hangs with certain network
+		 * loads. If hangs persist, try disabling Tx flow control.
+		 */
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			fc->high_water = 0x3500;
+			fc->low_water = 0x1500;
+		} else {
+			fc->high_water = 0x5000;
+			fc->low_water = 0x3000;
+		}
+	} else {
+		if ((adapter->flags & FLAG_HAS_ERT) &&
+		    (adapter->netdev->mtu > ETH_DATA_LEN))
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - (E1000_ERT_2048 << 3)));
+		else
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - adapter->max_frame_size));
 
-	fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
-	fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
-	fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
+		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+		fc->low_water = fc->high_water - 8;
+	}
 
 	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
 		fc->pause_time = 0xFFFF;
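To make the retained non-PCH branch of the watermark math concrete, here is an illustrative helper (not part of the patch) evaluated with made-up inputs; it assumes the ERT path is not taken, a hypothetical 20 KB packet buffer, a 1522-byte max frame, and the kernel's min() macro.

/*
 * Illustrative only -- restates the non-PCH hwm computation from the hunk
 * above with hypothetical inputs: pba = 20 (KB), max_frame_size = 1522.
 */
static u32 example_hwm(u32 pba, u32 max_frame_size)
{
	/* lower of 90% of the Rx FIFO and the FIFO minus one full frame */
	return min(((pba << 10) * 9 / 10),
		   ((pba << 10) - max_frame_size));
}

/* example_hwm(20, 1522) == min(18432, 18958) == 18432 bytes */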
@@ -2813,6 +2826,10 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2813 | if (mac->ops.init_hw(hw)) | 2826 | if (mac->ops.init_hw(hw)) |
2814 | e_err("Hardware Error\n"); | 2827 | e_err("Hardware Error\n"); |
2815 | 2828 | ||
2829 | /* additional part of the flow-control workaround above */ | ||
2830 | if (hw->mac.type == e1000_pchlan) | ||
2831 | ew32(FCRTV_PCH, 0x1000); | ||
2832 | |||
2816 | e1000_update_mng_vlan(adapter); | 2833 | e1000_update_mng_vlan(adapter); |
2817 | 2834 | ||
2818 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | 2835 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ |
@@ -3610,7 +3627,7 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
3610 | case SPEED_100: | 3627 | case SPEED_100: |
3611 | txb2b = 0; | 3628 | txb2b = 0; |
3612 | netdev->tx_queue_len = 100; | 3629 | netdev->tx_queue_len = 100; |
3613 | /* maybe add some timeout factor ? */ | 3630 | adapter->tx_timeout_factor = 10; |
3614 | break; | 3631 | break; |
3615 | } | 3632 | } |
3616 | 3633 | ||
@@ -4288,8 +4305,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4288 | 4305 | ||
4289 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) | 4306 | while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) |
4290 | msleep(1); | 4307 | msleep(1); |
4291 | /* e1000e_down has a dependency on max_frame_size */ | 4308 | /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ |
4292 | adapter->max_frame_size = max_frame; | 4309 | adapter->max_frame_size = max_frame; |
4310 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); | ||
4311 | netdev->mtu = new_mtu; | ||
4293 | if (netif_running(netdev)) | 4312 | if (netif_running(netdev)) |
4294 | e1000e_down(adapter); | 4313 | e1000e_down(adapter); |
4295 | 4314 | ||
@@ -4319,9 +4338,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
4319 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN | 4338 | adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN |
4320 | + ETH_FCS_LEN; | 4339 | + ETH_FCS_LEN; |
4321 | 4340 | ||
4322 | e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); | ||
4323 | netdev->mtu = new_mtu; | ||
4324 | |||
4325 | if (netif_running(netdev)) | 4341 | if (netif_running(netdev)) |
4326 | e1000e_up(adapter); | 4342 | e1000e_up(adapter); |
4327 | else | 4343 | else |
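The two change_mtu hunks above move the MTU bookkeeping ahead of the interface teardown because e1000e_down() now reaches e1000e_reset(), which sizes the flow-control watermarks from max_frame_size and the MTU; both must therefore be current before the reset runs. A minimal sketch of the resulting ordering, with all helpers reduced to stand-ins:

/* Sketch of the ordering enforced by the hunks above: update the sizes
 * first, then bring the interface down and back up.  All helpers are
 * stand-ins, not the driver's real signatures. */
struct adapter_sketch { unsigned int max_frame_size; unsigned int mtu; int running; };

static void down_stub(struct adapter_sketch *a) { (void)a; /* ends in a reset that uses the sizes */ }
static void up_stub(struct adapter_sketch *a)   { (void)a; }

static int change_mtu_sketch(struct adapter_sketch *a, unsigned int new_mtu,
                             unsigned int new_max_frame)
{
        /* the reset path depends on max_frame_size and mtu: set them first */
        a->max_frame_size = new_max_frame;
        a->mtu = new_mtu;

        if (a->running)
                down_stub(a);
        /* ... pick rx_buffer_len for the new size here ... */
        if (a->running)
                up_stub(a);
        return 0;
}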
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 994401fd0664..85f955f70417 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -71,7 +71,6 @@ static const u16 e1000_igp_2_cable_length_table[] = | |||
71 | #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) | 71 | #define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) |
72 | #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ | 72 | #define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ |
73 | #define I82577_CTRL_REG 23 | 73 | #define I82577_CTRL_REG 23 |
74 | #define I82577_CTRL_DOWNSHIFT_MASK (7 << 10) | ||
75 | 74 | ||
76 | /* 82577 specific PHY registers */ | 75 | /* 82577 specific PHY registers */ |
77 | #define I82577_PHY_CTRL_2 18 | 76 | #define I82577_PHY_CTRL_2 18 |
@@ -95,13 +94,6 @@ static const u16 e1000_igp_2_cable_length_table[] = | |||
95 | /* BM PHY Copper Specific Control 1 */ | 94 | /* BM PHY Copper Specific Control 1 */ |
96 | #define BM_CS_CTRL1 16 | 95 | #define BM_CS_CTRL1 16 |
97 | 96 | ||
98 | /* BM PHY Copper Specific Status */ | ||
99 | #define BM_CS_STATUS 17 | ||
100 | #define BM_CS_STATUS_LINK_UP 0x0400 | ||
101 | #define BM_CS_STATUS_RESOLVED 0x0800 | ||
102 | #define BM_CS_STATUS_SPEED_MASK 0xC000 | ||
103 | #define BM_CS_STATUS_SPEED_1000 0x8000 | ||
104 | |||
105 | #define HV_MUX_DATA_CTRL PHY_REG(776, 16) | 97 | #define HV_MUX_DATA_CTRL PHY_REG(776, 16) |
106 | #define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 | 98 | #define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 |
107 | #define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 | 99 | #define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 |
@@ -164,16 +156,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) | |||
164 | * MDIC mode. No harm in trying again in this case since | 156 | * MDIC mode. No harm in trying again in this case since |
165 | * the PHY ID is unknown at this point anyway | 157 | * the PHY ID is unknown at this point anyway |
166 | */ | 158 | */ |
159 | ret_val = phy->ops.acquire_phy(hw); | ||
160 | if (ret_val) | ||
161 | goto out; | ||
167 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); | 162 | ret_val = e1000_set_mdio_slow_mode_hv(hw, true); |
168 | if (ret_val) | 163 | if (ret_val) |
169 | goto out; | 164 | goto out; |
165 | phy->ops.release_phy(hw); | ||
170 | 166 | ||
171 | retry_count++; | 167 | retry_count++; |
172 | } | 168 | } |
173 | out: | 169 | out: |
174 | /* Revert to MDIO fast mode, if applicable */ | 170 | /* Revert to MDIO fast mode, if applicable */ |
175 | if (retry_count) | 171 | if (retry_count) { |
172 | ret_val = phy->ops.acquire_phy(hw); | ||
173 | if (ret_val) | ||
174 | return ret_val; | ||
176 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | 175 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); |
176 | phy->ops.release_phy(hw); | ||
177 | } | ||
177 | 178 | ||
178 | return ret_val; | 179 | return ret_val; |
179 | } | 180 | } |
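Because e1000_set_mdio_slow_mode_hv() now assumes the PHY semaphore is already held (see its hunk further down), the PHY ID retry loop above brackets every slow/fast MDIO switch with acquire_phy()/release_phy() itself. A hedged sketch of that bracketing; types, the retry condition and the ID read are simplified stand-ins:

/* Sketch of the bracketing added above: each slow/fast MDIO switch is now
 * wrapped in its own acquire/release, since the helper no longer locks. */
#include <stdbool.h>
typedef int s32;
struct hw;

static s32 acquire(struct hw *hw)  { (void)hw; return 0; }
static void release(struct hw *hw) { (void)hw; }
static s32 set_mdio_slow_mode(struct hw *hw, bool slow)
{ (void)hw; (void)slow; return 0; }
static bool phy_id_valid(struct hw *hw) { (void)hw; return false; }

static s32 get_phy_id_with_retry(struct hw *hw)
{
        s32 ret = 0;
        unsigned int retry_count = 0;

        while (retry_count < 2) {
                /* ... attempt the PHY ID read here ... */
                if (phy_id_valid(hw))
                        break;

                /* switch to slow MDIO under the semaphore, then retry */
                ret = acquire(hw);
                if (ret)
                        goto out;
                ret = set_mdio_slow_mode(hw, true);
                release(hw);
                if (ret)
                        goto out;

                retry_count++;
        }
out:
        /* revert to fast mode, again under the semaphore */
        if (retry_count) {
                ret = acquire(hw);
                if (ret)
                        return ret;
                ret = set_mdio_slow_mode(hw, false);
                release(hw);
        }
        return ret;
}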
@@ -354,94 +355,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) | |||
354 | } | 355 | } |
355 | 356 | ||
356 | /** | 357 | /** |
357 | * e1000e_read_phy_reg_igp - Read igp PHY register | 358 | * __e1000e_read_phy_reg_igp - Read igp PHY register |
358 | * @hw: pointer to the HW structure | 359 | * @hw: pointer to the HW structure |
359 | * @offset: register offset to be read | 360 | * @offset: register offset to be read |
360 | * @data: pointer to the read data | 361 | * @data: pointer to the read data |
362 | * @locked: semaphore has already been acquired or not | ||
361 | * | 363 | * |
362 | * Acquires semaphore, if necessary, then reads the PHY register at offset | 364 | * Acquires semaphore, if necessary, then reads the PHY register at offset |
363 | * and storing the retrieved information in data. Release any acquired | 365 | * and stores the retrieved information in data. Release any acquired |
364 | * semaphores before exiting. | 366 | * semaphores before exiting. |
365 | **/ | 367 | **/ |
366 | s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) | 368 | static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, |
369 | bool locked) | ||
367 | { | 370 | { |
368 | s32 ret_val; | 371 | s32 ret_val = 0; |
369 | 372 | ||
370 | ret_val = hw->phy.ops.acquire_phy(hw); | 373 | if (!locked) { |
371 | if (ret_val) | 374 | if (!(hw->phy.ops.acquire_phy)) |
372 | return ret_val; | 375 | goto out; |
376 | |||
377 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
378 | if (ret_val) | ||
379 | goto out; | ||
380 | } | ||
373 | 381 | ||
374 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 382 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
375 | ret_val = e1000e_write_phy_reg_mdic(hw, | 383 | ret_val = e1000e_write_phy_reg_mdic(hw, |
376 | IGP01E1000_PHY_PAGE_SELECT, | 384 | IGP01E1000_PHY_PAGE_SELECT, |
377 | (u16)offset); | 385 | (u16)offset); |
378 | if (ret_val) { | 386 | if (ret_val) |
379 | hw->phy.ops.release_phy(hw); | 387 | goto release; |
380 | return ret_val; | ||
381 | } | ||
382 | } | 388 | } |
383 | 389 | ||
384 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 390 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
385 | data); | 391 | data); |
386 | |||
387 | hw->phy.ops.release_phy(hw); | ||
388 | 392 | ||
393 | release: | ||
394 | if (!locked) | ||
395 | hw->phy.ops.release_phy(hw); | ||
396 | out: | ||
389 | return ret_val; | 397 | return ret_val; |
390 | } | 398 | } |
391 | 399 | ||
392 | /** | 400 | /** |
401 | * e1000e_read_phy_reg_igp - Read igp PHY register | ||
402 | * @hw: pointer to the HW structure | ||
403 | * @offset: register offset to be read | ||
404 | * @data: pointer to the read data | ||
405 | * | ||
406 | * Acquires semaphore then reads the PHY register at offset and stores the | ||
407 | * retrieved information in data. | ||
408 | * Release the acquired semaphore before exiting. | ||
409 | **/ | ||
410 | s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) | ||
411 | { | ||
412 | return __e1000e_read_phy_reg_igp(hw, offset, data, false); | ||
413 | } | ||
414 | |||
415 | /** | ||
416 | * e1000e_read_phy_reg_igp_locked - Read igp PHY register | ||
417 | * @hw: pointer to the HW structure | ||
418 | * @offset: register offset to be read | ||
419 | * @data: pointer to the read data | ||
420 | * | ||
421 | * Reads the PHY register at offset and stores the retrieved information | ||
422 | * in data. Assumes semaphore already acquired. | ||
423 | **/ | ||
424 | s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) | ||
425 | { | ||
426 | return __e1000e_read_phy_reg_igp(hw, offset, data, true); | ||
427 | } | ||
428 | |||
429 | /** | ||
393 | * e1000e_write_phy_reg_igp - Write igp PHY register | 430 | * e1000e_write_phy_reg_igp - Write igp PHY register |
394 | * @hw: pointer to the HW structure | 431 | * @hw: pointer to the HW structure |
395 | * @offset: register offset to write to | 432 | * @offset: register offset to write to |
396 | * @data: data to write at register offset | 433 | * @data: data to write at register offset |
434 | * @locked: semaphore has already been acquired or not | ||
397 | * | 435 | * |
398 | * Acquires semaphore, if necessary, then writes the data to PHY register | 436 | * Acquires semaphore, if necessary, then writes the data to PHY register |
399 | * at the offset. Release any acquired semaphores before exiting. | 437 | * at the offset. Release any acquired semaphores before exiting. |
400 | **/ | 438 | **/ |
401 | s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) | 439 | static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, |
440 | bool locked) | ||
402 | { | 441 | { |
403 | s32 ret_val; | 442 | s32 ret_val = 0; |
404 | 443 | ||
405 | ret_val = hw->phy.ops.acquire_phy(hw); | 444 | if (!locked) { |
406 | if (ret_val) | 445 | if (!(hw->phy.ops.acquire_phy)) |
407 | return ret_val; | 446 | goto out; |
447 | |||
448 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
449 | if (ret_val) | ||
450 | goto out; | ||
451 | } | ||
408 | 452 | ||
409 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 453 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
410 | ret_val = e1000e_write_phy_reg_mdic(hw, | 454 | ret_val = e1000e_write_phy_reg_mdic(hw, |
411 | IGP01E1000_PHY_PAGE_SELECT, | 455 | IGP01E1000_PHY_PAGE_SELECT, |
412 | (u16)offset); | 456 | (u16)offset); |
413 | if (ret_val) { | 457 | if (ret_val) |
414 | hw->phy.ops.release_phy(hw); | 458 | goto release; |
415 | return ret_val; | ||
416 | } | ||
417 | } | 459 | } |
418 | 460 | ||
419 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 461 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
420 | data); | 462 | data); |
421 | 463 | ||
422 | hw->phy.ops.release_phy(hw); | 464 | release: |
465 | if (!locked) | ||
466 | hw->phy.ops.release_phy(hw); | ||
423 | 467 | ||
468 | out: | ||
424 | return ret_val; | 469 | return ret_val; |
425 | } | 470 | } |
426 | 471 | ||
427 | /** | 472 | /** |
428 | * e1000e_read_kmrn_reg - Read kumeran register | 473 | * e1000e_write_phy_reg_igp - Write igp PHY register |
474 | * @hw: pointer to the HW structure | ||
475 | * @offset: register offset to write to | ||
476 | * @data: data to write at register offset | ||
477 | * | ||
478 | * Acquires semaphore then writes the data to PHY register | ||
479 | * at the offset. Release any acquired semaphores before exiting. | ||
480 | **/ | ||
481 | s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) | ||
482 | { | ||
483 | return __e1000e_write_phy_reg_igp(hw, offset, data, false); | ||
484 | } | ||
485 | |||
486 | /** | ||
487 | * e1000e_write_phy_reg_igp_locked - Write igp PHY register | ||
488 | * @hw: pointer to the HW structure | ||
489 | * @offset: register offset to write to | ||
490 | * @data: data to write at register offset | ||
491 | * | ||
492 | * Writes the data to PHY register at the offset. | ||
493 | * Assumes semaphore already acquired. | ||
494 | **/ | ||
495 | s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) | ||
496 | { | ||
497 | return __e1000e_write_phy_reg_igp(hw, offset, data, true); | ||
498 | } | ||
499 | |||
500 | /** | ||
501 | * __e1000_read_kmrn_reg - Read kumeran register | ||
429 | * @hw: pointer to the HW structure | 502 | * @hw: pointer to the HW structure |
430 | * @offset: register offset to be read | 503 | * @offset: register offset to be read |
431 | * @data: pointer to the read data | 504 | * @data: pointer to the read data |
505 | * @locked: semaphore has already been acquired or not | ||
432 | * | 506 | * |
433 | * Acquires semaphore, if necessary. Then reads the PHY register at offset | 507 | * Acquires semaphore, if necessary. Then reads the PHY register at offset |
434 | * using the kumeran interface. The information retrieved is stored in data. | 508 | * using the kumeran interface. The information retrieved is stored in data. |
435 | * Release any acquired semaphores before exiting. | 509 | * Release any acquired semaphores before exiting. |
436 | **/ | 510 | **/ |
437 | s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) | 511 | static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, |
512 | bool locked) | ||
438 | { | 513 | { |
439 | u32 kmrnctrlsta; | 514 | u32 kmrnctrlsta; |
440 | s32 ret_val; | 515 | s32 ret_val = 0; |
441 | 516 | ||
442 | ret_val = hw->phy.ops.acquire_phy(hw); | 517 | if (!locked) { |
443 | if (ret_val) | 518 | if (!(hw->phy.ops.acquire_phy)) |
444 | return ret_val; | 519 | goto out; |
520 | |||
521 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
522 | if (ret_val) | ||
523 | goto out; | ||
524 | } | ||
445 | 525 | ||
446 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | 526 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & |
447 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; | 527 | E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; |
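The recurring shape in this and the following phy.c hunks is a single static __helper taking a bool locked flag, plus two thin exported wrappers: the plain name acquires and releases the semaphore around the access, while the new _locked variant assumes the caller already holds it. A generic sketch of that split; the names below are illustrative, not the driver's:

/* Generic sketch of the locked/unlocked register-access split used in
 * this patch.  acquire/release and the raw access are stand-ins. */
#include <stdbool.h>
typedef int s32;
struct hw;                               /* opaque for the sketch */

static s32 acquire_sem(struct hw *hw)  { (void)hw; return 0; }
static void release_sem(struct hw *hw) { (void)hw; }
static s32 raw_read(struct hw *hw, unsigned int off, unsigned short *d)
{ (void)hw; (void)off; *d = 0; return 0; }

static s32 __read_reg(struct hw *hw, unsigned int off, unsigned short *data,
                      bool locked)
{
        s32 ret = 0;

        if (!locked) {
                ret = acquire_sem(hw);
                if (ret)
                        goto out;
        }

        ret = raw_read(hw, off, data);

        if (!locked)
                release_sem(hw);
out:
        return ret;
}

/* takes and drops the semaphore itself */
s32 read_reg(struct hw *hw, unsigned int off, unsigned short *data)
{
        return __read_reg(hw, off, data, false);
}

/* caller already holds the semaphore */
s32 read_reg_locked(struct hw *hw, unsigned int off, unsigned short *data)
{
        return __read_reg(hw, off, data, true);
}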
@@ -452,41 +532,111 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) | |||
452 | kmrnctrlsta = er32(KMRNCTRLSTA); | 532 | kmrnctrlsta = er32(KMRNCTRLSTA); |
453 | *data = (u16)kmrnctrlsta; | 533 | *data = (u16)kmrnctrlsta; |
454 | 534 | ||
455 | hw->phy.ops.release_phy(hw); | 535 | if (!locked) |
536 | hw->phy.ops.release_phy(hw); | ||
456 | 537 | ||
538 | out: | ||
457 | return ret_val; | 539 | return ret_val; |
458 | } | 540 | } |
459 | 541 | ||
460 | /** | 542 | /** |
461 | * e1000e_write_kmrn_reg - Write kumeran register | 543 | * e1000e_read_kmrn_reg - Read kumeran register |
544 | * @hw: pointer to the HW structure | ||
545 | * @offset: register offset to be read | ||
546 | * @data: pointer to the read data | ||
547 | * | ||
548 | * Acquires semaphore then reads the PHY register at offset using the | ||
549 | * kumeran interface. The information retrieved is stored in data. | ||
550 | * Release the acquired semaphore before exiting. | ||
551 | **/ | ||
552 | s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) | ||
553 | { | ||
554 | return __e1000_read_kmrn_reg(hw, offset, data, false); | ||
555 | } | ||
556 | |||
557 | /** | ||
558 | * e1000e_read_kmrn_reg_locked - Read kumeran register | ||
559 | * @hw: pointer to the HW structure | ||
560 | * @offset: register offset to be read | ||
561 | * @data: pointer to the read data | ||
562 | * | ||
563 | * Reads the PHY register at offset using the kumeran interface. The | ||
564 | * information retrieved is stored in data. | ||
565 | * Assumes semaphore already acquired. | ||
566 | **/ | ||
567 | s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) | ||
568 | { | ||
569 | return __e1000_read_kmrn_reg(hw, offset, data, true); | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * __e1000_write_kmrn_reg - Write kumeran register | ||
462 | * @hw: pointer to the HW structure | 574 | * @hw: pointer to the HW structure |
463 | * @offset: register offset to write to | 575 | * @offset: register offset to write to |
464 | * @data: data to write at register offset | 576 | * @data: data to write at register offset |
577 | * @locked: semaphore has already been acquired or not | ||
465 | * | 578 | * |
466 | * Acquires semaphore, if necessary. Then write the data to PHY register | 579 | * Acquires semaphore, if necessary. Then write the data to PHY register |
467 | * at the offset using the kumeran interface. Release any acquired semaphores | 580 | * at the offset using the kumeran interface. Release any acquired semaphores |
468 | * before exiting. | 581 | * before exiting. |
469 | **/ | 582 | **/ |
470 | s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) | 583 | static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, |
584 | bool locked) | ||
471 | { | 585 | { |
472 | u32 kmrnctrlsta; | 586 | u32 kmrnctrlsta; |
473 | s32 ret_val; | 587 | s32 ret_val = 0; |
474 | 588 | ||
475 | ret_val = hw->phy.ops.acquire_phy(hw); | 589 | if (!locked) { |
476 | if (ret_val) | 590 | if (!(hw->phy.ops.acquire_phy)) |
477 | return ret_val; | 591 | goto out; |
592 | |||
593 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
594 | if (ret_val) | ||
595 | goto out; | ||
596 | } | ||
478 | 597 | ||
479 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & | 598 | kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & |
480 | E1000_KMRNCTRLSTA_OFFSET) | data; | 599 | E1000_KMRNCTRLSTA_OFFSET) | data; |
481 | ew32(KMRNCTRLSTA, kmrnctrlsta); | 600 | ew32(KMRNCTRLSTA, kmrnctrlsta); |
482 | 601 | ||
483 | udelay(2); | 602 | udelay(2); |
484 | hw->phy.ops.release_phy(hw); | ||
485 | 603 | ||
604 | if (!locked) | ||
605 | hw->phy.ops.release_phy(hw); | ||
606 | |||
607 | out: | ||
486 | return ret_val; | 608 | return ret_val; |
487 | } | 609 | } |
488 | 610 | ||
489 | /** | 611 | /** |
612 | * e1000e_write_kmrn_reg - Write kumeran register | ||
613 | * @hw: pointer to the HW structure | ||
614 | * @offset: register offset to write to | ||
615 | * @data: data to write at register offset | ||
616 | * | ||
617 | * Acquires semaphore then writes the data to the PHY register at the offset | ||
618 | * using the kumeran interface. Release the acquired semaphore before exiting. | ||
619 | **/ | ||
620 | s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) | ||
621 | { | ||
622 | return __e1000_write_kmrn_reg(hw, offset, data, false); | ||
623 | } | ||
624 | |||
625 | /** | ||
626 | * e1000e_write_kmrn_reg_locked - Write kumeran register | ||
627 | * @hw: pointer to the HW structure | ||
628 | * @offset: register offset to write to | ||
629 | * @data: data to write at register offset | ||
630 | * | ||
631 | * Write the data to PHY register at the offset using the kumeran interface. | ||
632 | * Assumes semaphore already acquired. | ||
633 | **/ | ||
634 | s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) | ||
635 | { | ||
636 | return __e1000_write_kmrn_reg(hw, offset, data, true); | ||
637 | } | ||
638 | |||
639 | /** | ||
490 | * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link | 640 | * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link |
491 | * @hw: pointer to the HW structure | 641 | * @hw: pointer to the HW structure |
492 | * | 642 | * |
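The point of the new *_locked kumeran entry points above is to let a caller that already owns the swflag/PHY semaphore batch several accesses under one acquire instead of re-taking it per register. A hypothetical caller, assuming the usual e1000e headers are in scope; the offset and the bit being set are made up for illustration:

/* Hypothetical caller: batch several kumeran accesses under one acquire,
 * using the _locked variants introduced above.  The offset and bit are
 * illustrative only. */
s32 configure_kmrn_batch(struct e1000_hw *hw)
{
        s32 ret_val;
        u16 val;

        ret_val = hw->phy.ops.acquire_phy(hw);
        if (ret_val)
                return ret_val;

        ret_val = e1000e_read_kmrn_reg_locked(hw, 0x10 /* assumed */, &val);
        if (ret_val)
                goto release;

        val |= 0x0001;                           /* illustrative bit */
        ret_val = e1000e_write_kmrn_reg_locked(hw, 0x10 /* assumed */, val);

release:
        hw->phy.ops.release_phy(hw);
        return ret_val;
}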
@@ -509,15 +659,6 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) | |||
509 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; | 659 | phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; |
510 | 660 | ||
511 | ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); | 661 | ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); |
512 | if (ret_val) | ||
513 | goto out; | ||
514 | |||
515 | /* Set number of link attempts before downshift */ | ||
516 | ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data); | ||
517 | if (ret_val) | ||
518 | goto out; | ||
519 | phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK; | ||
520 | ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data); | ||
521 | 662 | ||
522 | out: | 663 | out: |
523 | return ret_val; | 664 | return ret_val; |
@@ -2105,6 +2246,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2105 | u32 page = offset >> IGP_PAGE_SHIFT; | 2246 | u32 page = offset >> IGP_PAGE_SHIFT; |
2106 | u32 page_shift = 0; | 2247 | u32 page_shift = 0; |
2107 | 2248 | ||
2249 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2250 | if (ret_val) | ||
2251 | return ret_val; | ||
2252 | |||
2108 | /* Page 800 works differently than the rest so it has its own func */ | 2253 | /* Page 800 works differently than the rest so it has its own func */ |
2109 | if (page == BM_WUC_PAGE) { | 2254 | if (page == BM_WUC_PAGE) { |
2110 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | 2255 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, |
@@ -2112,10 +2257,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2112 | goto out; | 2257 | goto out; |
2113 | } | 2258 | } |
2114 | 2259 | ||
2115 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2116 | if (ret_val) | ||
2117 | goto out; | ||
2118 | |||
2119 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | 2260 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); |
2120 | 2261 | ||
2121 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 2262 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
@@ -2135,18 +2276,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) | |||
2135 | /* Page is shifted left, PHY expects (page x 32) */ | 2276 | /* Page is shifted left, PHY expects (page x 32) */ |
2136 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | 2277 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, |
2137 | (page << page_shift)); | 2278 | (page << page_shift)); |
2138 | if (ret_val) { | 2279 | if (ret_val) |
2139 | hw->phy.ops.release_phy(hw); | ||
2140 | goto out; | 2280 | goto out; |
2141 | } | ||
2142 | } | 2281 | } |
2143 | 2282 | ||
2144 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2283 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2145 | data); | 2284 | data); |
2146 | 2285 | ||
2147 | hw->phy.ops.release_phy(hw); | ||
2148 | |||
2149 | out: | 2286 | out: |
2287 | hw->phy.ops.release_phy(hw); | ||
2150 | return ret_val; | 2288 | return ret_val; |
2151 | } | 2289 | } |
2152 | 2290 | ||
@@ -2167,6 +2305,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2167 | u32 page = offset >> IGP_PAGE_SHIFT; | 2305 | u32 page = offset >> IGP_PAGE_SHIFT; |
2168 | u32 page_shift = 0; | 2306 | u32 page_shift = 0; |
2169 | 2307 | ||
2308 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2309 | if (ret_val) | ||
2310 | return ret_val; | ||
2311 | |||
2170 | /* Page 800 works differently than the rest so it has its own func */ | 2312 | /* Page 800 works differently than the rest so it has its own func */ |
2171 | if (page == BM_WUC_PAGE) { | 2313 | if (page == BM_WUC_PAGE) { |
2172 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | 2314 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, |
@@ -2174,10 +2316,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2174 | goto out; | 2316 | goto out; |
2175 | } | 2317 | } |
2176 | 2318 | ||
2177 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2178 | if (ret_val) | ||
2179 | goto out; | ||
2180 | |||
2181 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); | 2319 | hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); |
2182 | 2320 | ||
2183 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 2321 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
@@ -2197,17 +2335,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2197 | /* Page is shifted left, PHY expects (page x 32) */ | 2335 | /* Page is shifted left, PHY expects (page x 32) */ |
2198 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, | 2336 | ret_val = e1000e_write_phy_reg_mdic(hw, page_select, |
2199 | (page << page_shift)); | 2337 | (page << page_shift)); |
2200 | if (ret_val) { | 2338 | if (ret_val) |
2201 | hw->phy.ops.release_phy(hw); | ||
2202 | goto out; | 2339 | goto out; |
2203 | } | ||
2204 | } | 2340 | } |
2205 | 2341 | ||
2206 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2342 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2207 | data); | 2343 | data); |
2208 | hw->phy.ops.release_phy(hw); | ||
2209 | |||
2210 | out: | 2344 | out: |
2345 | hw->phy.ops.release_phy(hw); | ||
2211 | return ret_val; | 2346 | return ret_val; |
2212 | } | 2347 | } |
2213 | 2348 | ||
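In the BM accessor hunks above, the acquire is hoisted ahead of the page-800 (wakeup register) dispatch and the release is folded into the single out: label, which is what lets e1000_access_phy_wakeup_reg_bm() further down drop its own acquire/release and simply assume the semaphore is held. A compact sketch of the resulting control flow, with every hardware access stubbed and the page shift assumed:

/* Sketch of the restructured BM accessor: one acquire up front, one
 * release at the single exit label, wakeup-page path included. */
#include <stdbool.h>
typedef int s32;
struct hw;

static s32 acquire(struct hw *hw)  { (void)hw; return 0; }
static void release(struct hw *hw) { (void)hw; }
static s32 access_wakeup_page(struct hw *hw, unsigned int off,
                              unsigned short *d, bool read)
{ (void)hw; (void)off; (void)d; (void)read; return 0; } /* assumes sem held */
static s32 mdic_read(struct hw *hw, unsigned int off, unsigned short *d)
{ (void)hw; (void)off; *d = 0; return 0; }

#define WUC_PAGE 800u

s32 bm_read_reg(struct hw *hw, unsigned int offset, unsigned short *data)
{
        unsigned int page = offset >> 5;        /* assumed IGP page shift */
        s32 ret;

        ret = acquire(hw);
        if (ret)
                return ret;

        if (page == WUC_PAGE) {
                /* page 800 helper no longer acquires on its own */
                ret = access_wakeup_page(hw, offset, data, true);
                goto out;
        }

        /* ... page select, then the plain MDIC read ... */
        ret = mdic_read(hw, offset, data);
out:
        release(hw);
        return ret;
}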
@@ -2226,17 +2361,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2226 | s32 ret_val; | 2361 | s32 ret_val; |
2227 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2362 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2228 | 2363 | ||
2364 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2365 | if (ret_val) | ||
2366 | return ret_val; | ||
2367 | |||
2229 | /* Page 800 works differently than the rest so it has its own func */ | 2368 | /* Page 800 works differently than the rest so it has its own func */ |
2230 | if (page == BM_WUC_PAGE) { | 2369 | if (page == BM_WUC_PAGE) { |
2231 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, | 2370 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, |
2232 | true); | 2371 | true); |
2233 | return ret_val; | 2372 | goto out; |
2234 | } | 2373 | } |
2235 | 2374 | ||
2236 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2237 | if (ret_val) | ||
2238 | return ret_val; | ||
2239 | |||
2240 | hw->phy.addr = 1; | 2375 | hw->phy.addr = 1; |
2241 | 2376 | ||
2242 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 2377 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
@@ -2245,16 +2380,14 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2245 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, | 2380 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, |
2246 | page); | 2381 | page); |
2247 | 2382 | ||
2248 | if (ret_val) { | 2383 | if (ret_val) |
2249 | hw->phy.ops.release_phy(hw); | 2384 | goto out; |
2250 | return ret_val; | ||
2251 | } | ||
2252 | } | 2385 | } |
2253 | 2386 | ||
2254 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2387 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2255 | data); | 2388 | data); |
2389 | out: | ||
2256 | hw->phy.ops.release_phy(hw); | 2390 | hw->phy.ops.release_phy(hw); |
2257 | |||
2258 | return ret_val; | 2391 | return ret_val; |
2259 | } | 2392 | } |
2260 | 2393 | ||
@@ -2272,17 +2405,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2272 | s32 ret_val; | 2405 | s32 ret_val; |
2273 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); | 2406 | u16 page = (u16)(offset >> IGP_PAGE_SHIFT); |
2274 | 2407 | ||
2408 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2409 | if (ret_val) | ||
2410 | return ret_val; | ||
2411 | |||
2275 | /* Page 800 works differently than the rest so it has its own func */ | 2412 | /* Page 800 works differently than the rest so it has its own func */ |
2276 | if (page == BM_WUC_PAGE) { | 2413 | if (page == BM_WUC_PAGE) { |
2277 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, | 2414 | ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, |
2278 | false); | 2415 | false); |
2279 | return ret_val; | 2416 | goto out; |
2280 | } | 2417 | } |
2281 | 2418 | ||
2282 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2283 | if (ret_val) | ||
2284 | return ret_val; | ||
2285 | |||
2286 | hw->phy.addr = 1; | 2419 | hw->phy.addr = 1; |
2287 | 2420 | ||
2288 | if (offset > MAX_PHY_MULTI_PAGE_REG) { | 2421 | if (offset > MAX_PHY_MULTI_PAGE_REG) { |
@@ -2290,17 +2423,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2290 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, | 2423 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, |
2291 | page); | 2424 | page); |
2292 | 2425 | ||
2293 | if (ret_val) { | 2426 | if (ret_val) |
2294 | hw->phy.ops.release_phy(hw); | 2427 | goto out; |
2295 | return ret_val; | ||
2296 | } | ||
2297 | } | 2428 | } |
2298 | 2429 | ||
2299 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, | 2430 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, |
2300 | data); | 2431 | data); |
2301 | 2432 | ||
2433 | out: | ||
2302 | hw->phy.ops.release_phy(hw); | 2434 | hw->phy.ops.release_phy(hw); |
2303 | |||
2304 | return ret_val; | 2435 | return ret_val; |
2305 | } | 2436 | } |
2306 | 2437 | ||
@@ -2320,6 +2451,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) | |||
2320 | * 3) Write the address using the address opcode (0x11) | 2451 | * 3) Write the address using the address opcode (0x11) |
2321 | * 4) Read or write the data using the data opcode (0x12) | 2452 | * 4) Read or write the data using the data opcode (0x12) |
2322 | * 5) Restore 769_17.2 to its original value | 2453 | * 5) Restore 769_17.2 to its original value |
2454 | * | ||
2455 | * Assumes semaphore already acquired. | ||
2323 | **/ | 2456 | **/ |
2324 | static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | 2457 | static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, |
2325 | u16 *data, bool read) | 2458 | u16 *data, bool read) |
@@ -2327,20 +2460,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2327 | s32 ret_val; | 2460 | s32 ret_val; |
2328 | u16 reg = BM_PHY_REG_NUM(offset); | 2461 | u16 reg = BM_PHY_REG_NUM(offset); |
2329 | u16 phy_reg = 0; | 2462 | u16 phy_reg = 0; |
2330 | u8 phy_acquired = 1; | ||
2331 | |||
2332 | 2463 | ||
2333 | /* Gig must be disabled for MDIO accesses to page 800 */ | 2464 | /* Gig must be disabled for MDIO accesses to page 800 */ |
2334 | if ((hw->mac.type == e1000_pchlan) && | 2465 | if ((hw->mac.type == e1000_pchlan) && |
2335 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) | 2466 | (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) |
2336 | hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); | 2467 | hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); |
2337 | 2468 | ||
2338 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2339 | if (ret_val) { | ||
2340 | phy_acquired = 0; | ||
2341 | goto out; | ||
2342 | } | ||
2343 | |||
2344 | /* All operations in this function are phy address 1 */ | 2469 | /* All operations in this function are phy address 1 */ |
2345 | hw->phy.addr = 1; | 2470 | hw->phy.addr = 1; |
2346 | 2471 | ||
@@ -2397,8 +2522,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, | |||
2397 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); | 2522 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); |
2398 | 2523 | ||
2399 | out: | 2524 | out: |
2400 | if (phy_acquired == 1) | ||
2401 | hw->phy.ops.release_phy(hw); | ||
2402 | return ret_val; | 2525 | return ret_val; |
2403 | } | 2526 | } |
2404 | 2527 | ||
@@ -2439,52 +2562,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) | |||
2439 | return 0; | 2562 | return 0; |
2440 | } | 2563 | } |
2441 | 2564 | ||
2565 | /** | ||
2566 | * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode | ||
2567 | * @hw: pointer to the HW structure | ||
2568 | * @slow: true for slow mode, false for normal mode | ||
2569 | * | ||
2570 | * Assumes semaphore already acquired. | ||
2571 | **/ | ||
2442 | s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) | 2572 | s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) |
2443 | { | 2573 | { |
2444 | s32 ret_val = 0; | 2574 | s32 ret_val = 0; |
2445 | u16 data = 0; | 2575 | u16 data = 0; |
2446 | 2576 | ||
2447 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2448 | if (ret_val) | ||
2449 | return ret_val; | ||
2450 | |||
2451 | /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ | 2577 | /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ |
2452 | hw->phy.addr = 1; | 2578 | hw->phy.addr = 1; |
2453 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, | 2579 | ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, |
2454 | (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); | 2580 | (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); |
2455 | if (ret_val) { | 2581 | if (ret_val) |
2456 | hw->phy.ops.release_phy(hw); | 2582 | goto out; |
2457 | return ret_val; | 2583 | |
2458 | } | ||
2459 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, | 2584 | ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, |
2460 | (0x2180 | (slow << 10))); | 2585 | (0x2180 | (slow << 10))); |
2586 | if (ret_val) | ||
2587 | goto out; | ||
2461 | 2588 | ||
2462 | /* dummy read when reverting to fast mode - throw away result */ | 2589 | /* dummy read when reverting to fast mode - throw away result */ |
2463 | if (!slow) | 2590 | if (!slow) |
2464 | e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); | 2591 | ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); |
2465 | |||
2466 | hw->phy.ops.release_phy(hw); | ||
2467 | 2592 | ||
2593 | out: | ||
2468 | return ret_val; | 2594 | return ret_val; |
2469 | } | 2595 | } |
2470 | 2596 | ||
2471 | /** | 2597 | /** |
2472 | * e1000_read_phy_reg_hv - Read HV PHY register | 2598 | * __e1000_read_phy_reg_hv - Read HV PHY register |
2473 | * @hw: pointer to the HW structure | 2599 | * @hw: pointer to the HW structure |
2474 | * @offset: register offset to be read | 2600 | * @offset: register offset to be read |
2475 | * @data: pointer to the read data | 2601 | * @data: pointer to the read data |
2602 | * @locked: semaphore has already been acquired or not | ||
2476 | * | 2603 | * |
2477 | * Acquires semaphore, if necessary, then reads the PHY register at offset | 2604 | * Acquires semaphore, if necessary, then reads the PHY register at offset |
2478 | * and storing the retrieved information in data. Release any acquired | 2605 | * and stores the retrieved information in data. Release any acquired |
2479 | * semaphore before exiting. | 2606 | * semaphore before exiting. |
2480 | **/ | 2607 | **/ |
2481 | s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) | 2608 | static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, |
2609 | bool locked) | ||
2482 | { | 2610 | { |
2483 | s32 ret_val; | 2611 | s32 ret_val; |
2484 | u16 page = BM_PHY_REG_PAGE(offset); | 2612 | u16 page = BM_PHY_REG_PAGE(offset); |
2485 | u16 reg = BM_PHY_REG_NUM(offset); | 2613 | u16 reg = BM_PHY_REG_NUM(offset); |
2486 | bool in_slow_mode = false; | 2614 | bool in_slow_mode = false; |
2487 | 2615 | ||
2616 | if (!locked) { | ||
2617 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2618 | if (ret_val) | ||
2619 | return ret_val; | ||
2620 | } | ||
2621 | |||
2488 | /* Workaround failure in MDIO access while cable is disconnected */ | 2622 | /* Workaround failure in MDIO access while cable is disconnected */ |
2489 | if ((hw->phy.type == e1000_phy_82577) && | 2623 | if ((hw->phy.type == e1000_phy_82577) && |
2490 | !(er32(STATUS) & E1000_STATUS_LU)) { | 2624 | !(er32(STATUS) & E1000_STATUS_LU)) { |
@@ -2508,63 +2642,92 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) | |||
2508 | goto out; | 2642 | goto out; |
2509 | } | 2643 | } |
2510 | 2644 | ||
2511 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2512 | if (ret_val) | ||
2513 | goto out; | ||
2514 | |||
2515 | hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); | 2645 | hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); |
2516 | 2646 | ||
2517 | if (page == HV_INTC_FC_PAGE_START) | 2647 | if (page == HV_INTC_FC_PAGE_START) |
2518 | page = 0; | 2648 | page = 0; |
2519 | 2649 | ||
2520 | if (reg > MAX_PHY_MULTI_PAGE_REG) { | 2650 | if (reg > MAX_PHY_MULTI_PAGE_REG) { |
2521 | if ((hw->phy.type != e1000_phy_82578) || | 2651 | u32 phy_addr = hw->phy.addr; |
2522 | ((reg != I82578_ADDR_REG) && | 2652 | |
2523 | (reg != I82578_ADDR_REG + 1))) { | 2653 | hw->phy.addr = 1; |
2524 | u32 phy_addr = hw->phy.addr; | 2654 | |
2525 | 2655 | /* Page is shifted left, PHY expects (page x 32) */ | |
2526 | hw->phy.addr = 1; | 2656 | ret_val = e1000e_write_phy_reg_mdic(hw, |
2527 | 2657 | IGP01E1000_PHY_PAGE_SELECT, | |
2528 | /* Page is shifted left, PHY expects (page x 32) */ | 2658 | (page << IGP_PAGE_SHIFT)); |
2529 | ret_val = e1000e_write_phy_reg_mdic(hw, | 2659 | hw->phy.addr = phy_addr; |
2530 | IGP01E1000_PHY_PAGE_SELECT, | 2660 | |
2531 | (page << IGP_PAGE_SHIFT)); | 2661 | if (ret_val) |
2532 | if (ret_val) { | 2662 | goto out; |
2533 | hw->phy.ops.release_phy(hw); | ||
2534 | goto out; | ||
2535 | } | ||
2536 | hw->phy.addr = phy_addr; | ||
2537 | } | ||
2538 | } | 2663 | } |
2539 | 2664 | ||
2540 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | 2665 | ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, |
2541 | data); | 2666 | data); |
2542 | hw->phy.ops.release_phy(hw); | ||
2543 | |||
2544 | out: | 2667 | out: |
2545 | /* Revert to MDIO fast mode, if applicable */ | 2668 | /* Revert to MDIO fast mode, if applicable */ |
2546 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) | 2669 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) |
2547 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | 2670 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); |
2671 | |||
2672 | if (!locked) | ||
2673 | hw->phy.ops.release_phy(hw); | ||
2548 | 2674 | ||
2549 | return ret_val; | 2675 | return ret_val; |
2550 | } | 2676 | } |
2551 | 2677 | ||
2552 | /** | 2678 | /** |
2553 | * e1000_write_phy_reg_hv - Write HV PHY register | 2679 | * e1000_read_phy_reg_hv - Read HV PHY register |
2680 | * @hw: pointer to the HW structure | ||
2681 | * @offset: register offset to be read | ||
2682 | * @data: pointer to the read data | ||
2683 | * | ||
2684 | * Acquires semaphore then reads the PHY register at offset and stores | ||
2685 | * the retrieved information in data. Release the acquired semaphore | ||
2686 | * before exiting. | ||
2687 | **/ | ||
2688 | s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) | ||
2689 | { | ||
2690 | return __e1000_read_phy_reg_hv(hw, offset, data, false); | ||
2691 | } | ||
2692 | |||
2693 | /** | ||
2694 | * e1000_read_phy_reg_hv_locked - Read HV PHY register | ||
2695 | * @hw: pointer to the HW structure | ||
2696 | * @offset: register offset to be read | ||
2697 | * @data: pointer to the read data | ||
2698 | * | ||
2699 | * Reads the PHY register at offset and stores the retrieved information | ||
2700 | * in data. Assumes semaphore already acquired. | ||
2701 | **/ | ||
2702 | s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) | ||
2703 | { | ||
2704 | return __e1000_read_phy_reg_hv(hw, offset, data, true); | ||
2705 | } | ||
2706 | |||
2707 | /** | ||
2708 | * __e1000_write_phy_reg_hv - Write HV PHY register | ||
2554 | * @hw: pointer to the HW structure | 2709 | * @hw: pointer to the HW structure |
2555 | * @offset: register offset to write to | 2710 | * @offset: register offset to write to |
2556 | * @data: data to write at register offset | 2711 | * @data: data to write at register offset |
2712 | * @locked: semaphore has already been acquired or not | ||
2557 | * | 2713 | * |
2558 | * Acquires semaphore, if necessary, then writes the data to PHY register | 2714 | * Acquires semaphore, if necessary, then writes the data to PHY register |
2559 | * at the offset. Release any acquired semaphores before exiting. | 2715 | * at the offset. Release any acquired semaphores before exiting. |
2560 | **/ | 2716 | **/ |
2561 | s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) | 2717 | static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, |
2718 | bool locked) | ||
2562 | { | 2719 | { |
2563 | s32 ret_val; | 2720 | s32 ret_val; |
2564 | u16 page = BM_PHY_REG_PAGE(offset); | 2721 | u16 page = BM_PHY_REG_PAGE(offset); |
2565 | u16 reg = BM_PHY_REG_NUM(offset); | 2722 | u16 reg = BM_PHY_REG_NUM(offset); |
2566 | bool in_slow_mode = false; | 2723 | bool in_slow_mode = false; |
2567 | 2724 | ||
2725 | if (!locked) { | ||
2726 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2727 | if (ret_val) | ||
2728 | return ret_val; | ||
2729 | } | ||
2730 | |||
2568 | /* Workaround failure in MDIO access while cable is disconnected */ | 2731 | /* Workaround failure in MDIO access while cable is disconnected */ |
2569 | if ((hw->phy.type == e1000_phy_82577) && | 2732 | if ((hw->phy.type == e1000_phy_82577) && |
2570 | !(er32(STATUS) & E1000_STATUS_LU)) { | 2733 | !(er32(STATUS) & E1000_STATUS_LU)) { |
@@ -2588,10 +2751,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) | |||
2588 | goto out; | 2751 | goto out; |
2589 | } | 2752 | } |
2590 | 2753 | ||
2591 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2592 | if (ret_val) | ||
2593 | goto out; | ||
2594 | |||
2595 | hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); | 2754 | hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); |
2596 | 2755 | ||
2597 | if (page == HV_INTC_FC_PAGE_START) | 2756 | if (page == HV_INTC_FC_PAGE_START) |
@@ -2607,50 +2766,70 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) | |||
2607 | ((MAX_PHY_REG_ADDRESS & reg) == 0) && | 2766 | ((MAX_PHY_REG_ADDRESS & reg) == 0) && |
2608 | (data & (1 << 11))) { | 2767 | (data & (1 << 11))) { |
2609 | u16 data2 = 0x7EFF; | 2768 | u16 data2 = 0x7EFF; |
2610 | hw->phy.ops.release_phy(hw); | ||
2611 | ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, | 2769 | ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, |
2612 | &data2, false); | 2770 | &data2, false); |
2613 | if (ret_val) | 2771 | if (ret_val) |
2614 | goto out; | 2772 | goto out; |
2615 | |||
2616 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2617 | if (ret_val) | ||
2618 | goto out; | ||
2619 | } | 2773 | } |
2620 | 2774 | ||
2621 | if (reg > MAX_PHY_MULTI_PAGE_REG) { | 2775 | if (reg > MAX_PHY_MULTI_PAGE_REG) { |
2622 | if ((hw->phy.type != e1000_phy_82578) || | 2776 | u32 phy_addr = hw->phy.addr; |
2623 | ((reg != I82578_ADDR_REG) && | 2777 | |
2624 | (reg != I82578_ADDR_REG + 1))) { | 2778 | hw->phy.addr = 1; |
2625 | u32 phy_addr = hw->phy.addr; | 2779 | |
2626 | 2780 | /* Page is shifted left, PHY expects (page x 32) */ | |
2627 | hw->phy.addr = 1; | 2781 | ret_val = e1000e_write_phy_reg_mdic(hw, |
2628 | 2782 | IGP01E1000_PHY_PAGE_SELECT, | |
2629 | /* Page is shifted left, PHY expects (page x 32) */ | 2783 | (page << IGP_PAGE_SHIFT)); |
2630 | ret_val = e1000e_write_phy_reg_mdic(hw, | 2784 | hw->phy.addr = phy_addr; |
2631 | IGP01E1000_PHY_PAGE_SELECT, | 2785 | |
2632 | (page << IGP_PAGE_SHIFT)); | 2786 | if (ret_val) |
2633 | if (ret_val) { | 2787 | goto out; |
2634 | hw->phy.ops.release_phy(hw); | ||
2635 | goto out; | ||
2636 | } | ||
2637 | hw->phy.addr = phy_addr; | ||
2638 | } | ||
2639 | } | 2788 | } |
2640 | 2789 | ||
2641 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, | 2790 | ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, |
2642 | data); | 2791 | data); |
2643 | hw->phy.ops.release_phy(hw); | ||
2644 | 2792 | ||
2645 | out: | 2793 | out: |
2646 | /* Revert to MDIO fast mode, if applicable */ | 2794 | /* Revert to MDIO fast mode, if applicable */ |
2647 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) | 2795 | if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) |
2648 | ret_val = e1000_set_mdio_slow_mode_hv(hw, false); | 2796 | ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); |
2797 | |||
2798 | if (!locked) | ||
2799 | hw->phy.ops.release_phy(hw); | ||
2649 | 2800 | ||
2650 | return ret_val; | 2801 | return ret_val; |
2651 | } | 2802 | } |
2652 | 2803 | ||
2653 | /** | 2804 | /** |
2805 | * e1000_write_phy_reg_hv - Write HV PHY register | ||
2806 | * @hw: pointer to the HW structure | ||
2807 | * @offset: register offset to write to | ||
2808 | * @data: data to write at register offset | ||
2809 | * | ||
2810 | * Acquires semaphore then writes the data to PHY register at the offset. | ||
2811 | * Release the acquired semaphores before exiting. | ||
2812 | **/ | ||
2813 | s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) | ||
2814 | { | ||
2815 | return __e1000_write_phy_reg_hv(hw, offset, data, false); | ||
2816 | } | ||
2817 | |||
2818 | /** | ||
2819 | * e1000_write_phy_reg_hv_locked - Write HV PHY register | ||
2820 | * @hw: pointer to the HW structure | ||
2821 | * @offset: register offset to write to | ||
2822 | * @data: data to write at register offset | ||
2823 | * | ||
2824 | * Writes the data to PHY register at the offset. Assumes semaphore | ||
2825 | * already acquired. | ||
2826 | **/ | ||
2827 | s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) | ||
2828 | { | ||
2829 | return __e1000_write_phy_reg_hv(hw, offset, data, true); | ||
2830 | } | ||
2831 | |||
2832 | /** | ||
2654 | * e1000_get_phy_addr_for_hv_page - Get PHY address based on page | 2833 | * e1000_get_phy_addr_for_hv_page - Get PHY address based on page |
2655 | * @page: page to be accessed | 2834 | * @page: page to be accessed |
2656 | **/ | 2835 | **/ |
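Taken together, the HV hunks above give the read and write paths the same locked/unlocked split and keep the disconnected-cable slow-MDIO workaround entirely inside the one semaphore hold, OR-ing the revert's return value into the result so an earlier error is not lost. A hedged sketch of that overall flow; every helper below is a stand-in:

/* Sketch of the HV access flow after this patch: the semaphore is taken
 * once (unless the caller already holds it), the slow-MDIO workaround is
 * applied inside it, and the revert error is OR-ed into the result. */
#include <stdbool.h>
typedef int s32;
struct hw;

static s32 acquire(struct hw *hw)  { (void)hw; return 0; }
static void release(struct hw *hw) { (void)hw; }
static bool link_up(struct hw *hw) { (void)hw; return true; }
static s32 set_slow_mdio(struct hw *hw, bool slow) { (void)hw; (void)slow; return 0; }
static s32 mdic_read(struct hw *hw, unsigned int reg, unsigned short *d)
{ (void)hw; (void)reg; *d = 0; return 0; }

static s32 __hv_read(struct hw *hw, unsigned int reg, unsigned short *data,
                     bool locked)
{
        s32 ret = 0;
        bool in_slow_mode = false;

        if (!locked) {
                ret = acquire(hw);
                if (ret)
                        return ret;
        }

        /* workaround: MDIO fails while the cable is disconnected */
        if (!link_up(hw)) {
                ret = set_slow_mdio(hw, true);
                if (ret)
                        goto out;
                in_slow_mode = true;
        }

        ret = mdic_read(hw, reg, data);
out:
        if (in_slow_mode)
                ret |= set_slow_mdio(hw, false);   /* keep any earlier error */
        if (!locked)
                release(hw);
        return ret;
}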
@@ -2671,10 +2850,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page) | |||
2671 | * @data: pointer to the data to be read or written | 2850 | * @data: pointer to the data to be read or written |
2672 | * @read: determines if operation is read or written | 2851 | * @read: determines if operation is read or written |
2673 | * | 2852 | * |
2674 | * Acquires semaphore, if necessary, then reads the PHY register at offset | 2853 | * Reads the PHY register at offset and stores the retrieved information |
2675 | * and storing the retrieved information in data. Release any acquired | 2854 | * in data. Assumes semaphore already acquired. Note that the procedure |
2676 | * semaphores before exiting. Note that the procedure to read these regs | 2855 | * to read these regs uses the address port and data port to read/write. |
2677 | * uses the address port and data port to read/write. | ||
2678 | **/ | 2856 | **/ |
2679 | static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | 2857 | static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, |
2680 | u16 *data, bool read) | 2858 | u16 *data, bool read) |
@@ -2682,20 +2860,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2682 | s32 ret_val; | 2860 | s32 ret_val; |
2683 | u32 addr_reg = 0; | 2861 | u32 addr_reg = 0; |
2684 | u32 data_reg = 0; | 2862 | u32 data_reg = 0; |
2685 | u8 phy_acquired = 1; | ||
2686 | 2863 | ||
2687 | /* This takes care of the difference with desktop vs mobile phy */ | 2864 | /* This takes care of the difference with desktop vs mobile phy */ |
2688 | addr_reg = (hw->phy.type == e1000_phy_82578) ? | 2865 | addr_reg = (hw->phy.type == e1000_phy_82578) ? |
2689 | I82578_ADDR_REG : I82577_ADDR_REG; | 2866 | I82578_ADDR_REG : I82577_ADDR_REG; |
2690 | data_reg = addr_reg + 1; | 2867 | data_reg = addr_reg + 1; |
2691 | 2868 | ||
2692 | ret_val = hw->phy.ops.acquire_phy(hw); | ||
2693 | if (ret_val) { | ||
2694 | hw_dbg(hw, "Could not acquire PHY\n"); | ||
2695 | phy_acquired = 0; | ||
2696 | goto out; | ||
2697 | } | ||
2698 | |||
2699 | /* All operations in this function are phy address 2 */ | 2869 | /* All operations in this function are phy address 2 */ |
2700 | hw->phy.addr = 2; | 2870 | hw->phy.addr = 2; |
2701 | 2871 | ||
@@ -2718,8 +2888,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, | |||
2718 | } | 2888 | } |
2719 | 2889 | ||
2720 | out: | 2890 | out: |
2721 | if (phy_acquired == 1) | ||
2722 | hw->phy.ops.release_phy(hw); | ||
2723 | return ret_val; | 2891 | return ret_val; |
2724 | } | 2892 | } |
2725 | 2893 | ||