Diffstat (limited to 'drivers/net/ixgbe/ixgbe_phy.c')
 -rw-r--r--   drivers/net/ixgbe/ixgbe_phy.c | 244
 1 file changed, 89 insertions, 155 deletions
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8002931ae823..764035a8c9a1 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************

   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2007 Intel Corporation.
+  Copyright(c) 1999 - 2008 Intel Corporation.

   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
   the file called "COPYING".

   Contact Information:
-  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

@@ -33,32 +32,36 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"

+static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u16 phy_data);

 /**
- * ixgbe_identify_phy - Get physical layer module
+ * ixgbe_identify_phy_generic - Get physical layer module
  * @hw: pointer to hardware structure
  *
  * Determines the physical layer module found on the current adapter.
  **/
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	u32 phy_addr;

-	for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
-		if (ixgbe_validate_phy_addr(hw, phy_addr)) {
-			hw->phy.addr = phy_addr;
-			ixgbe_get_phy_id(hw);
-			hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
-			status = 0;
-			break;
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+			if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+				hw->phy.addr = phy_addr;
+				ixgbe_get_phy_id(hw);
+				hw->phy.type =
+				        ixgbe_get_phy_type_from_id(hw->phy.id);
+				status = 0;
+				break;
+			}
 		}
+	} else {
+		status = 0;
 	}
+
 	return status;
 }

@@ -73,10 +76,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
 	bool valid = false;

 	hw->phy.addr = phy_addr;
-	ixgbe_read_phy_reg(hw,
-	                   IXGBE_MDIO_PHY_ID_HIGH,
-	                   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-	                   &phy_id);
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+	                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);

 	if (phy_id != 0xFFFF && phy_id != 0x0)
 		valid = true;
@@ -95,21 +96,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
 	u16 phy_id_high = 0;
 	u16 phy_id_low = 0;

-	status = ixgbe_read_phy_reg(hw,
-	                            IXGBE_MDIO_PHY_ID_HIGH,
-	                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-	                            &phy_id_high);
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+	                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+	                              &phy_id_high);

 	if (status == 0) {
 		hw->phy.id = (u32)(phy_id_high << 16);
-		status = ixgbe_read_phy_reg(hw,
-		                            IXGBE_MDIO_PHY_ID_LOW,
-		                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-		                            &phy_id_low);
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+		                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+		                              &phy_id_low);
 		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
 		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
 	}
-
 	return status;
 }

@@ -123,9 +121,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
 	enum ixgbe_phy_type phy_type;

 	switch (phy_id) {
-	case TN1010_PHY_ID:
-		phy_type = ixgbe_phy_tn;
-		break;
 	case QT2022_PHY_ID:
 		phy_type = ixgbe_phy_qt;
 		break;
@@ -138,32 +133,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
 }

 /**
- * ixgbe_reset_phy - Performs a PHY reset
+ * ixgbe_reset_phy_generic - Performs a PHY reset
  * @hw: pointer to hardware structure
  **/
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 {
 	/*
 	 * Perform soft PHY reset to the PHY_XS.
 	 * This will cause a soft reset to the PHY
 	 */
-	return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-				   IXGBE_MDIO_PHY_XS_DEV_TYPE,
-				   IXGBE_MDIO_PHY_XS_RESET);
+	return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+	                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
+	                             IXGBE_MDIO_PHY_XS_RESET);
 }

 /**
- * ixgbe_read_phy_reg - Reads a value from a specified PHY register
+ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
  * @hw: pointer to hardware structure
  * @reg_addr: 32 bit address of PHY register to read
  * @phy_data: Pointer to read data from PHY register
  **/
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-		       u32 device_type, u16 *phy_data)
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u16 *phy_data)
 {
 	u32 command;
 	u32 i;
-	u32 timeout = 10;
 	u32 data;
 	s32 status = 0;
 	u16 gssr;
@@ -179,9 +173,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 	if (status == 0) {
 		/* Setup and write the address cycle command */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

@@ -190,7 +184,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * The MDI Command bit will clear when the operation is
 		 * complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);

 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -210,9 +204,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 			 * command
 			 */
 			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-				   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+			           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+			           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+			           (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));

 			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

@@ -221,7 +215,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 			 * completed. The MDI Command bit will clear when the
 			 * operation is complete
 			 */
-			for (i = 0; i < timeout; i++) {
+			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 				udelay(10);

 				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -231,8 +225,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 			}

 			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-				hw_dbg(hw,
-				       "PHY read command didn't complete\n");
+				hw_dbg(hw, "PHY read command didn't complete\n");
 				status = IXGBE_ERR_PHY;
 			} else {
 				/*
@@ -247,22 +240,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,

 		ixgbe_release_swfw_sync(hw, gssr);
 	}
+
 	return status;
 }

 /**
- * ixgbe_write_phy_reg - Writes a value to specified PHY register
+ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
  * @hw: pointer to hardware structure
  * @reg_addr: 32 bit PHY register to write
  * @device_type: 5 bit device type
  * @phy_data: Data to write to the PHY register
  **/
-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-			       u32 device_type, u16 phy_data)
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                                u32 device_type, u16 phy_data)
 {
 	u32 command;
 	u32 i;
-	u32 timeout = 10;
 	s32 status = 0;
 	u16 gssr;

@@ -280,9 +273,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,

 		/* Setup and write the address cycle command */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

@@ -291,19 +284,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * The MDI Command bit will clear when the operation is
 		 * complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);

 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);

-			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
-				hw_dbg(hw, "PHY address cmd didn't complete\n");
+			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
 				break;
-			}
 		}

-		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
+		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+			hw_dbg(hw, "PHY address cmd didn't complete\n");
 			status = IXGBE_ERR_PHY;
+		}

 		if (status == 0) {
 			/*
@@ -311,9 +304,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 			 * command
 			 */
 			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-				   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+			           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+			           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+			           (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));

 			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

@@ -322,20 +315,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 			 * completed. The MDI Command bit will clear when the
 			 * operation is complete
 			 */
-			for (i = 0; i < timeout; i++) {
+			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 				udelay(10);

 				command = IXGBE_READ_REG(hw, IXGBE_MSCA);

-				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
-					hw_dbg(hw, "PHY write command did not "
-						"complete.\n");
+				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
 					break;
-				}
 			}

-			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
+			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+				hw_dbg(hw, "PHY address cmd didn't complete\n");
 				status = IXGBE_ERR_PHY;
+			}
 		}

 		ixgbe_release_swfw_sync(hw, gssr);
@@ -345,67 +337,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 }

 /**
- * ixgbe_setup_tnx_phy_link - Set and restart autoneg
+ * ixgbe_setup_phy_link_generic - Set and restart autoneg
  * @hw: pointer to hardware structure
  *
  * Restart autonegotiation and PHY and waits for completion.
  **/
-s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_NOT_IMPLEMENTED;
 	u32 time_out;
 	u32 max_time_out = 10;
-	u16 autoneg_speed_selection_register = 0x10;
-	u16 autoneg_restart_mask = 0x0200;
-	u16 autoneg_complete_mask = 0x0020;
-	u16 autoneg_reg = 0;
+	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;

 	/*
 	 * Set advertisement settings in PHY based on autoneg_advertised
 	 * settings. If autoneg_advertised = 0, then advertise default values
-	 * txn devices cannot be "forced" to a autoneg 10G and fail. But can
+	 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can
 	 * for a 1G.
 	 */
-	ixgbe_read_phy_reg(hw,
-	                   autoneg_speed_selection_register,
-	                   IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                   &autoneg_reg);
+	hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);

 	if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
 		autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
 	else
 		autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */

-	ixgbe_write_phy_reg(hw,
-	                    autoneg_speed_selection_register,
-	                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                    autoneg_reg);
-
+	hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+	                      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);

 	/* Restart PHY autonegotiation and wait for completion */
-	ixgbe_read_phy_reg(hw,
-	                   IXGBE_MDIO_AUTO_NEG_CONTROL,
-	                   IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                   &autoneg_reg);
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);

-	autoneg_reg |= autoneg_restart_mask;
+	autoneg_reg |= IXGBE_MII_RESTART;

-	ixgbe_write_phy_reg(hw,
-	                    IXGBE_MDIO_AUTO_NEG_CONTROL,
-	                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                    autoneg_reg);
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+	                      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);

 	/* Wait for autonegotiation to finish */
 	for (time_out = 0; time_out < max_time_out; time_out++) {
 		udelay(10);
 		/* Restart PHY autonegotiation and wait for completion */
-		status = ixgbe_read_phy_reg(hw,
-		                            IXGBE_MDIO_AUTO_NEG_STATUS,
-		                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-		                            &autoneg_reg);
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+		                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		                              &autoneg_reg);

-		autoneg_reg &= autoneg_complete_mask;
-		if (autoneg_reg == autoneg_complete_mask) {
+		autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+		if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
 			status = 0;
 			break;
 		}
@@ -418,64 +397,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
 }

 /**
- * ixgbe_check_tnx_phy_link - Determine link and speed status
- * @hw: pointer to hardware structure
- *
- * Reads the VS1 register to determine if link is up and the current speed for
- * the PHY.
- **/
-s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
-                             bool *link_up)
-{
-	s32 status = 0;
-	u32 time_out;
-	u32 max_time_out = 10;
-	u16 phy_link = 0;
-	u16 phy_speed = 0;
-	u16 phy_data = 0;
-
-	/* Initialize speed and link to default case */
-	*link_up = false;
-	*speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-	/*
-	 * Check current speed and link status of the PHY register.
-	 * This is a vendor specific register and may have to
-	 * be changed for other copper PHYs.
-	 */
-	for (time_out = 0; time_out < max_time_out; time_out++) {
-		udelay(10);
-		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
-			*link_up = true;
-			if (phy_speed ==
-			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
-				*speed = IXGBE_LINK_SPEED_1GB_FULL;
-			break;
-		} else {
-			status = ixgbe_read_phy_reg(hw,
-			               IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
-			               IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-			               &phy_data);
-			phy_link = phy_data &
-			           IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
-			phy_speed = phy_data &
-			            IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
-		}
-	}
-
-	return status;
-}
-
-/**
- * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
+ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
  * @hw: pointer to hardware structure
  * @speed: new link speed
  * @autoneg: true if autonegotiation enabled
  **/
-s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
-                                   bool autoneg,
-                                   bool autoneg_wait_to_complete)
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed speed,
+                                       bool autoneg,
+                                       bool autoneg_wait_to_complete)
 {
+
 	/*
 	 * Clear autoneg_advertised and set new values based on input link
 	 * speed.
@@ -484,11 +416,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,

 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

 	/* Setup link based on the new speed settings */
-	ixgbe_setup_tnx_phy_link(hw);
+	hw->phy.ops.setup_link(hw);

 	return 0;
 }
+
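
The change that repeats throughout this diff is the move from direct calls such as ixgbe_read_phy_reg()/ixgbe_write_phy_reg() to dispatch through a per-device function-pointer table (hw->phy.ops.read_reg()/hw->phy.ops.write_reg()), with the shared helpers renamed to *_generic so that different MAC/PHY variants can plug in their own accessors. The sketch below illustrates only that ops-table pattern in isolation; the struct layout, names, and the stubbed register file are simplified stand-ins for illustration, not the real ixgbe_hw definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed-down stand-ins for the real ixgbe structures. */
struct fake_hw;

struct fake_phy_ops {
	/* Same general shape as the _generic helpers: return 0 on success. */
	int32_t (*read_reg)(struct fake_hw *hw, uint32_t reg,
	                    uint32_t dev_type, uint16_t *val);
	int32_t (*write_reg)(struct fake_hw *hw, uint32_t reg,
	                     uint32_t dev_type, uint16_t val);
};

struct fake_hw {
	struct fake_phy_ops phy_ops;
	uint16_t fake_regs[32];	/* stub register file instead of real MDIO access */
};

/* "Generic" implementations that a specific device could override at init. */
static int32_t generic_read_reg(struct fake_hw *hw, uint32_t reg,
                                uint32_t dev_type, uint16_t *val)
{
	(void)dev_type;
	*val = hw->fake_regs[reg % 32];
	return 0;
}

static int32_t generic_write_reg(struct fake_hw *hw, uint32_t reg,
                                 uint32_t dev_type, uint16_t val)
{
	(void)dev_type;
	hw->fake_regs[reg % 32] = val;
	return 0;
}

int main(void)
{
	struct fake_hw hw = {
		/* Binding happens once, like assigning the _generic ops at setup. */
		.phy_ops = {
			.read_reg  = generic_read_reg,
			.write_reg = generic_write_reg,
		},
	};
	uint16_t val = 0;

	/* Callers only ever go through the table, as the patched code does. */
	hw.phy_ops.write_reg(&hw, 0x10, 7, 0x1000);
	hw.phy_ops.read_reg(&hw, 0x10, 7, &val);
	printf("reg 0x10 = 0x%04x\n", val);
	return 0;
}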