Diffstat (limited to 'drivers/net/ixgbe/ixgbe_phy.c')
 -rw-r--r--  drivers/net/ixgbe/ixgbe_phy.c | 241
 1 file changed, 88 insertions(+), 153 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8002931ae82..63a70176241 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -33,32 +33,36 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
+static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u16 phy_data);
 
 /**
- * ixgbe_identify_phy - Get physical layer module
+ * ixgbe_identify_phy_generic - Get physical layer module
  * @hw: pointer to hardware structure
  *
  * Determines the physical layer module found on the current adapter.
  **/
-s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	u32 phy_addr;
 
-	for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
-		if (ixgbe_validate_phy_addr(hw, phy_addr)) {
-			hw->phy.addr = phy_addr;
-			ixgbe_get_phy_id(hw);
-			hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
-			status = 0;
-			break;
+	if (hw->phy.type == ixgbe_phy_unknown) {
+		for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+			if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+				hw->phy.addr = phy_addr;
+				ixgbe_get_phy_id(hw);
+				hw->phy.type =
+				        ixgbe_get_phy_type_from_id(hw->phy.id);
+				status = 0;
+				break;
+			}
 		}
+	} else {
+		status = 0;
 	}
+
 	return status;
 }
 
@@ -73,10 +77,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
 	bool valid = false;
 
 	hw->phy.addr = phy_addr;
-	ixgbe_read_phy_reg(hw,
-	                   IXGBE_MDIO_PHY_ID_HIGH,
-	                   IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-	                   &phy_id);
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+	                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
 
 	if (phy_id != 0xFFFF && phy_id != 0x0)
 		valid = true;
@@ -95,21 +97,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
 	u16 phy_id_high = 0;
 	u16 phy_id_low = 0;
 
-	status = ixgbe_read_phy_reg(hw,
-	                            IXGBE_MDIO_PHY_ID_HIGH,
-	                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-	                            &phy_id_high);
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+	                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+	                              &phy_id_high);
 
 	if (status == 0) {
 		hw->phy.id = (u32)(phy_id_high << 16);
-		status = ixgbe_read_phy_reg(hw,
-		                            IXGBE_MDIO_PHY_ID_LOW,
-		                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-		                            &phy_id_low);
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+		                              IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+		                              &phy_id_low);
 		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
 		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
 	}
-
 	return status;
 }
 
@@ -123,9 +122,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
 	enum ixgbe_phy_type phy_type;
 
 	switch (phy_id) {
-	case TN1010_PHY_ID:
-		phy_type = ixgbe_phy_tn;
-		break;
 	case QT2022_PHY_ID:
 		phy_type = ixgbe_phy_qt;
 		break;
@@ -138,32 +134,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
 }
 
 /**
- * ixgbe_reset_phy - Performs a PHY reset
+ * ixgbe_reset_phy_generic - Performs a PHY reset
  * @hw: pointer to hardware structure
  **/
-s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 {
 	/*
 	 * Perform soft PHY reset to the PHY_XS.
 	 * This will cause a soft reset to the PHY
 	 */
-	return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-	                           IXGBE_MDIO_PHY_XS_DEV_TYPE,
-	                           IXGBE_MDIO_PHY_XS_RESET);
+	return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+	                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
+	                             IXGBE_MDIO_PHY_XS_RESET);
 }
 
 /**
- * ixgbe_read_phy_reg - Reads a value from a specified PHY register
+ * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
  * @hw: pointer to hardware structure
  * @reg_addr: 32 bit address of PHY register to read
  * @phy_data: Pointer to read data from PHY register
  **/
-s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-                       u32 device_type, u16 *phy_data)
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u16 *phy_data)
 {
 	u32 command;
 	u32 i;
-	u32 timeout = 10;
 	u32 data;
 	s32 status = 0;
 	u16 gssr;
@@ -179,9 +174,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 	if (status == 0) {
 		/* Setup and write the address cycle command */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-		            (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		            (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		            (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -190,7 +185,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * The MDI Command bit will clear when the operation is
 		 * complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);
 
 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -210,9 +205,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * command
 		 */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-		            (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		            (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		            (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
 
 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -221,7 +216,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * completed. The MDI Command bit will clear when the
 		 * operation is complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);
 
 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -231,8 +226,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		}
 
 		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-			hw_dbg(hw,
-			       "PHY read command didn't complete\n");
+			hw_dbg(hw, "PHY read command didn't complete\n");
 			status = IXGBE_ERR_PHY;
 		} else {
 			/*
@@ -247,22 +241,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 
 		ixgbe_release_swfw_sync(hw, gssr);
 	}
+
 	return status;
 }
 
 /**
- * ixgbe_write_phy_reg - Writes a value to specified PHY register
+ * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
  * @hw: pointer to hardware structure
  * @reg_addr: 32 bit PHY register to write
  * @device_type: 5 bit device type
  * @phy_data: Data to write to the PHY register
  **/
-static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
-                               u32 device_type, u16 phy_data)
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                                u32 device_type, u16 phy_data)
 {
 	u32 command;
 	u32 i;
-	u32 timeout = 10;
 	s32 status = 0;
 	u16 gssr;
 
@@ -280,9 +274,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 
 		/* Setup and write the address cycle command */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-		            (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		            (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		            (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -291,19 +285,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * The MDI Command bit will clear when the operation is
 		 * complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);
 
 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
 
-			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
-				hw_dbg(hw, "PHY address cmd didn't complete\n");
+			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
 				break;
-			}
 		}
 
-		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
+		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+			hw_dbg(hw, "PHY address cmd didn't complete\n");
 			status = IXGBE_ERR_PHY;
+		}
 
 	if (status == 0) {
 		/*
@@ -311,9 +305,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * command
 		 */
 		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
-		            (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		            (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-		            (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+		           (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		           (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		           (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
 
 		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
@@ -322,20 +316,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 		 * completed. The MDI Command bit will clear when the
 		 * operation is complete
 		 */
-		for (i = 0; i < timeout; i++) {
+		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
 			udelay(10);
 
 			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
 
-			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) {
-				hw_dbg(hw, "PHY write command did not "
-				       "complete.\n");
+			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
 				break;
-			}
 		}
 
-		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0)
+		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+			hw_dbg(hw, "PHY address cmd didn't complete\n");
 			status = IXGBE_ERR_PHY;
+		}
 	}
 
 	ixgbe_release_swfw_sync(hw, gssr);
@@ -345,67 +338,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
 }
 
 /**
- * ixgbe_setup_tnx_phy_link - Set and restart autoneg
+ * ixgbe_setup_phy_link_generic - Set and restart autoneg
  * @hw: pointer to hardware structure
  *
  * Restart autonegotiation and PHY and waits for completion.
  **/
-s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_NOT_IMPLEMENTED;
 	u32 time_out;
 	u32 max_time_out = 10;
-	u16 autoneg_speed_selection_register = 0x10;
-	u16 autoneg_restart_mask = 0x0200;
-	u16 autoneg_complete_mask = 0x0020;
-	u16 autoneg_reg = 0;
+	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
 
 	/*
 	 * Set advertisement settings in PHY based on autoneg_advertised
 	 * settings. If autoneg_advertised = 0, then advertise default values
-	 * txn devices cannot be "forced" to a autoneg 10G and fail. But can
+	 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can
 	 * for a 1G.
 	 */
-	ixgbe_read_phy_reg(hw,
-	                   autoneg_speed_selection_register,
-	                   IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                   &autoneg_reg);
+	hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
 
 	if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
 		autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
 	else
 		autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
 
-	ixgbe_write_phy_reg(hw,
-	                    autoneg_speed_selection_register,
-	                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                    autoneg_reg);
-
+	hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
+	                      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
 
 	/* Restart PHY autonegotiation and wait for completion */
-	ixgbe_read_phy_reg(hw,
-	                   IXGBE_MDIO_AUTO_NEG_CONTROL,
-	                   IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                   &autoneg_reg);
+	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+	                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
 
-	autoneg_reg |= autoneg_restart_mask;
+	autoneg_reg |= IXGBE_MII_RESTART;
 
-	ixgbe_write_phy_reg(hw,
-	                    IXGBE_MDIO_AUTO_NEG_CONTROL,
-	                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-	                    autoneg_reg);
+	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+	                      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
 
 	/* Wait for autonegotiation to finish */
 	for (time_out = 0; time_out < max_time_out; time_out++) {
 		udelay(10);
 		/* Restart PHY autonegotiation and wait for completion */
-		status = ixgbe_read_phy_reg(hw,
-		                            IXGBE_MDIO_AUTO_NEG_STATUS,
-		                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
-		                            &autoneg_reg);
+		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+		                              IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		                              &autoneg_reg);
 
-		autoneg_reg &= autoneg_complete_mask;
-		if (autoneg_reg == autoneg_complete_mask) {
+		autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+		if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
 			status = 0;
 			break;
 		}
@@ -418,64 +398,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_check_tnx_phy_link - Determine link and speed status
- * @hw: pointer to hardware structure
- *
- * Reads the VS1 register to determine if link is up and the current speed for
- * the PHY.
- **/
-s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
-                             bool *link_up)
-{
-	s32 status = 0;
-	u32 time_out;
-	u32 max_time_out = 10;
-	u16 phy_link = 0;
-	u16 phy_speed = 0;
-	u16 phy_data = 0;
-
-	/* Initialize speed and link to default case */
-	*link_up = false;
-	*speed = IXGBE_LINK_SPEED_10GB_FULL;
-
-	/*
-	 * Check current speed and link status of the PHY register.
-	 * This is a vendor specific register and may have to
-	 * be changed for other copper PHYs.
-	 */
-	for (time_out = 0; time_out < max_time_out; time_out++) {
-		udelay(10);
-		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
-			*link_up = true;
-			if (phy_speed ==
-			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
-				*speed = IXGBE_LINK_SPEED_1GB_FULL;
-			break;
-		} else {
-			status = ixgbe_read_phy_reg(hw,
-			                  IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
-			                  IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
-			                  &phy_data);
-			phy_link = phy_data &
-			           IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
-			phy_speed = phy_data &
-			            IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
-		}
-	}
-
-	return status;
-}
-
-/**
- * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
+ * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
  * @hw: pointer to hardware structure
  * @speed: new link speed
  * @autoneg: true if autonegotiation enabled
  **/
-s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
-                                   bool autoneg,
-                                   bool autoneg_wait_to_complete)
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed speed,
+                                       bool autoneg,
+                                       bool autoneg_wait_to_complete)
 {
+
 	/*
 	 * Clear autoneg_advertised and set new values based on input link
 	 * speed.
@@ -484,11 +417,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
 
 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
 	/* Setup link based on the new speed settings */
-	ixgbe_setup_tnx_phy_link(hw);
+	hw->phy.ops.setup_link(hw);
 
 	return 0;
 }
+