Diffstat (limited to 'drivers/net/ixgbe/ixgbe_phy.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_phy.c	166
1 file changed, 84 insertions(+), 82 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 14e9606aa3b3..453e966762f0 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -44,7 +44,6 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
 static bool ixgbe_get_i2c_data(u32 *i2cctl);
 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
 
@@ -61,8 +60,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 
 	if (hw->phy.type == ixgbe_phy_unknown) {
 		for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
-			if (ixgbe_validate_phy_addr(hw, phy_addr)) {
-				hw->phy.addr = phy_addr;
+			if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
 				ixgbe_get_phy_id(hw);
 				hw->phy.type =
 					ixgbe_get_phy_type_from_id(hw->phy.id);
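
Note on the hunk above (not part of the patch): the driver's private probe helper is replaced by the kernel's generic clause-45 helper mdio45_probe(), which decides whether a PHY answers at a given port address. That only works once hw->phy.mdio (a struct mdio_if_info from <linux/mdio.h>) has been populated elsewhere in the driver. A minimal sketch of that setup follows; the wrapper and function names are hypothetical stand-ins for the driver's real accessors.

/* Minimal sketch, assuming hypothetical wrapper names. */
#include <linux/netdevice.h>
#include <linux/mdio.h>

static int example_mdio_read(struct net_device *netdev, int prtad,
			     int devad, u16 addr)
{
	/* would forward to the driver's clause-45 register read, e.g.
	 * hw->phy.ops.read_reg(hw, addr, devad, &value) */
	return 0;
}

static void example_mdio_init(struct mdio_if_info *mdio,
			      struct net_device *netdev)
{
	mdio->prtad = MDIO_PRTAD_NONE;		/* filled in once a PHY is found */
	mdio->mmds = 0;
	mdio->mode_support = MDIO_SUPPORTS_C45;	/* clause-45 PHY */
	mdio->dev = netdev;
	mdio->mdio_read = example_mdio_read;
	/* mdio->mdio_write = ...;  analogous write wrapper */
}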
@@ -78,26 +76,6 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_validate_phy_addr - Determines phy address is valid
- * @hw: pointer to hardware structure
- *
- **/
-static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
-{
-	u16 phy_id = 0;
-	bool valid = false;
-
-	hw->phy.addr = phy_addr;
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
-			     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
-
-	if (phy_id != 0xFFFF && phy_id != 0x0)
-		valid = true;
-
-	return valid;
-}
-
-/**
  * ixgbe_get_phy_id - Get the phy type
  * @hw: pointer to hardware structure
  *
@@ -108,14 +86,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
 	u16 phy_id_high = 0;
 	u16 phy_id_low = 0;
 
-	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
-				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+	status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
 				      &phy_id_high);
 
 	if (status == 0) {
 		hw->phy.id = (u32)(phy_id_high << 16);
-		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
-					      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+		status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
 					      &phy_id_low);
 		hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
 		hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
@@ -160,9 +136,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 	 * Perform soft PHY reset to the PHY_XS.
 	 * This will cause a soft reset to the PHY
 	 */
-	return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-				     IXGBE_MDIO_PHY_XS_DEV_TYPE,
-				     IXGBE_MDIO_PHY_XS_RESET);
+	return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+				     MDIO_CTRL1_RESET);
 }
 
 /**
@@ -192,7 +167,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 	/* Setup and write the address cycle command */
 	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
 		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
 		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
 	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -223,7 +198,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 	 */
 	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
 		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (hw->phy.mdio.prtad <<
+		    IXGBE_MSCA_PHY_ADDR_SHIFT) |
 		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
 
 	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -292,7 +268,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 	/* Setup and write the address cycle command */
 	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
 		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
 		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
 	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -323,7 +299,8 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 	 */
 	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
 		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (hw->phy.mdio.prtad <<
+		    IXGBE_MSCA_PHY_ADDR_SHIFT) |
 		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
 
 	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -365,7 +342,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 	s32 status = IXGBE_NOT_IMPLEMENTED;
 	u32 time_out;
 	u32 max_time_out = 10;
-	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+	u16 autoneg_reg;
 
 	/*
 	 * Set advertisement settings in PHY based on autoneg_advertised
@@ -373,36 +350,31 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 	 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can
 	 * for a 1G.
 	 */
-	hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
-			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);
 
 	if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
-		autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
+		autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
 	else
-		autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
+		autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
 
-	hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
-			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+	hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);
 
 	/* Restart PHY autonegotiation and wait for completion */
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
-			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg);
 
-	autoneg_reg |= IXGBE_MII_RESTART;
+	autoneg_reg |= MDIO_AN_CTRL1_RESTART;
 
-	hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
-			      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+	hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg);
 
 	/* Wait for autonegotiation to finish */
 	for (time_out = 0; time_out < max_time_out; time_out++) {
 		udelay(10);
 		/* Restart PHY autonegotiation and wait for completion */
-		status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
-					      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+		status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
 					      &autoneg_reg);
 
-		autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
-		if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+		autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
+		if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
 			status = 0;
 			break;
 		}
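
One detail worth noting about the hunk above (not part of the patch): the old magic masks operated on bit 12 of the advertisement word, and MDIO_AN_10GBT_CTRL_ADV10G in <linux/mdio.h> names exactly that bit (0x1000), so the rewritten lines are bit-for-bit equivalent to the old "autoneg_reg &= 0xEFFF" / "autoneg_reg |= 0x1000" pair. A minimal sketch with a hypothetical helper name:

#include <linux/mdio.h>

/* Hypothetical helper, for illustration only: set or clear the
 * 10GBASE-T advertisement bit (bit 12, 0x1000) by its generic name. */
static inline u16 example_set_adv10g(u16 reg, bool advertise_10g)
{
	if (advertise_10g)
		return reg | MDIO_AN_10GBT_CTRL_ADV10G;
	return reg & ~MDIO_AN_10GBT_CTRL_ADV10G;
}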
@@ -457,23 +429,21 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
 	s32 ret_val = 0;
 	u32 i;
 
-	hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-			     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
 
 	/* reset the PHY and poll for completion */
-	hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-			      IXGBE_MDIO_PHY_XS_DEV_TYPE,
-			      (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+	hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+			      (phy_data | MDIO_CTRL1_RESET));
 
 	for (i = 0; i < 100; i++) {
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
-				     IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
-		if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+		hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
+				     &phy_data);
+		if ((phy_data & MDIO_CTRL1_RESET) == 0)
 			break;
 		msleep(10);
 	}
 
-	if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+	if ((phy_data & MDIO_CTRL1_RESET) != 0) {
 		hw_dbg(hw, "PHY reset did not complete.\n");
 		ret_val = IXGBE_ERR_PHY;
 		goto out;
@@ -509,7 +479,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
 			for (i = 0; i < edata; i++) {
 				hw->eeprom.ops.read(hw, data_offset, &eword);
 				hw->phy.ops.write_reg(hw, phy_offset,
-						      IXGBE_TWINAX_DEV, eword);
+						      MDIO_MMD_PMAPMD, eword);
 				hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
 				       phy_offset);
 				data_offset++;
@@ -552,18 +522,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
 	u32 vendor_oui = 0;
+	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
 	u8 identifier = 0;
 	u8 comp_codes_1g = 0;
 	u8 comp_codes_10g = 0;
 	u8 oui_bytes[3] = {0, 0, 0};
-	u8 transmission_media = 0;
+	u8 cable_tech = 0;
 	u16 enforce_sfp = 0;
 
+	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
+		goto out;
+	}
+
 	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
 					     &identifier);
 
-	if (status == IXGBE_ERR_SFP_NOT_PRESENT) {
+	if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) {
+		status = IXGBE_ERR_SFP_NOT_PRESENT;
 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+		if (hw->phy.type != ixgbe_phy_nl) {
+			hw->phy.id = 0;
+			hw->phy.type = ixgbe_phy_unknown;
+		}
 		goto out;
 	}
 
@@ -572,8 +554,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 				    &comp_codes_1g);
 	hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
 				    &comp_codes_10g);
-	hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
-				    &transmission_media);
+	hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY,
+				    &cable_tech);
 
 	/* ID Module
 	 * =========
@@ -586,7 +568,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 	 *  6   SFP_SR/LR_CORE1 - 82599-specific
 	 */
 	if (hw->mac.type == ixgbe_mac_82598EB) {
-		if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
 		else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
 			hw->phy.sfp_type = ixgbe_sfp_type_sr;
@@ -595,7 +577,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 		else
 			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 	} else if (hw->mac.type == ixgbe_mac_82599EB) {
-		if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
+		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 			if (hw->bus.lan_id == 0)
 				hw->phy.sfp_type =
 					      ixgbe_sfp_type_da_cu_core0;
@@ -620,8 +602,19 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
 	}
 
+	if (hw->phy.sfp_type != stored_sfp_type)
+		hw->phy.sfp_setup_needed = true;
+
+	/* Determine if the SFP+ PHY is dual speed or not. */
+	hw->phy.multispeed_fiber = false;
+	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+		hw->phy.multispeed_fiber = true;
+
 	/* Determine PHY vendor */
-	if (hw->phy.type == ixgbe_phy_unknown) {
+	if (hw->phy.type != ixgbe_phy_nl) {
 		hw->phy.id = identifier;
 		hw->phy.ops.read_i2c_eeprom(hw,
 					    IXGBE_SFF_VENDOR_OUI_BYTE0,
@@ -640,8 +633,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
 		switch (vendor_oui) {
 		case IXGBE_SFF_VENDOR_OUI_TYCO:
-			if (transmission_media &
-			    IXGBE_SFF_TWIN_AX_CAPABLE)
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 				hw->phy.type = ixgbe_phy_tw_tyco;
 			break;
 		case IXGBE_SFF_VENDOR_OUI_FTL:
@@ -654,31 +646,42 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 			hw->phy.type = ixgbe_phy_sfp_intel;
 			break;
 		default:
-			if (transmission_media &
-			    IXGBE_SFF_TWIN_AX_CAPABLE)
+			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
 				hw->phy.type = ixgbe_phy_tw_unknown;
 			else
 				hw->phy.type = ixgbe_phy_sfp_unknown;
 			break;
 		}
 	}
-	if (hw->mac.type == ixgbe_mac_82598EB ||
-	    (hw->phy.sfp_type != ixgbe_sfp_type_sr &&
-	     hw->phy.sfp_type != ixgbe_sfp_type_lr &&
-	     hw->phy.sfp_type != ixgbe_sfp_type_srlr_core0 &&
-	     hw->phy.sfp_type != ixgbe_sfp_type_srlr_core1)) {
+
+	/* All passive DA cables are supported */
+	if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+		status = 0;
+		goto out;
+	}
+
+	/* 1G SFP modules are not supported */
+	if (comp_codes_10g == 0) {
+		hw->phy.type = ixgbe_phy_sfp_unsupported;
+		status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+		goto out;
+	}
+
+	/* Anything else 82598-based is supported */
+	if (hw->mac.type == ixgbe_mac_82598EB) {
 		status = 0;
 		goto out;
 	}
 
-	hw->eeprom.ops.read(hw, IXGBE_PHY_ENFORCE_INTEL_SFP_OFFSET,
-			    &enforce_sfp);
-	if (!(enforce_sfp & IXGBE_PHY_ALLOW_ANY_SFP)) {
+	/* This is guaranteed to be 82599, no need to check for NULL */
+	hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+	if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
 		/* Make sure we're a supported PHY type */
 		if (hw->phy.type == ixgbe_phy_sfp_intel) {
 			status = 0;
 		} else {
 			hw_dbg(hw, "SFP+ module not supported\n");
+			hw->phy.type = ixgbe_phy_sfp_unsupported;
 			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
 		}
 	} else {
@@ -1279,7 +1282,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 		udelay(10);
 		status = hw->phy.ops.read_reg(hw,
 					      IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
-					      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+					      MDIO_MMD_VEND1,
 					      &phy_data);
 		phy_link = phy_data &
 			   IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
@@ -1307,8 +1310,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
 {
 	s32 status = 0;
 
-	status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
-				      IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+	status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1,
 				      firmware_version);
 
 	return status;