author    Auke Kok <auke-jan.h.kok@intel.com>    2008-06-27 14:00:18 -0400
committer Jeff Garzik <jgarzik@redhat.com>       2008-07-04 08:46:59 -0400
commit    652fff321490fc3fcc8e8d302826a9c2379f03d2 (patch)
tree      00e9675980cbee174305533dd3d0274715afa5e6 /drivers/net/igb
parent    d67ce5338c7c71313f01e508d893bb8104ce459a (diff)
igb: eliminate hw from the hw_dbg macro arguments
Various cosmetic cleanups and comment fixes. Eliminate the hw argument from the hw_dbg macro, since it is always used.

Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
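The change itself is mechanical: hw_dbg() used to take the e1000_hw pointer as its first argument, and after this patch the macro body refers to a local variable named hw directly, so every call site simply drops that first argument (see the e1000_hw.h hunk below). The following is a minimal userspace sketch of the pattern, not the kernel code: printf stands in for printk, and the stripped-down e1000_hw with only a dev_name field is invented for illustration.

```c
#include <stdio.h>

/* Simplified stand-ins for the driver's types and helper. */
struct e1000_hw { const char *dev_name; };

static const char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	return hw->dev_name;
}

/*
 * Before: hw_dbg(hw, fmt, ...)  -- 'hw' passed explicitly at every call site.
 * After:  hw_dbg(fmt, ...)      -- the macro body names 'hw' itself and relies
 * on a local variable called 'hw' being in scope wherever it is used.
 * (GNU named-variadic macro syntax, as in the kernel header.)
 */
#define hw_dbg(format, arg...) \
	printf("%s: " format, igb_get_hw_dev_name(hw), ##arg)

static void example(struct e1000_hw *hw)
{
	/* Expands to a printf that reads example()'s own 'hw' parameter. */
	hw_dbg("PHY Address %u is out of range\n", 300u);
}

int main(void)
{
	struct e1000_hw hw = { .dev_name = "eth0" };
	example(&hw);
	return 0;
}
```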
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--   drivers/net/igb/e1000_82575.c   | 115
-rw-r--r--   drivers/net/igb/e1000_82575.h   |   6
-rw-r--r--   drivers/net/igb/e1000_defines.h |  27
-rw-r--r--   drivers/net/igb/e1000_hw.h      |   8
-rw-r--r--   drivers/net/igb/e1000_mac.c     |  75
-rw-r--r--   drivers/net/igb/e1000_nvm.c     |  22
-rw-r--r--   drivers/net/igb/e1000_phy.c     |  76
7 files changed, 147 insertions(+), 182 deletions(-)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index e6dd387fdb0e..84ef695ccaca 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -272,7 +272,7 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
272 u32 i, i2ccmd = 0; 272 u32 i, i2ccmd = 0;
273 273
274 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 274 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
275 hw_dbg(hw, "PHY Address %u is out of range\n", offset); 275 hw_dbg("PHY Address %u is out of range\n", offset);
276 return -E1000_ERR_PARAM; 276 return -E1000_ERR_PARAM;
277 } 277 }
278 278
@@ -295,11 +295,11 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
295 break; 295 break;
296 } 296 }
297 if (!(i2ccmd & E1000_I2CCMD_READY)) { 297 if (!(i2ccmd & E1000_I2CCMD_READY)) {
298 hw_dbg(hw, "I2CCMD Read did not complete\n"); 298 hw_dbg("I2CCMD Read did not complete\n");
299 return -E1000_ERR_PHY; 299 return -E1000_ERR_PHY;
300 } 300 }
301 if (i2ccmd & E1000_I2CCMD_ERROR) { 301 if (i2ccmd & E1000_I2CCMD_ERROR) {
302 hw_dbg(hw, "I2CCMD Error bit set\n"); 302 hw_dbg("I2CCMD Error bit set\n");
303 return -E1000_ERR_PHY; 303 return -E1000_ERR_PHY;
304 } 304 }
305 305
@@ -326,7 +326,7 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
326 u16 phy_data_swapped; 326 u16 phy_data_swapped;
327 327
328 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 328 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
329 hw_dbg(hw, "PHY Address %d is out of range\n", offset); 329 hw_dbg("PHY Address %d is out of range\n", offset);
330 return -E1000_ERR_PARAM; 330 return -E1000_ERR_PARAM;
331 } 331 }
332 332
@@ -353,11 +353,11 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
353 break; 353 break;
354 } 354 }
355 if (!(i2ccmd & E1000_I2CCMD_READY)) { 355 if (!(i2ccmd & E1000_I2CCMD_READY)) {
356 hw_dbg(hw, "I2CCMD Write did not complete\n"); 356 hw_dbg("I2CCMD Write did not complete\n");
357 return -E1000_ERR_PHY; 357 return -E1000_ERR_PHY;
358 } 358 }
359 if (i2ccmd & E1000_I2CCMD_ERROR) { 359 if (i2ccmd & E1000_I2CCMD_ERROR) {
360 hw_dbg(hw, "I2CCMD Error bit set\n"); 360 hw_dbg("I2CCMD Error bit set\n");
361 return -E1000_ERR_PHY; 361 return -E1000_ERR_PHY;
362 } 362 }
363 363
@@ -368,7 +368,7 @@ static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
368 * igb_get_phy_id_82575 - Retrieve PHY addr and id 368 * igb_get_phy_id_82575 - Retrieve PHY addr and id
369 * @hw: pointer to the HW structure 369 * @hw: pointer to the HW structure
370 * 370 *
371 * Retreives the PHY address and ID for both PHY's which do and do not use 371 * Retrieves the PHY address and ID for both PHY's which do and do not use
372 * sgmi interface. 372 * sgmi interface.
373 **/ 373 **/
374static s32 igb_get_phy_id_82575(struct e1000_hw *hw) 374static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
@@ -397,9 +397,8 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
397 for (phy->addr = 1; phy->addr < 8; phy->addr++) { 397 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
398 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); 398 ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
399 if (ret_val == 0) { 399 if (ret_val == 0) {
400 hw_dbg(hw, "Vendor ID 0x%08X read at address %u\n", 400 hw_dbg("Vendor ID 0x%08X read at address %u\n",
401 phy_id, 401 phy_id, phy->addr);
402 phy->addr);
403 /* 402 /*
404 * At the time of this writing, The M88 part is 403 * At the time of this writing, The M88 part is
405 * the only supported SGMII PHY product. 404 * the only supported SGMII PHY product.
@@ -407,8 +406,7 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
407 if (phy_id == M88_VENDOR) 406 if (phy_id == M88_VENDOR)
408 break; 407 break;
409 } else { 408 } else {
410 hw_dbg(hw, "PHY address %u was unreadable\n", 409 hw_dbg("PHY address %u was unreadable\n", phy->addr);
411 phy->addr);
412 } 410 }
413 } 411 }
414 412
@@ -440,7 +438,7 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
440 * available to us at this time. 438 * available to us at this time.
441 */ 439 */
442 440
443 hw_dbg(hw, "Soft resetting SGMII attached PHY...\n"); 441 hw_dbg("Soft resetting SGMII attached PHY...\n");
444 442
445 /* 443 /*
446 * SFP documentation requires the following to configure the SPF module 444 * SFP documentation requires the following to configure the SPF module
@@ -475,34 +473,29 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
475 s32 ret_val; 473 s32 ret_val;
476 u16 data; 474 u16 data;
477 475
478 ret_val = hw->phy.ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, 476 ret_val = phy->ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
479 &data);
480 if (ret_val) 477 if (ret_val)
481 goto out; 478 goto out;
482 479
483 if (active) { 480 if (active) {
484 data |= IGP02E1000_PM_D0_LPLU; 481 data |= IGP02E1000_PM_D0_LPLU;
485 ret_val = hw->phy.ops.write_phy_reg(hw, 482 ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
486 IGP02E1000_PHY_POWER_MGMT, 483 data);
487 data);
488 if (ret_val) 484 if (ret_val)
489 goto out; 485 goto out;
490 486
491 /* When LPLU is enabled, we should disable SmartSpeed */ 487 /* When LPLU is enabled, we should disable SmartSpeed */
492 ret_val = hw->phy.ops.read_phy_reg(hw, 488 ret_val = phy->ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
493 IGP01E1000_PHY_PORT_CONFIG, 489 &data);
494 &data);
495 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 490 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
496 ret_val = hw->phy.ops.write_phy_reg(hw, 491 ret_val = phy->ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
497 IGP01E1000_PHY_PORT_CONFIG, 492 data);
498 data);
499 if (ret_val) 493 if (ret_val)
500 goto out; 494 goto out;
501 } else { 495 } else {
502 data &= ~IGP02E1000_PM_D0_LPLU; 496 data &= ~IGP02E1000_PM_D0_LPLU;
503 ret_val = hw->phy.ops.write_phy_reg(hw, 497 ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
504 IGP02E1000_PHY_POWER_MGMT, 498 data);
505 data);
506 /* 499 /*
507 * LPLU and SmartSpeed are mutually exclusive. LPLU is used 500 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
508 * during Dx states where the power conservation is most 501 * during Dx states where the power conservation is most
@@ -510,29 +503,25 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
510 * SmartSpeed, so performance is maintained. 503 * SmartSpeed, so performance is maintained.
511 */ 504 */
512 if (phy->smart_speed == e1000_smart_speed_on) { 505 if (phy->smart_speed == e1000_smart_speed_on) {
513 ret_val = hw->phy.ops.read_phy_reg(hw, 506 ret_val = phy->ops.read_phy_reg(hw,
514 IGP01E1000_PHY_PORT_CONFIG, 507 IGP01E1000_PHY_PORT_CONFIG, &data);
515 &data);
516 if (ret_val) 508 if (ret_val)
517 goto out; 509 goto out;
518 510
519 data |= IGP01E1000_PSCFR_SMART_SPEED; 511 data |= IGP01E1000_PSCFR_SMART_SPEED;
520 ret_val = hw->phy.ops.write_phy_reg(hw, 512 ret_val = phy->ops.write_phy_reg(hw,
521 IGP01E1000_PHY_PORT_CONFIG, 513 IGP01E1000_PHY_PORT_CONFIG, data);
522 data);
523 if (ret_val) 514 if (ret_val)
524 goto out; 515 goto out;
525 } else if (phy->smart_speed == e1000_smart_speed_off) { 516 } else if (phy->smart_speed == e1000_smart_speed_off) {
526 ret_val = hw->phy.ops.read_phy_reg(hw, 517 ret_val = phy->ops.read_phy_reg(hw,
527 IGP01E1000_PHY_PORT_CONFIG, 518 IGP01E1000_PHY_PORT_CONFIG, &data);
528 &data);
529 if (ret_val) 519 if (ret_val)
530 goto out; 520 goto out;
531 521
532 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 522 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
533 ret_val = hw->phy.ops.write_phy_reg(hw, 523 ret_val = phy->ops.write_phy_reg(hw,
534 IGP01E1000_PHY_PORT_CONFIG, 524 IGP01E1000_PHY_PORT_CONFIG, data);
535 data);
536 if (ret_val) 525 if (ret_val)
537 goto out; 526 goto out;
538 } 527 }
@@ -546,7 +535,7 @@ out:
546 * igb_acquire_nvm_82575 - Request for access to EEPROM 535 * igb_acquire_nvm_82575 - Request for access to EEPROM
547 * @hw: pointer to the HW structure 536 * @hw: pointer to the HW structure
548 * 537 *
549 * Acquire the necessary semaphores for exclussive access to the EEPROM. 538 * Acquire the necessary semaphores for exclusive access to the EEPROM.
550 * Set the EEPROM access request bit and wait for EEPROM access grant bit. 539 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
551 * Return successful if access grant bit set, else clear the request for 540 * Return successful if access grant bit set, else clear the request for
552 * EEPROM access and return -E1000_ERR_NVM (-1). 541 * EEPROM access and return -E1000_ERR_NVM (-1).
@@ -617,7 +606,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
617 } 606 }
618 607
619 if (i == timeout) { 608 if (i == timeout) {
620 hw_dbg(hw, "Can't access resource, SW_FW_SYNC timeout.\n"); 609 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
621 ret_val = -E1000_ERR_SWFW_SYNC; 610 ret_val = -E1000_ERR_SWFW_SYNC;
622 goto out; 611 goto out;
623 } 612 }
@@ -679,7 +668,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
679 timeout--; 668 timeout--;
680 } 669 }
681 if (!timeout) 670 if (!timeout)
682 hw_dbg(hw, "MNG configuration cycle has not completed.\n"); 671 hw_dbg("MNG configuration cycle has not completed.\n");
683 672
684 /* If EEPROM is not marked present, init the PHY manually */ 673 /* If EEPROM is not marked present, init the PHY manually */
685 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) && 674 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
@@ -718,7 +707,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
718 * @speed: stores the current speed 707 * @speed: stores the current speed
719 * @duplex: stores the current duplex 708 * @duplex: stores the current duplex
720 * 709 *
721 * Using the physical coding sub-layer (PCS), retreive the current speed and 710 * Using the physical coding sub-layer (PCS), retrieve the current speed and
722 * duplex, then store the values in the pointers provided. 711 * duplex, then store the values in the pointers provided.
723 **/ 712 **/
724static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, 713static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
@@ -802,9 +791,9 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
802 */ 791 */
803 ret_val = igb_disable_pcie_master(hw); 792 ret_val = igb_disable_pcie_master(hw);
804 if (ret_val) 793 if (ret_val)
805 hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); 794 hw_dbg("PCI-E Master disable polling has failed.\n");
806 795
807 hw_dbg(hw, "Masking off all interrupts\n"); 796 hw_dbg("Masking off all interrupts\n");
808 wr32(E1000_IMC, 0xffffffff); 797 wr32(E1000_IMC, 0xffffffff);
809 798
810 wr32(E1000_RCTL, 0); 799 wr32(E1000_RCTL, 0);
@@ -815,7 +804,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
815 804
816 ctrl = rd32(E1000_CTRL); 805 ctrl = rd32(E1000_CTRL);
817 806
818 hw_dbg(hw, "Issuing a global reset to MAC\n"); 807 hw_dbg("Issuing a global reset to MAC\n");
819 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST); 808 wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
820 809
821 ret_val = igb_get_auto_rd_done(hw); 810 ret_val = igb_get_auto_rd_done(hw);
@@ -825,7 +814,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
825 * return with an error. This can happen in situations 814 * return with an error. This can happen in situations
826 * where there is no eeprom and prevents getting link. 815 * where there is no eeprom and prevents getting link.
827 */ 816 */
828 hw_dbg(hw, "Auto Read Done did not complete\n"); 817 hw_dbg("Auto Read Done did not complete\n");
829 } 818 }
830 819
831 /* If EEPROM is not present, run manual init scripts */ 820 /* If EEPROM is not present, run manual init scripts */
@@ -856,18 +845,18 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
856 /* Initialize identification LED */ 845 /* Initialize identification LED */
857 ret_val = igb_id_led_init(hw); 846 ret_val = igb_id_led_init(hw);
858 if (ret_val) { 847 if (ret_val) {
859 hw_dbg(hw, "Error initializing identification LED\n"); 848 hw_dbg("Error initializing identification LED\n");
860 /* This is not fatal and we should not stop init due to this */ 849 /* This is not fatal and we should not stop init due to this */
861 } 850 }
862 851
863 /* Disabling VLAN filtering */ 852 /* Disabling VLAN filtering */
864 hw_dbg(hw, "Initializing the IEEE VLAN\n"); 853 hw_dbg("Initializing the IEEE VLAN\n");
865 igb_clear_vfta(hw); 854 igb_clear_vfta(hw);
866 855
867 /* Setup the receive address */ 856 /* Setup the receive address */
868 igb_init_rx_addrs(hw, rar_count); 857 igb_init_rx_addrs(hw, rar_count);
869 /* Zero out the Multicast HASH table */ 858 /* Zero out the Multicast HASH table */
870 hw_dbg(hw, "Zeroing the MTA\n"); 859 hw_dbg("Zeroing the MTA\n");
871 for (i = 0; i < mac->mta_reg_count; i++) 860 for (i = 0; i < mac->mta_reg_count; i++)
872 array_wr32(E1000_MTA, i, 0); 861 array_wr32(E1000_MTA, i, 0);
873 862
@@ -937,10 +926,10 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
937 * PHY will be set to 10H, 10F, 100H or 100F 926 * PHY will be set to 10H, 10F, 100H or 100F
938 * depending on user settings. 927 * depending on user settings.
939 */ 928 */
940 hw_dbg(hw, "Forcing Speed and Duplex\n"); 929 hw_dbg("Forcing Speed and Duplex\n");
941 ret_val = igb_phy_force_speed_duplex(hw); 930 ret_val = igb_phy_force_speed_duplex(hw);
942 if (ret_val) { 931 if (ret_val) {
943 hw_dbg(hw, "Error Forcing Speed and Duplex\n"); 932 hw_dbg("Error Forcing Speed and Duplex\n");
944 goto out; 933 goto out;
945 } 934 }
946 } 935 }
@@ -953,20 +942,17 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
953 * Check link status. Wait up to 100 microseconds for link to become 942 * Check link status. Wait up to 100 microseconds for link to become
954 * valid. 943 * valid.
955 */ 944 */
956 ret_val = igb_phy_has_link(hw, 945 ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
957 COPPER_LINK_UP_LIMIT,
958 10,
959 &link);
960 if (ret_val) 946 if (ret_val)
961 goto out; 947 goto out;
962 948
963 if (link) { 949 if (link) {
964 hw_dbg(hw, "Valid link established!!!\n"); 950 hw_dbg("Valid link established!!!\n");
965 /* Config the MAC and PHY after link is up */ 951 /* Config the MAC and PHY after link is up */
966 igb_config_collision_dist(hw); 952 igb_config_collision_dist(hw);
967 ret_val = igb_config_fc_after_link_up(hw); 953 ret_val = igb_config_fc_after_link_up(hw);
968 } else { 954 } else {
969 hw_dbg(hw, "Unable to establish link!!!\n"); 955 hw_dbg("Unable to establish link!!!\n");
970 } 956 }
971 957
972out: 958out:
@@ -1022,7 +1008,7 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1022 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1008 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
1023 E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1009 E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1024 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1010 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1025 hw_dbg(hw, "Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); 1011 hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
1026 } else { 1012 } else {
1027 /* Set PCS register for forced speed */ 1013 /* Set PCS register for forced speed */
1028 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ 1014 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */
@@ -1030,7 +1016,7 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
1030 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1016 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */
1031 E1000_PCS_LCTL_FSD | /* Force Speed */ 1017 E1000_PCS_LCTL_FSD | /* Force Speed */
1032 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1018 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */
1033 hw_dbg(hw, "Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); 1019 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
1034 } 1020 }
1035 wr32(E1000_PCS_LCTL, reg); 1021 wr32(E1000_PCS_LCTL, reg);
1036 1022
@@ -1071,7 +1057,7 @@ static s32 igb_configure_pcs_link_82575(struct e1000_hw *hw)
1071 */ 1057 */
1072 reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE; 1058 reg |= E1000_PCS_LCTL_AN_RESTART | E1000_PCS_LCTL_AN_ENABLE;
1073 } else { 1059 } else {
1074 /* Set PCS regiseter for forced speed */ 1060 /* Set PCS register for forced speed */
1075 1061
1076 /* Turn off bits for full duplex, speed, and autoneg */ 1062 /* Turn off bits for full duplex, speed, and autoneg */
1077 reg &= ~(E1000_PCS_LCTL_FSV_1000 | 1063 reg &= ~(E1000_PCS_LCTL_FSV_1000 |
@@ -1092,8 +1078,7 @@ static s32 igb_configure_pcs_link_82575(struct e1000_hw *hw)
1092 E1000_PCS_LCTL_FORCE_LINK | 1078 E1000_PCS_LCTL_FORCE_LINK |
1093 E1000_PCS_LCTL_FLV_LINK_UP; 1079 E1000_PCS_LCTL_FLV_LINK_UP;
1094 1080
1095 hw_dbg(hw, 1081 hw_dbg("Wrote 0x%08X to PCS_LCTL to configure forced link\n",
1096 "Wrote 0x%08X to PCS_LCTL to configure forced link\n",
1097 reg); 1082 reg);
1098 } 1083 }
1099 wr32(E1000_PCS_LCTL, reg); 1084 wr32(E1000_PCS_LCTL, reg);
@@ -1138,7 +1123,7 @@ out:
1138static s32 igb_reset_init_script_82575(struct e1000_hw *hw) 1123static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1139{ 1124{
1140 if (hw->mac.type == e1000_82575) { 1125 if (hw->mac.type == e1000_82575) {
1141 hw_dbg(hw, "Running reset init script for 82575\n"); 1126 hw_dbg("Running reset init script for 82575\n");
1142 /* SerDes configuration via SERDESCTRL */ 1127 /* SerDes configuration via SERDESCTRL */
1143 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C); 1128 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1144 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78); 1129 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index 76ea846663db..5fb79dd0e940 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -56,7 +56,7 @@
56#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE 56#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
57#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE 57#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
58 58
59/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ 59/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
60 60
61/* Receive Descriptor - Advanced */ 61/* Receive Descriptor - Advanced */
62union e1000_adv_rx_desc { 62union e1000_adv_rx_desc {
@@ -145,6 +145,6 @@ struct e1000_adv_tx_context_desc {
145 145
146 146
147 147
148#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ 148#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
149 149
150#endif 150#endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 8da9ffedc425..1006d53fd688 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation. 4 Copyright(c) 2007 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -91,12 +91,12 @@
91#define E1000_MAX_SGMII_PHY_REG_ADDR 255 91#define E1000_MAX_SGMII_PHY_REG_ADDR 255
92#define E1000_I2CCMD_PHY_TIMEOUT 200 92#define E1000_I2CCMD_PHY_TIMEOUT 200
93 93
94/* Receive Decriptor bit definitions */ 94/* Receive Descriptor bit definitions */
95#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 95#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
96#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 96#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
97#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 97#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
98#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 98#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
99#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 99#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
100#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 100#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
101#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ 101#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
102#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 102#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
@@ -379,7 +379,7 @@
379#define E1000_ICR_RXO 0x00000040 /* rx overrun */ 379#define E1000_ICR_RXO 0x00000040 /* rx overrun */
380#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 380#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
381#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ 381#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
382#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ 382#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
383#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ 383#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
384#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ 384#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
385#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ 385#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
@@ -443,12 +443,6 @@
443#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 443#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
444#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 444#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
445#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 445#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
446/* queue 0 Rx descriptor FIFO parity error */
447/* queue 0 Tx descriptor FIFO parity error */
448/* host arb read buffer parity error */
449/* packet buffer parity error */
450/* queue 1 Rx descriptor FIFO parity error */
451/* queue 1 Tx descriptor FIFO parity error */
452 446
453/* Extended Interrupt Mask Set */ 447/* Extended Interrupt Mask Set */
454#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ 448#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
@@ -457,12 +451,6 @@
457/* Interrupt Cause Set */ 451/* Interrupt Cause Set */
458#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 452#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
459#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 453#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
460/* queue 0 Rx descriptor FIFO parity error */
461/* queue 0 Tx descriptor FIFO parity error */
462/* host arb read buffer parity error */
463/* packet buffer parity error */
464/* queue 1 Rx descriptor FIFO parity error */
465/* queue 1 Tx descriptor FIFO parity error */
466 454
467/* Extended Interrupt Cause Set */ 455/* Extended Interrupt Cause Set */
468 456
@@ -567,7 +555,6 @@
567/* 1000BASE-T Control Register */ 555/* 1000BASE-T Control Register */
568#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ 556#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
569#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ 557#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
570 /* 0=DTE device */
571#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ 558#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
572 /* 0=Configure PHY as Slave */ 559 /* 0=Configure PHY as Slave */
573#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ 560#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
@@ -581,7 +568,7 @@
581/* PHY 1000 MII Register/Bit Definitions */ 568/* PHY 1000 MII Register/Bit Definitions */
582/* PHY Registers defined by IEEE */ 569/* PHY Registers defined by IEEE */
583#define PHY_CONTROL 0x00 /* Control Register */ 570#define PHY_CONTROL 0x00 /* Control Register */
584#define PHY_STATUS 0x01 /* Status Regiser */ 571#define PHY_STATUS 0x01 /* Status Register */
585#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ 572#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
586#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ 573#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
587#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ 574#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
@@ -708,8 +695,8 @@
708/* Auto crossover enabled all speeds */ 695/* Auto crossover enabled all speeds */
709#define M88E1000_PSCR_AUTO_X_MODE 0x0060 696#define M88E1000_PSCR_AUTO_X_MODE 0x0060
710/* 697/*
711 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold 698 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold
712 * 0=Normal 10BASE-T RX Threshold 699 * 0=Normal 10BASE-T Rx Threshold
713 */ 700 */
714/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ 701/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
715#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ 702#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 7b2c70a3b8cc..746c3ea09e27 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -586,14 +586,10 @@ struct e1000_hw {
586 586
587#ifdef DEBUG 587#ifdef DEBUG
588extern char *igb_get_hw_dev_name(struct e1000_hw *hw); 588extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
589#define hw_dbg(hw, format, arg...) \ 589#define hw_dbg(format, arg...) \
590 printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg) 590 printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg)
591#else 591#else
592static inline int __attribute__ ((format (printf, 2, 3))) 592#define hw_dbg(format, arg...)
593hw_dbg(struct e1000_hw *hw, const char *format, ...)
594{
595 return 0;
596}
597#endif 593#endif
598 594
599#endif 595#endif
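One side effect worth noting, based only on what this hunk shows: the old non-DEBUG fallback was a do-nothing static inline carrying a printf-format attribute, so format strings were still type-checked even when debugging was compiled out (at the cost of evaluating the arguments); the new fallback is an empty macro, which makes the call and its argument evaluation disappear entirely but also gives up that compile-time format checking. A small standalone sketch of the two styles, simplified by dropping the hw parameter (so the attribute indices shift accordingly):

```c
#include <stdio.h>

#ifdef USE_OLD_STYLE
/*
 * Old style: a no-op inline keeps printf-style format checking alive when
 * debugging is disabled, but the arguments are still evaluated.
 */
static inline int __attribute__((format(printf, 1, 2)))
dbg(const char *format, ...)
{
	(void)format;
	return 0;
}
#else
/*
 * New style (what this patch switches to): the call vanishes at preprocessing
 * time -- no argument evaluation, but also no format-string checking.
 */
#define dbg(format, arg...)
#endif

int main(void)
{
	dbg("value = %d\n", 42);	/* compiles either way */
	return 0;
}
```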
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 0dadcfdfa176..47ad2c4277c3 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -158,12 +158,12 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
158 u32 i; 158 u32 i;
159 159
160 /* Setup the receive address */ 160 /* Setup the receive address */
161 hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); 161 hw_dbg("Programming MAC Address into RAR[0]\n");
162 162
163 hw->mac.ops.rar_set(hw, hw->mac.addr, 0); 163 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
164 164
165 /* Zero out the other (rar_entry_count - 1) receive addresses */ 165 /* Zero out the other (rar_entry_count - 1) receive addresses */
166 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); 166 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
167 for (i = 1; i < rar_count; i++) { 167 for (i = 1; i < rar_count; i++) {
168 array_wr32(E1000_RA, (i << 1), 0); 168 array_wr32(E1000_RA, (i << 1), 0);
169 wrfl(); 169 wrfl();
@@ -193,7 +193,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
193 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 193 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
194 &nvm_alt_mac_addr_offset); 194 &nvm_alt_mac_addr_offset);
195 if (ret_val) { 195 if (ret_val) {
196 hw_dbg(hw, "NVM Read Error\n"); 196 hw_dbg("NVM Read Error\n");
197 goto out; 197 goto out;
198 } 198 }
199 199
@@ -209,7 +209,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
209 offset = nvm_alt_mac_addr_offset + (i >> 1); 209 offset = nvm_alt_mac_addr_offset + (i >> 1);
210 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); 210 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
211 if (ret_val) { 211 if (ret_val) {
212 hw_dbg(hw, "NVM Read Error\n"); 212 hw_dbg("NVM Read Error\n");
213 goto out; 213 goto out;
214 } 214 }
215 215
@@ -336,7 +336,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
336 } 336 }
337 337
338 /* Clear the old settings from the MTA */ 338 /* Clear the old settings from the MTA */
339 hw_dbg(hw, "Clearing MTA\n"); 339 hw_dbg("Clearing MTA\n");
340 for (i = 0; i < hw->mac.mta_reg_count; i++) { 340 for (i = 0; i < hw->mac.mta_reg_count; i++) {
341 array_wr32(E1000_MTA, i, 0); 341 array_wr32(E1000_MTA, i, 0);
342 wrfl(); 342 wrfl();
@@ -345,7 +345,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
345 /* Load any remaining multicast addresses into the hash table. */ 345 /* Load any remaining multicast addresses into the hash table. */
346 for (; mc_addr_count > 0; mc_addr_count--) { 346 for (; mc_addr_count > 0; mc_addr_count--) {
347 hash_value = igb_hash_mc_addr(hw, mc_addr_list); 347 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
348 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); 348 hw_dbg("Hash value = 0x%03X\n", hash_value);
349 igb_mta_set(hw, hash_value); 349 igb_mta_set(hw, hash_value);
350 mc_addr_list += ETH_ALEN; 350 mc_addr_list += ETH_ALEN;
351 } 351 }
@@ -540,7 +540,7 @@ s32 igb_check_for_copper_link(struct e1000_hw *hw)
540 */ 540 */
541 ret_val = igb_config_fc_after_link_up(hw); 541 ret_val = igb_config_fc_after_link_up(hw);
542 if (ret_val) 542 if (ret_val)
543 hw_dbg(hw, "Error configuring flow control\n"); 543 hw_dbg("Error configuring flow control\n");
544 544
545out: 545out:
546 return ret_val; 546 return ret_val;
@@ -578,7 +578,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
578 */ 578 */
579 hw->fc.original_type = hw->fc.type; 579 hw->fc.original_type = hw->fc.type;
580 580
581 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type); 581 hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.type);
582 582
583 /* Call the necessary media_type subroutine to configure the link. */ 583 /* Call the necessary media_type subroutine to configure the link. */
584 ret_val = hw->mac.ops.setup_physical_interface(hw); 584 ret_val = hw->mac.ops.setup_physical_interface(hw);
@@ -591,8 +591,7 @@ s32 igb_setup_link(struct e1000_hw *hw)
591 * control is disabled, because it does not hurt anything to 591 * control is disabled, because it does not hurt anything to
592 * initialize these registers. 592 * initialize these registers.
593 */ 593 */
594 hw_dbg(hw, 594 hw_dbg("Initializing the Flow Control address, type and timer regs\n");
595 "Initializing the Flow Control address, type and timer regs\n");
596 wr32(E1000_FCT, FLOW_CONTROL_TYPE); 595 wr32(E1000_FCT, FLOW_CONTROL_TYPE);
597 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); 596 wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
598 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); 597 wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
@@ -689,7 +688,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
689 &nvm_data); 688 &nvm_data);
690 689
691 if (ret_val) { 690 if (ret_val) {
692 hw_dbg(hw, "NVM Read Error\n"); 691 hw_dbg("NVM Read Error\n");
693 goto out; 692 goto out;
694 } 693 }
695 694
@@ -740,7 +739,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
740 * 3: Both Rx and TX flow control (symmetric) is enabled. 739 * 3: Both Rx and TX flow control (symmetric) is enabled.
741 * other: No other values should be possible at this point. 740 * other: No other values should be possible at this point.
742 */ 741 */
743 hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type); 742 hw_dbg("hw->fc.type = %u\n", hw->fc.type);
744 743
745 switch (hw->fc.type) { 744 switch (hw->fc.type) {
746 case e1000_fc_none: 745 case e1000_fc_none:
@@ -758,7 +757,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
758 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); 757 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
759 break; 758 break;
760 default: 759 default:
761 hw_dbg(hw, "Flow control param set incorrectly\n"); 760 hw_dbg("Flow control param set incorrectly\n");
762 ret_val = -E1000_ERR_CONFIG; 761 ret_val = -E1000_ERR_CONFIG;
763 goto out; 762 goto out;
764 } 763 }
@@ -801,7 +800,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
801 } 800 }
802 801
803 if (ret_val) { 802 if (ret_val) {
804 hw_dbg(hw, "Error forcing flow control settings\n"); 803 hw_dbg("Error forcing flow control settings\n");
805 goto out; 804 goto out;
806 } 805 }
807 806
@@ -827,7 +826,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
827 goto out; 826 goto out;
828 827
829 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { 828 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
830 hw_dbg(hw, "Copper PHY and Auto Neg " 829 hw_dbg("Copper PHY and Auto Neg "
831 "has not completed.\n"); 830 "has not completed.\n");
832 goto out; 831 goto out;
833 } 832 }
@@ -893,11 +892,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
893 */ 892 */
894 if (hw->fc.original_type == e1000_fc_full) { 893 if (hw->fc.original_type == e1000_fc_full) {
895 hw->fc.type = e1000_fc_full; 894 hw->fc.type = e1000_fc_full;
896 hw_dbg(hw, "Flow Control = FULL.\r\n"); 895 hw_dbg("Flow Control = FULL.\r\n");
897 } else { 896 } else {
898 hw->fc.type = e1000_fc_rx_pause; 897 hw->fc.type = e1000_fc_rx_pause;
899 hw_dbg(hw, "Flow Control = " 898 hw_dbg("Flow Control = "
900 "RX PAUSE frames only.\r\n"); 899 "RX PAUSE frames only.\r\n");
901 } 900 }
902 } 901 }
903 /* 902 /*
@@ -913,7 +912,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
913 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 912 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
914 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 913 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
915 hw->fc.type = e1000_fc_tx_pause; 914 hw->fc.type = e1000_fc_tx_pause;
916 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); 915 hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
917 } 916 }
918 /* 917 /*
919 * For transmitting PAUSE frames ONLY. 918 * For transmitting PAUSE frames ONLY.
@@ -928,7 +927,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
928 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 927 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
929 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 928 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
930 hw->fc.type = e1000_fc_rx_pause; 929 hw->fc.type = e1000_fc_rx_pause;
931 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); 930 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
932 } 931 }
933 /* 932 /*
934 * Per the IEEE spec, at this point flow control should be 933 * Per the IEEE spec, at this point flow control should be
@@ -955,10 +954,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
955 hw->fc.original_type == e1000_fc_tx_pause) || 954 hw->fc.original_type == e1000_fc_tx_pause) ||
956 hw->fc.strict_ieee) { 955 hw->fc.strict_ieee) {
957 hw->fc.type = e1000_fc_none; 956 hw->fc.type = e1000_fc_none;
958 hw_dbg(hw, "Flow Control = NONE.\r\n"); 957 hw_dbg("Flow Control = NONE.\r\n");
959 } else { 958 } else {
960 hw->fc.type = e1000_fc_rx_pause; 959 hw->fc.type = e1000_fc_rx_pause;
961 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); 960 hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
962 } 961 }
963 962
964 /* 963 /*
@@ -968,7 +967,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
968 */ 967 */
969 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); 968 ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
970 if (ret_val) { 969 if (ret_val) {
971 hw_dbg(hw, "Error getting link speed and duplex\n"); 970 hw_dbg("Error getting link speed and duplex\n");
972 goto out; 971 goto out;
973 } 972 }
974 973
@@ -981,7 +980,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
981 */ 980 */
982 ret_val = igb_force_mac_fc(hw); 981 ret_val = igb_force_mac_fc(hw);
983 if (ret_val) { 982 if (ret_val) {
984 hw_dbg(hw, "Error forcing flow control settings\n"); 983 hw_dbg("Error forcing flow control settings\n");
985 goto out; 984 goto out;
986 } 985 }
987 } 986 }
@@ -1007,21 +1006,21 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1007 status = rd32(E1000_STATUS); 1006 status = rd32(E1000_STATUS);
1008 if (status & E1000_STATUS_SPEED_1000) { 1007 if (status & E1000_STATUS_SPEED_1000) {
1009 *speed = SPEED_1000; 1008 *speed = SPEED_1000;
1010 hw_dbg(hw, "1000 Mbs, "); 1009 hw_dbg("1000 Mbs, ");
1011 } else if (status & E1000_STATUS_SPEED_100) { 1010 } else if (status & E1000_STATUS_SPEED_100) {
1012 *speed = SPEED_100; 1011 *speed = SPEED_100;
1013 hw_dbg(hw, "100 Mbs, "); 1012 hw_dbg("100 Mbs, ");
1014 } else { 1013 } else {
1015 *speed = SPEED_10; 1014 *speed = SPEED_10;
1016 hw_dbg(hw, "10 Mbs, "); 1015 hw_dbg("10 Mbs, ");
1017 } 1016 }
1018 1017
1019 if (status & E1000_STATUS_FD) { 1018 if (status & E1000_STATUS_FD) {
1020 *duplex = FULL_DUPLEX; 1019 *duplex = FULL_DUPLEX;
1021 hw_dbg(hw, "Full Duplex\n"); 1020 hw_dbg("Full Duplex\n");
1022 } else { 1021 } else {
1023 *duplex = HALF_DUPLEX; 1022 *duplex = HALF_DUPLEX;
1024 hw_dbg(hw, "Half Duplex\n"); 1023 hw_dbg("Half Duplex\n");
1025 } 1024 }
1026 1025
1027 return 0; 1026 return 0;
@@ -1051,7 +1050,7 @@ s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1051 } 1050 }
1052 1051
1053 if (i == timeout) { 1052 if (i == timeout) {
1054 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); 1053 hw_dbg("Driver can't access device - SMBI bit is set.\n");
1055 ret_val = -E1000_ERR_NVM; 1054 ret_val = -E1000_ERR_NVM;
1056 goto out; 1055 goto out;
1057 } 1056 }
@@ -1071,7 +1070,7 @@ s32 igb_get_hw_semaphore(struct e1000_hw *hw)
1071 if (i == timeout) { 1070 if (i == timeout) {
1072 /* Release semaphores */ 1071 /* Release semaphores */
1073 igb_put_hw_semaphore(hw); 1072 igb_put_hw_semaphore(hw);
1074 hw_dbg(hw, "Driver can't access the NVM\n"); 1073 hw_dbg("Driver can't access the NVM\n");
1075 ret_val = -E1000_ERR_NVM; 1074 ret_val = -E1000_ERR_NVM;
1076 goto out; 1075 goto out;
1077 } 1076 }
@@ -1117,7 +1116,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1117 } 1116 }
1118 1117
1119 if (i == AUTO_READ_DONE_TIMEOUT) { 1118 if (i == AUTO_READ_DONE_TIMEOUT) {
1120 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); 1119 hw_dbg("Auto read by HW from NVM has not completed.\n");
1121 ret_val = -E1000_ERR_RESET; 1120 ret_val = -E1000_ERR_RESET;
1122 goto out; 1121 goto out;
1123 } 1122 }
@@ -1140,7 +1139,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1140 1139
1141 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); 1140 ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1142 if (ret_val) { 1141 if (ret_val) {
1143 hw_dbg(hw, "NVM Read Error\n"); 1142 hw_dbg("NVM Read Error\n");
1144 goto out; 1143 goto out;
1145 } 1144 }
1146 1145
@@ -1322,7 +1321,7 @@ s32 igb_disable_pcie_master(struct e1000_hw *hw)
1322 } 1321 }
1323 1322
1324 if (!timeout) { 1323 if (!timeout) {
1325 hw_dbg(hw, "Master requests are pending.\n"); 1324 hw_dbg("Master requests are pending.\n");
1326 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; 1325 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1327 goto out; 1326 goto out;
1328 } 1327 }
@@ -1342,7 +1341,7 @@ void igb_reset_adaptive(struct e1000_hw *hw)
1342 struct e1000_mac_info *mac = &hw->mac; 1341 struct e1000_mac_info *mac = &hw->mac;
1343 1342
1344 if (!mac->adaptive_ifs) { 1343 if (!mac->adaptive_ifs) {
1345 hw_dbg(hw, "Not in Adaptive IFS mode!\n"); 1344 hw_dbg("Not in Adaptive IFS mode!\n");
1346 goto out; 1345 goto out;
1347 } 1346 }
1348 1347
@@ -1372,7 +1371,7 @@ void igb_update_adaptive(struct e1000_hw *hw)
1372 struct e1000_mac_info *mac = &hw->mac; 1371 struct e1000_mac_info *mac = &hw->mac;
1373 1372
1374 if (!mac->adaptive_ifs) { 1373 if (!mac->adaptive_ifs) {
1375 hw_dbg(hw, "Not in Adaptive IFS mode!\n"); 1374 hw_dbg("Not in Adaptive IFS mode!\n");
1376 goto out; 1375 goto out;
1377 } 1376 }
1378 1377
@@ -1413,7 +1412,7 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1413 s32 ret_val = 0; 1412 s32 ret_val = 0;
1414 1413
1415 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { 1414 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1416 hw_dbg(hw, "Invalid MDI setting detected\n"); 1415 hw_dbg("Invalid MDI setting detected\n");
1417 hw->phy.mdix = 1; 1416 hw->phy.mdix = 1;
1418 ret_val = -E1000_ERR_CONFIG; 1417 ret_val = -E1000_ERR_CONFIG;
1419 goto out; 1418 goto out;
@@ -1452,7 +1451,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1452 break; 1451 break;
1453 } 1452 }
1454 if (!(regvalue & E1000_GEN_CTL_READY)) { 1453 if (!(regvalue & E1000_GEN_CTL_READY)) {
1455 hw_dbg(hw, "Reg %08x did not indicate ready\n", reg); 1454 hw_dbg("Reg %08x did not indicate ready\n", reg);
1456 ret_val = -E1000_ERR_PHY; 1455 ret_val = -E1000_ERR_PHY;
1457 goto out; 1456 goto out;
1458 } 1457 }
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index 780ba798ce8f..a84e4e429fa7 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -202,7 +202,7 @@ s32 igb_acquire_nvm(struct e1000_hw *hw)
202 if (!timeout) { 202 if (!timeout) {
203 eecd &= ~E1000_EECD_REQ; 203 eecd &= ~E1000_EECD_REQ;
204 wr32(E1000_EECD, eecd); 204 wr32(E1000_EECD, eecd);
205 hw_dbg(hw, "Could not acquire NVM grant\n"); 205 hw_dbg("Could not acquire NVM grant\n");
206 ret_val = -E1000_ERR_NVM; 206 ret_val = -E1000_ERR_NVM;
207 } 207 }
208 208
@@ -337,7 +337,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
337 } 337 }
338 338
339 if (!timeout) { 339 if (!timeout) {
340 hw_dbg(hw, "SPI NVM Status error\n"); 340 hw_dbg("SPI NVM Status error\n");
341 ret_val = -E1000_ERR_NVM; 341 ret_val = -E1000_ERR_NVM;
342 goto out; 342 goto out;
343 } 343 }
@@ -368,7 +368,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
368 */ 368 */
369 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 369 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
370 (words == 0)) { 370 (words == 0)) {
371 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 371 hw_dbg("nvm parameter(s) out of bounds\n");
372 ret_val = -E1000_ERR_NVM; 372 ret_val = -E1000_ERR_NVM;
373 goto out; 373 goto out;
374 } 374 }
@@ -414,7 +414,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
414 */ 414 */
415 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 415 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
416 (words == 0)) { 416 (words == 0)) {
417 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 417 hw_dbg("nvm parameter(s) out of bounds\n");
418 ret_val = -E1000_ERR_NVM; 418 ret_val = -E1000_ERR_NVM;
419 goto out; 419 goto out;
420 } 420 }
@@ -489,14 +489,14 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
489 489
490 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); 490 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
491 if (ret_val) { 491 if (ret_val) {
492 hw_dbg(hw, "NVM Read Error\n"); 492 hw_dbg("NVM Read Error\n");
493 goto out; 493 goto out;
494 } 494 }
495 *part_num = (u32)(nvm_data << 16); 495 *part_num = (u32)(nvm_data << 16);
496 496
497 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 497 ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
498 if (ret_val) { 498 if (ret_val) {
499 hw_dbg(hw, "NVM Read Error\n"); 499 hw_dbg("NVM Read Error\n");
500 goto out; 500 goto out;
501 } 501 }
502 *part_num |= nvm_data; 502 *part_num |= nvm_data;
@@ -522,7 +522,7 @@ s32 igb_read_mac_addr(struct e1000_hw *hw)
522 offset = i >> 1; 522 offset = i >> 1;
523 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data); 523 ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
524 if (ret_val) { 524 if (ret_val) {
525 hw_dbg(hw, "NVM Read Error\n"); 525 hw_dbg("NVM Read Error\n");
526 goto out; 526 goto out;
527 } 527 }
528 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); 528 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
@@ -556,14 +556,14 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
556 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 556 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
557 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); 557 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data);
558 if (ret_val) { 558 if (ret_val) {
559 hw_dbg(hw, "NVM Read Error\n"); 559 hw_dbg("NVM Read Error\n");
560 goto out; 560 goto out;
561 } 561 }
562 checksum += nvm_data; 562 checksum += nvm_data;
563 } 563 }
564 564
565 if (checksum != (u16) NVM_SUM) { 565 if (checksum != (u16) NVM_SUM) {
566 hw_dbg(hw, "NVM Checksum Invalid\n"); 566 hw_dbg("NVM Checksum Invalid\n");
567 ret_val = -E1000_ERR_NVM; 567 ret_val = -E1000_ERR_NVM;
568 goto out; 568 goto out;
569 } 569 }
@@ -589,7 +589,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
589 for (i = 0; i < NVM_CHECKSUM_REG; i++) { 589 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
590 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data); 590 ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data);
591 if (ret_val) { 591 if (ret_val) {
592 hw_dbg(hw, "NVM Read Error while updating checksum.\n"); 592 hw_dbg("NVM Read Error while updating checksum.\n");
593 goto out; 593 goto out;
594 } 594 }
595 checksum += nvm_data; 595 checksum += nvm_data;
@@ -597,7 +597,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
597 checksum = (u16) NVM_SUM - checksum; 597 checksum = (u16) NVM_SUM - checksum;
598 ret_val = hw->nvm.ops.write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); 598 ret_val = hw->nvm.ops.write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
599 if (ret_val) 599 if (ret_val)
600 hw_dbg(hw, "NVM Write Error while updating checksum.\n"); 600 hw_dbg("NVM Write Error while updating checksum.\n");
601 601
602out: 602out:
603 return ret_val; 603 return ret_val;
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 220e4716da9e..17fddb91c9f5 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -144,7 +144,7 @@ static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
144 s32 ret_val = 0; 144 s32 ret_val = 0;
145 145
146 if (offset > MAX_PHY_REG_ADDRESS) { 146 if (offset > MAX_PHY_REG_ADDRESS) {
147 hw_dbg(hw, "PHY Address %d is out of range\n", offset); 147 hw_dbg("PHY Address %d is out of range\n", offset);
148 ret_val = -E1000_ERR_PARAM; 148 ret_val = -E1000_ERR_PARAM;
149 goto out; 149 goto out;
150 } 150 }
@@ -172,12 +172,12 @@ static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
172 break; 172 break;
173 } 173 }
174 if (!(mdic & E1000_MDIC_READY)) { 174 if (!(mdic & E1000_MDIC_READY)) {
175 hw_dbg(hw, "MDI Read did not complete\n"); 175 hw_dbg("MDI Read did not complete\n");
176 ret_val = -E1000_ERR_PHY; 176 ret_val = -E1000_ERR_PHY;
177 goto out; 177 goto out;
178 } 178 }
179 if (mdic & E1000_MDIC_ERROR) { 179 if (mdic & E1000_MDIC_ERROR) {
180 hw_dbg(hw, "MDI Error\n"); 180 hw_dbg("MDI Error\n");
181 ret_val = -E1000_ERR_PHY; 181 ret_val = -E1000_ERR_PHY;
182 goto out; 182 goto out;
183 } 183 }
@@ -202,7 +202,7 @@ static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
202 s32 ret_val = 0; 202 s32 ret_val = 0;
203 203
204 if (offset > MAX_PHY_REG_ADDRESS) { 204 if (offset > MAX_PHY_REG_ADDRESS) {
205 hw_dbg(hw, "PHY Address %d is out of range\n", offset); 205 hw_dbg("PHY Address %d is out of range\n", offset);
206 ret_val = -E1000_ERR_PARAM; 206 ret_val = -E1000_ERR_PARAM;
207 goto out; 207 goto out;
208 } 208 }
@@ -231,12 +231,12 @@ static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
231 break; 231 break;
232 } 232 }
233 if (!(mdic & E1000_MDIC_READY)) { 233 if (!(mdic & E1000_MDIC_READY)) {
234 hw_dbg(hw, "MDI Write did not complete\n"); 234 hw_dbg("MDI Write did not complete\n");
235 ret_val = -E1000_ERR_PHY; 235 ret_val = -E1000_ERR_PHY;
236 goto out; 236 goto out;
237 } 237 }
238 if (mdic & E1000_MDIC_ERROR) { 238 if (mdic & E1000_MDIC_ERROR) {
239 hw_dbg(hw, "MDI Error\n"); 239 hw_dbg("MDI Error\n");
240 ret_val = -E1000_ERR_PHY; 240 ret_val = -E1000_ERR_PHY;
241 goto out; 241 goto out;
242 } 242 }
@@ -423,7 +423,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
423 /* Commit the changes. */ 423 /* Commit the changes. */
424 ret_val = igb_phy_sw_reset(hw); 424 ret_val = igb_phy_sw_reset(hw);
425 if (ret_val) { 425 if (ret_val) {
426 hw_dbg(hw, "Error committing the PHY changes\n"); 426 hw_dbg("Error committing the PHY changes\n");
427 goto out; 427 goto out;
428 } 428 }
429 429
@@ -451,7 +451,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
451 451
452 ret_val = hw->phy.ops.reset_phy(hw); 452 ret_val = hw->phy.ops.reset_phy(hw);
453 if (ret_val) { 453 if (ret_val) {
454 hw_dbg(hw, "Error resetting the PHY.\n"); 454 hw_dbg("Error resetting the PHY.\n");
455 goto out; 455 goto out;
456 } 456 }
457 457
@@ -467,7 +467,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
467 if (hw->phy.ops.set_d3_lplu_state) 467 if (hw->phy.ops.set_d3_lplu_state)
468 ret_val = hw->phy.ops.set_d3_lplu_state(hw, false); 468 ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
469 if (ret_val) { 469 if (ret_val) {
470 hw_dbg(hw, "Error Disabling LPLU D3\n"); 470 hw_dbg("Error Disabling LPLU D3\n");
471 goto out; 471 goto out;
472 } 472 }
473 } 473 }
@@ -475,7 +475,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
475 /* disable lplu d0 during driver init */ 475 /* disable lplu d0 during driver init */
476 ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); 476 ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
477 if (ret_val) { 477 if (ret_val) {
478 hw_dbg(hw, "Error Disabling LPLU D0\n"); 478 hw_dbg("Error Disabling LPLU D0\n");
479 goto out; 479 goto out;
480 } 480 }
481 /* Configure mdi-mdix settings */ 481 /* Configure mdi-mdix settings */
@@ -597,13 +597,13 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw)
597 if (phy->autoneg_advertised == 0) 597 if (phy->autoneg_advertised == 0)
598 phy->autoneg_advertised = phy->autoneg_mask; 598 phy->autoneg_advertised = phy->autoneg_mask;
599 599
600 hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); 600 hw_dbg("Reconfiguring auto-neg advertisement params\n");
601 ret_val = igb_phy_setup_autoneg(hw); 601 ret_val = igb_phy_setup_autoneg(hw);
602 if (ret_val) { 602 if (ret_val) {
603 hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); 603 hw_dbg("Error Setting up Auto-Negotiation\n");
604 goto out; 604 goto out;
605 } 605 }
606 hw_dbg(hw, "Restarting Auto-Neg\n"); 606 hw_dbg("Restarting Auto-Neg\n");
607 607
608 /* 608 /*
609 * Restart auto-negotiation by setting the Auto Neg Enable bit and 609 * Restart auto-negotiation by setting the Auto Neg Enable bit and
@@ -625,8 +625,8 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw)
625 if (phy->autoneg_wait_to_complete) { 625 if (phy->autoneg_wait_to_complete) {
626 ret_val = igb_wait_autoneg(hw); 626 ret_val = igb_wait_autoneg(hw);
627 if (ret_val) { 627 if (ret_val) {
628 hw_dbg(hw, "Error while waiting for " 628 hw_dbg("Error while waiting for "
629 "autoneg to complete\n"); 629 "autoneg to complete\n");
630 goto out; 630 goto out;
631 } 631 }
632 } 632 }
@@ -689,39 +689,39 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
689 NWAY_AR_10T_HD_CAPS); 689 NWAY_AR_10T_HD_CAPS);
690 mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); 690 mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
691 691
692 hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); 692 hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
693 693
694 /* Do we want to advertise 10 Mb Half Duplex? */ 694 /* Do we want to advertise 10 Mb Half Duplex? */
695 if (phy->autoneg_advertised & ADVERTISE_10_HALF) { 695 if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
696 hw_dbg(hw, "Advertise 10mb Half duplex\n"); 696 hw_dbg("Advertise 10mb Half duplex\n");
697 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; 697 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
698 } 698 }
699 699
700 /* Do we want to advertise 10 Mb Full Duplex? */ 700 /* Do we want to advertise 10 Mb Full Duplex? */
701 if (phy->autoneg_advertised & ADVERTISE_10_FULL) { 701 if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
702 hw_dbg(hw, "Advertise 10mb Full duplex\n"); 702 hw_dbg("Advertise 10mb Full duplex\n");
703 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; 703 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
704 } 704 }
705 705
706 /* Do we want to advertise 100 Mb Half Duplex? */ 706 /* Do we want to advertise 100 Mb Half Duplex? */
707 if (phy->autoneg_advertised & ADVERTISE_100_HALF) { 707 if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
708 hw_dbg(hw, "Advertise 100mb Half duplex\n"); 708 hw_dbg("Advertise 100mb Half duplex\n");
709 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; 709 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
710 } 710 }
711 711
712 /* Do we want to advertise 100 Mb Full Duplex? */ 712 /* Do we want to advertise 100 Mb Full Duplex? */
713 if (phy->autoneg_advertised & ADVERTISE_100_FULL) { 713 if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
714 hw_dbg(hw, "Advertise 100mb Full duplex\n"); 714 hw_dbg("Advertise 100mb Full duplex\n");
715 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; 715 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
716 } 716 }
717 717
718 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ 718 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
719 if (phy->autoneg_advertised & ADVERTISE_1000_HALF) 719 if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
720 hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); 720 hw_dbg("Advertise 1000mb Half duplex request denied!\n");
721 721
722 /* Do we want to advertise 1000 Mb Full Duplex? */ 722 /* Do we want to advertise 1000 Mb Full Duplex? */
723 if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { 723 if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
724 hw_dbg(hw, "Advertise 1000mb Full duplex\n"); 724 hw_dbg("Advertise 1000mb Full duplex\n");
725 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 725 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
726 } 726 }
727 727
@@ -780,7 +780,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
780 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 780 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
781 break; 781 break;
782 default: 782 default:
783 hw_dbg(hw, "Flow control param set incorrectly\n"); 783 hw_dbg("Flow control param set incorrectly\n");
784 ret_val = -E1000_ERR_CONFIG; 784 ret_val = -E1000_ERR_CONFIG;
785 goto out; 785 goto out;
786 } 786 }
@@ -790,7 +790,7 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
790 if (ret_val) 790 if (ret_val)
791 goto out; 791 goto out;
792 792
793 hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 793 hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
794 794
795 if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 795 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
796 ret_val = hw->phy.ops.write_phy_reg(hw, 796 ret_val = hw->phy.ops.write_phy_reg(hw,
@@ -846,13 +846,12 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
846 if (ret_val) 846 if (ret_val)
847 goto out; 847 goto out;
848 848
849 hw_dbg(hw, "IGP PSCR: %X\n", phy_data); 849 hw_dbg("IGP PSCR: %X\n", phy_data);
850 850
851 udelay(1); 851 udelay(1);
852 852
853 if (phy->autoneg_wait_to_complete) { 853 if (phy->autoneg_wait_to_complete) {
854 hw_dbg(hw, 854 hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
855 "Waiting for forced speed/duplex link on IGP phy.\n");
856 855
857 ret_val = igb_phy_has_link(hw, 856 ret_val = igb_phy_has_link(hw,
858 PHY_FORCE_LIMIT, 857 PHY_FORCE_LIMIT,
@@ -862,7 +861,7 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
862 goto out; 861 goto out;
863 862
864 if (!link) 863 if (!link)
865 hw_dbg(hw, "Link taking longer than expected.\n"); 864 hw_dbg("Link taking longer than expected.\n");
866 865
867 /* Try once more */ 866 /* Try once more */
868 ret_val = igb_phy_has_link(hw, 867 ret_val = igb_phy_has_link(hw,
@@ -909,7 +908,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
909 if (ret_val) 908 if (ret_val)
910 goto out; 909 goto out;
911 910
912 hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); 911 hw_dbg("M88E1000 PSCR: %X\n", phy_data);
913 912
914 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); 913 ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
915 if (ret_val) 914 if (ret_val)
@@ -927,8 +926,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
927 udelay(1); 926 udelay(1);
928 927
929 if (phy->autoneg_wait_to_complete) { 928 if (phy->autoneg_wait_to_complete) {
930 hw_dbg(hw, 929 hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
931 "Waiting for forced speed/duplex link on M88 phy.\n");
932 930
933 ret_val = igb_phy_has_link(hw, 931 ret_val = igb_phy_has_link(hw,
934 PHY_FORCE_LIMIT, 932 PHY_FORCE_LIMIT,
@@ -1028,11 +1026,11 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1028 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { 1026 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
1029 ctrl &= ~E1000_CTRL_FD; 1027 ctrl &= ~E1000_CTRL_FD;
1030 *phy_ctrl &= ~MII_CR_FULL_DUPLEX; 1028 *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
1031 hw_dbg(hw, "Half Duplex\n"); 1029 hw_dbg("Half Duplex\n");
1032 } else { 1030 } else {
1033 ctrl |= E1000_CTRL_FD; 1031 ctrl |= E1000_CTRL_FD;
1034 *phy_ctrl |= MII_CR_FULL_DUPLEX; 1032 *phy_ctrl |= MII_CR_FULL_DUPLEX;
1035 hw_dbg(hw, "Full Duplex\n"); 1033 hw_dbg("Full Duplex\n");
1036 } 1034 }
1037 1035
1038 /* Forcing 10mb or 100mb? */ 1036 /* Forcing 10mb or 100mb? */
@@ -1040,12 +1038,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1040 ctrl |= E1000_CTRL_SPD_100; 1038 ctrl |= E1000_CTRL_SPD_100;
1041 *phy_ctrl |= MII_CR_SPEED_100; 1039 *phy_ctrl |= MII_CR_SPEED_100;
1042 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); 1040 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1043 hw_dbg(hw, "Forcing 100mb\n"); 1041 hw_dbg("Forcing 100mb\n");
1044 } else { 1042 } else {
1045 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 1043 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1046 *phy_ctrl |= MII_CR_SPEED_10; 1044 *phy_ctrl |= MII_CR_SPEED_10;
1047 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); 1045 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1048 hw_dbg(hw, "Forcing 10mb\n"); 1046 hw_dbg("Forcing 10mb\n");
1049 } 1047 }
1050 1048
1051 igb_config_collision_dist(hw); 1049 igb_config_collision_dist(hw);
@@ -1459,7 +1457,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1459 bool link; 1457 bool link;
1460 1458
1461 if (hw->phy.media_type != e1000_media_type_copper) { 1459 if (hw->phy.media_type != e1000_media_type_copper) {
1462 hw_dbg(hw, "Phy info is only valid for copper media\n"); 1460 hw_dbg("Phy info is only valid for copper media\n");
1463 ret_val = -E1000_ERR_CONFIG; 1461 ret_val = -E1000_ERR_CONFIG;
1464 goto out; 1462 goto out;
1465 } 1463 }
@@ -1469,7 +1467,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
1469 goto out; 1467 goto out;
1470 1468
1471 if (!link) { 1469 if (!link) {
1472 hw_dbg(hw, "Phy info is only valid if link is up\n"); 1470 hw_dbg("Phy info is only valid if link is up\n");
1473 ret_val = -E1000_ERR_CONFIG; 1471 ret_val = -E1000_ERR_CONFIG;
1474 goto out; 1472 goto out;
1475 } 1473 }
@@ -1543,7 +1541,7 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
1543 goto out; 1541 goto out;
1544 1542
1545 if (!link) { 1543 if (!link) {
1546 hw_dbg(hw, "Phy info is only valid if link is up\n"); 1544 hw_dbg("Phy info is only valid if link is up\n");
1547 ret_val = -E1000_ERR_CONFIG; 1545 ret_val = -E1000_ERR_CONFIG;
1548 goto out; 1546 goto out;
1549 } 1547 }
@@ -1728,7 +1726,7 @@ s32 igb_phy_force_speed_duplex(struct e1000_hw *hw)
1728 **/ 1726 **/
1729s32 igb_phy_init_script_igp3(struct e1000_hw *hw) 1727s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
1730{ 1728{
1731 hw_dbg(hw, "Running IGP 3 PHY init script\n"); 1729 hw_dbg("Running IGP 3 PHY init script\n");
1732 1730
1733 /* PHY init IGP 3 */ 1731 /* PHY init IGP 3 */
1734 /* Enable rise/fall, 10-mode work in class-A */ 1732 /* Enable rise/fall, 10-mode work in class-A */